diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLLanguageServer.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLLanguageServer.java index 3c3687960c7..a4590b73524 100644 --- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLLanguageServer.java +++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLLanguageServer.java @@ -53,6 +53,8 @@ import org.eclipse.lsp4j.RenameOptions; import org.eclipse.lsp4j.SaveOptions; import org.eclipse.lsp4j.SelectionRangeRegistrationOptions; +import org.eclipse.lsp4j.SemanticTokensLegend; +import org.eclipse.lsp4j.SemanticTokensWithRegistrationOptions; import org.eclipse.lsp4j.ServerCapabilities; import org.eclipse.lsp4j.ServerInfo; import org.eclipse.lsp4j.TextDocumentClientCapabilities; @@ -87,6 +89,7 @@ public class BSLLanguageServer implements LanguageServer, ProtocolExtension { private final ClientCapabilitiesHolder clientCapabilitiesHolder; private final ServerContext context; private final ServerInfo serverInfo; + private final SemanticTokensLegend legend; private boolean shutdownWasCalled; @@ -94,7 +97,7 @@ public class BSLLanguageServer implements LanguageServer, ProtocolExtension { public CompletableFuture initialize(InitializeParams params) { clientCapabilitiesHolder.setCapabilities(params.getCapabilities()); - + setConfigurationRoot(params); var factory = new NamedForkJoinWorkerThreadFactory("populate-context-"); @@ -123,6 +126,7 @@ public CompletableFuture initialize(InitializeParams params) { capabilities.setRenameProvider(getRenameProvider(params)); capabilities.setInlayHintProvider(getInlayHintProvider()); capabilities.setExecuteCommandProvider(getExecuteCommandProvider()); + capabilities.setSemanticTokensProvider(getSemanticTokensProvider()); var result = new InitializeResult(capabilities, serverInfo); @@ -336,4 +340,12 @@ private ExecuteCommandOptions getExecuteCommandProvider() { executeCommandOptions.setWorkDoneProgress(Boolean.FALSE); return executeCommandOptions; } + + private SemanticTokensWithRegistrationOptions getSemanticTokensProvider() { + var semanticTokensProvider = new SemanticTokensWithRegistrationOptions(legend); + semanticTokensProvider.setFull(Boolean.TRUE); + semanticTokensProvider.setRange(Boolean.FALSE); + return semanticTokensProvider; + } + } diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLTextDocumentService.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLTextDocumentService.java index 313d6238aab..1e9d2afe57b 100644 --- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLTextDocumentService.java +++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/BSLTextDocumentService.java @@ -43,6 +43,7 @@ import com.github._1c_syntax.bsl.languageserver.providers.ReferencesProvider; import com.github._1c_syntax.bsl.languageserver.providers.RenameProvider; import com.github._1c_syntax.bsl.languageserver.providers.SelectionRangeProvider; +import com.github._1c_syntax.bsl.languageserver.providers.SemanticTokensProvider; import com.github._1c_syntax.bsl.languageserver.utils.Ranges; import jakarta.annotation.PreDestroy; import lombok.RequiredArgsConstructor; @@ -86,6 +87,8 @@ import org.eclipse.lsp4j.Range; import org.eclipse.lsp4j.ReferenceParams; import org.eclipse.lsp4j.RenameParams; +import org.eclipse.lsp4j.SemanticTokens; +import org.eclipse.lsp4j.SemanticTokensParams; import org.eclipse.lsp4j.SelectionRange; import org.eclipse.lsp4j.SelectionRangeParams; import org.eclipse.lsp4j.SymbolInformation; @@ -125,6 +128,7 @@ 
public class BSLTextDocumentService implements TextDocumentService, ProtocolExte private final ColorProvider colorProvider; private final RenameProvider renameProvider; private final InlayHintProvider inlayHintProvider; + private final SemanticTokensProvider semanticTokensProvider; private final ExecutorService executorService = Executors.newCachedThreadPool(new CustomizableThreadFactory("text-document-service-")); @@ -288,6 +292,18 @@ public CompletableFuture> prepareCallHierarchy(CallHiera ); } + @Override + public CompletableFuture semanticTokensFull(SemanticTokensParams params) { + DocumentContext documentContext = context.getDocument(params.getTextDocument().getUri()); + if (documentContext == null) { + return CompletableFuture.completedFuture(null); + } + + return CompletableFuture.supplyAsync(() -> semanticTokensProvider.getSemanticTokensFull(documentContext, params)); + } + + + @Override public CompletableFuture> callHierarchyIncomingCalls( CallHierarchyIncomingCallsParams params diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java new file mode 100644 index 00000000000..6a349593bae --- /dev/null +++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProvider.java @@ -0,0 +1,518 @@ +/* + * This file is a part of BSL Language Server. + * + * Copyright (c) 2018-2025 + * Alexey Sosnoviy , Nikita Fedkin and contributors + * + * SPDX-License-Identifier: LGPL-3.0-or-later + * + * BSL Language Server is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3.0 of the License, or (at your option) any later version. + * + * BSL Language Server is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with BSL Language Server. 
+ */ +package com.github._1c_syntax.bsl.languageserver.providers; + +import com.github._1c_syntax.bsl.languageserver.ClientCapabilitiesHolder; +import com.github._1c_syntax.bsl.languageserver.context.DocumentContext; +import com.github._1c_syntax.bsl.languageserver.context.symbol.ParameterDefinition; +import com.github._1c_syntax.bsl.languageserver.context.symbol.VariableSymbol; +import com.github._1c_syntax.bsl.languageserver.context.symbol.description.MethodDescription; +import com.github._1c_syntax.bsl.languageserver.references.ReferenceIndex; +import com.github._1c_syntax.bsl.languageserver.references.ReferenceResolver; +import com.github._1c_syntax.bsl.languageserver.references.model.OccurrenceType; +import com.github._1c_syntax.bsl.languageserver.utils.Ranges; +import com.github._1c_syntax.bsl.languageserver.utils.Trees; +import com.github._1c_syntax.bsl.parser.BSLLexer; +import com.github._1c_syntax.bsl.parser.BSLParser; +import com.github._1c_syntax.bsl.parser.BSLParser.AnnotationContext; +import com.github._1c_syntax.bsl.parser.BSLParser.AnnotationParamNameContext; +import com.github._1c_syntax.bsl.parser.BSLParser.CompilerDirectiveContext; +import com.github._1c_syntax.bsl.parser.BSLParser.Preproc_nativeContext; +import com.github._1c_syntax.bsl.parser.BSLParser.PreprocessorContext; +import com.github._1c_syntax.bsl.parser.BSLParser.RegionEndContext; +import com.github._1c_syntax.bsl.parser.BSLParser.RegionStartContext; +import com.github._1c_syntax.bsl.parser.BSLParser.UseContext; +import com.github._1c_syntax.bsl.parser.BSLParserRuleContext; +import lombok.RequiredArgsConstructor; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.eclipse.lsp4j.ClientCapabilities; +import org.eclipse.lsp4j.Position; +import org.eclipse.lsp4j.Range; +import org.eclipse.lsp4j.SemanticTokenModifiers; +import org.eclipse.lsp4j.SemanticTokenTypes; +import org.eclipse.lsp4j.SemanticTokens; +import org.eclipse.lsp4j.SemanticTokensCapabilities; +import org.eclipse.lsp4j.SemanticTokensLegend; +import org.eclipse.lsp4j.SemanticTokensParams; +import org.eclipse.lsp4j.SymbolKind; +import org.eclipse.lsp4j.TextDocumentClientCapabilities; +import org.springframework.stereotype.Component; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; + +@Component +@RequiredArgsConstructor +public class SemanticTokensProvider { + + private static final Set NUMBER_TYPES = Set.of( + BSLLexer.DECIMAL, + BSLLexer.FLOAT + ); + + private static final Set STRING_TYPES = Set.of( + BSLLexer.STRING, + BSLLexer.STRINGPART, + BSLLexer.STRINGSTART, + BSLLexer.STRINGTAIL, + BSLLexer.PREPROC_STRING + ); + + private static final Set OPERATOR_TYPES = Set.of( + BSLLexer.LPAREN, + BSLLexer.RPAREN, + BSLLexer.LBRACK, + BSLLexer.RBRACK, + BSLLexer.COMMA, + BSLLexer.SEMICOLON, + BSLLexer.COLON, + BSLLexer.DOT, + BSLLexer.PLUS, + BSLLexer.MINUS, + BSLLexer.MUL, + BSLLexer.QUOTIENT, + BSLLexer.MODULO, + BSLLexer.ASSIGN, + BSLLexer.NOT_EQUAL, + BSLLexer.LESS, + BSLLexer.LESS_OR_EQUAL, + BSLLexer.GREATER, + BSLLexer.GREATER_OR_EQUAL, + BSLLexer.QUESTION, + BSLLexer.TILDA + ); + + private static final Set ANNOTATION_TOKENS = Set.of( + BSLLexer.ANNOTATION_ATSERVERNOCONTEXT_SYMBOL, + BSLLexer.ANNOTATION_ATCLIENTATSERVERNOCONTEXT_SYMBOL, + BSLLexer.ANNOTATION_ATCLIENTATSERVER_SYMBOL, + 
BSLLexer.ANNOTATION_ATCLIENT_SYMBOL, + BSLLexer.ANNOTATION_ATSERVER_SYMBOL, + BSLLexer.ANNOTATION_BEFORE_SYMBOL, + BSLLexer.ANNOTATION_AFTER_SYMBOL, + BSLLexer.ANNOTATION_AROUND_SYMBOL, + BSLLexer.ANNOTATION_CHANGEANDVALIDATE_SYMBOL, + BSLLexer.ANNOTATION_CUSTOM_SYMBOL + ); + + private final SemanticTokensLegend legend; + private final ClientCapabilitiesHolder clientCapabilitiesHolder; + private final ReferenceResolver referenceResolver; + private final ReferenceIndex referenceIndex; + + private static final String[] NO_MODIFIERS = new String[0]; + private static final String[] DOC_ONLY = new String[]{SemanticTokenModifiers.Documentation}; + + public SemanticTokens getSemanticTokensFull(DocumentContext documentContext, @SuppressWarnings("unused") SemanticTokensParams params) { + List entries = new ArrayList<>(); + + boolean multilineTokenSupport = clientCapabilitiesHolder.getCapabilities() + .map(ClientCapabilities::getTextDocument) + .map(TextDocumentClientCapabilities::getSemanticTokens) + .map(SemanticTokensCapabilities::getMultilineTokenSupport) + .orElse(false); + + // collect description ranges for describable symbols + List descriptionRanges = new ArrayList<>(); + BitSet documentationLines = new BitSet(); + + // 1) Symbols: methods/functions, variables, parameters + var symbolTree = documentContext.getSymbolTree(); + for (var method : symbolTree.getMethods()) { + var semanticTokenType = method.isFunction() ? SemanticTokenTypes.Function : SemanticTokenTypes.Method; + addRange(entries, method.getSubNameRange(), semanticTokenType); + for (ParameterDefinition parameter : method.getParameters()) { + addRange(entries, parameter.getRange(), SemanticTokenTypes.Parameter); + } + method.getDescription() + .map(MethodDescription::getRange) + .filter(r -> !Ranges.isEmpty(r)) + .ifPresent(r -> { + descriptionRanges.add(r); + if (!multilineTokenSupport) { + markLines(documentationLines, r); + } + }); + } + for (VariableSymbol variableSymbol : symbolTree.getVariables()) { + Range nameRange = variableSymbol.getVariableNameRange(); + if (!Ranges.isEmpty(nameRange)) { + Position pos = nameRange.getStart(); + boolean isDefinition = referenceResolver.findReference(documentContext.getUri(), pos) + .map(ref -> ref.getOccurrenceType() == OccurrenceType.DEFINITION) + .orElse(false); + if (isDefinition) { + addRange(entries, nameRange, SemanticTokenTypes.Variable, SemanticTokenModifiers.Definition); + } else { + addRange(entries, nameRange, SemanticTokenTypes.Variable); + } + } + variableSymbol.getDescription().ifPresent(desc -> { + var r = desc.getRange(); + if (!Ranges.isEmpty(r)) { + descriptionRanges.add(r); + if (!multilineTokenSupport) { + markLines(documentationLines, r); + } + } + desc.getTrailingDescription().ifPresent(trailing -> { + var tr = trailing.getRange(); + if (!Ranges.isEmpty(tr)) { + descriptionRanges.add(tr); + if (!multilineTokenSupport) { + markLines(documentationLines, tr); + } + } + }); + }); + } + + if (multilineTokenSupport) { + for (Range r : descriptionRanges) { + // compute multi-line token length using document text + int length = documentContext.getText(r).length(); + addRange(entries, r, length, SemanticTokenTypes.Comment, DOC_ONLY); + } + } + + // 2) Comments (lexer type LINE_COMMENT) + for (Token commentToken : documentContext.getComments()) { + Range commentRange = Ranges.create(commentToken); + if (multilineTokenSupport) { + boolean insideDescription = descriptionRanges.stream().anyMatch(r -> Ranges.containsRange(r, commentRange)); + if (insideDescription) { + continue; 
+ } + addRange(entries, commentRange, SemanticTokenTypes.Comment); + } else { + int commentLine = commentToken.getLine() - 1; + boolean isDocumentation = documentationLines.get(commentLine); + if (isDocumentation) { + addRange(entries, commentRange, SemanticTokenTypes.Comment, DOC_ONLY); + } else { + addRange(entries, commentRange, SemanticTokenTypes.Comment); + } + } + } + + // 3) AST-driven annotations and compiler directives + addAnnotationsFromAst(entries, documentContext); + addPreprocessorFromAst(entries, documentContext); + + // 3.1) Method call occurrences as Method tokens + addMethodCallTokens(entries, documentContext); + + // 4) Lexical tokens on default channel: strings, numbers, macros, operators, keywords + List tokens = documentContext.getTokensFromDefaultChannel(); + for (Token token : tokens) { + var tokenType = token.getType(); + var tokenText = Objects.toString(token.getText(), ""); + if (tokenText.isEmpty()) { + continue; + } + + // strings + if (STRING_TYPES.contains(tokenType)) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.String); + continue; + } + + // date literals in single quotes + if (tokenType == BSLLexer.DATETIME) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.String); + continue; + } + + // numbers + if (NUMBER_TYPES.contains(tokenType)) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.Number); + continue; + } + + // operators and punctuators + if (OPERATOR_TYPES.contains(tokenType)) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.Operator); + continue; + } + + // Skip '&' and all ANNOTATION_* symbol tokens here to avoid duplicate Decorator emission (handled via AST) + if (tokenType == BSLLexer.AMPERSAND || ANNOTATION_TOKENS.contains(tokenType)) { + continue; + } + + // specific literals as keywords: undefined/boolean/null + if (tokenType == BSLLexer.UNDEFINED + || tokenType == BSLLexer.TRUE + || tokenType == BSLLexer.FALSE + || tokenType == BSLLexer.NULL) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.Keyword); + continue; + } + + // keywords (by symbolic name suffix), skip PREPROC_* (handled via AST) + String symbolicName = BSLLexer.VOCABULARY.getSymbolicName(tokenType); + if (symbolicName != null && symbolicName.endsWith("_KEYWORD") && !symbolicName.startsWith("PREPROC_")) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.Keyword); + } + } + + // 5) Build delta-encoded data + List data = toDeltaEncoded(entries); + return new SemanticTokens(data); + } + + private static void markLines(BitSet lines, Range range) { + int startLine = range.getStart().getLine(); + int endLine = range.getEnd().getLine(); + lines.set(startLine, endLine + 1); // inclusive end + } + + private void addAnnotationsFromAst(List entries, DocumentContext documentContext) { + ParseTree parseTree = documentContext.getAst(); + + // compiler directives: single Decorator from '&' through directive symbol + for (var compilerDirective : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_compilerDirective)) { + var ampersand = compilerDirective.AMPERSAND().getSymbol(); // '&' + if (compilerDirective.compilerDirectiveSymbol() != null) { + var symbolToken = compilerDirective.compilerDirectiveSymbol().getStart(); + addRange(entries, Ranges.create(ampersand, symbolToken), SemanticTokenTypes.Decorator); + } else { + addRange(entries, Ranges.create(ampersand), SemanticTokenTypes.Decorator); + } + } + + // annotations: single Decorator from '&' through annotation name; params identifiers as Parameter + for (var 
annotation : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_annotation)) { + var ampersand = annotation.AMPERSAND().getSymbol(); // '&' + if (annotation.annotationName() != null) { + var annotationNameToken = annotation.annotationName().getStart(); + addRange(entries, Ranges.create(ampersand, annotationNameToken), SemanticTokenTypes.Decorator); + } else { + addRange(entries, Ranges.create(ampersand), SemanticTokenTypes.Decorator); + } + + var annotationParams = annotation.annotationParams(); + if (annotationParams != null) { + for (var annotationParamName : Trees.findAllRuleNodes(annotationParams, BSLParser.RULE_annotationParamName)) { + addRange(entries, Ranges.create(annotationParamName.IDENTIFIER()), SemanticTokenTypes.Parameter); + } + } + } + } + + private void addPreprocessorFromAst(List entries, DocumentContext documentContext) { + ParseTree parseTree = documentContext.getAst(); + + // 1) Regions as Namespace: handle all regionStart and regionEnd nodes explicitly + for (var regionStart : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_regionStart)) { + // Namespace only for '#'+keyword part to avoid overlap with region name token + var preprocessor = Trees.getAncestorByRuleIndex(regionStart, BSLParser.RULE_preprocessor); + if (preprocessor != null && regionStart.PREPROC_REGION() != null) { + addRange(entries, Ranges.create(preprocessor.getStart(), regionStart.PREPROC_REGION().getSymbol()), SemanticTokenTypes.Namespace); + } else { + addNamespaceForPreprocessorNode(entries, regionStart); + } + // region name highlighted as Variable (consistent with #Использовать ) + if (regionStart.regionName() != null) { + addRange(entries, Ranges.create(regionStart.regionName()), SemanticTokenTypes.Variable); + } + } + for (var regionEnd : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_regionEnd)) { + addNamespaceForPreprocessorNode(entries, regionEnd); + } + + // 1.1) Use directives as Namespace: #Использовать ... 
(moduleAnnotations scope) + for (var use : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_use)) { + addNamespaceForUse(entries, use); + } + + // 1.2) Native directives as Macro: #NATIVE (moduleAnnotations scope) + for (var nativeCtx : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_preproc_native)) { + var hash = nativeCtx.HASH(); + var nativeKw = nativeCtx.PREPROC_NATIVE(); + if (hash != null) { + addRange(entries, Ranges.create(hash), SemanticTokenTypes.Macro); + } + if (nativeKw != null) { + addRange(entries, Ranges.create(nativeKw), SemanticTokenTypes.Macro); + } + } + + // 2) Other preprocessor directives: Macro for each HASH and PREPROC_* token, + // excluding region start/end (handled as Namespace) + for (var preprocessor : Trees.findAllRuleNodes(parseTree, BSLParser.RULE_preprocessor)) { + boolean containsRegion = (preprocessor.regionStart() != null) || (preprocessor.regionEnd() != null); + if (containsRegion) { + continue; // region handled as Namespace above + } + + for (Token token : Trees.getTokens(preprocessor)) { + if (token.getChannel() != Token.DEFAULT_CHANNEL) { + continue; + } + String symbolicName = BSLLexer.VOCABULARY.getSymbolicName(token.getType()); + if (token.getType() == BSLLexer.HASH || (symbolicName != null && symbolicName.startsWith("PREPROC_"))) { + addRange(entries, Ranges.create(token), SemanticTokenTypes.Macro); + } + } + } + } + + private void addNamespaceForPreprocessorNode(List entries, BSLParserRuleContext preprocessorChildNode) { + var preprocessor = Trees.getAncestorByRuleIndex(preprocessorChildNode, BSLParser.RULE_preprocessor); + if (preprocessor == null) { + return; + } + var hashToken = preprocessor.getStart(); + if (hashToken == null) { + return; + } + var endToken = preprocessorChildNode.getStop(); + addRange(entries, Ranges.create(hashToken, endToken), SemanticTokenTypes.Namespace); + } + + private void addNamespaceForUse(List entries, UseContext useCtx) { + TerminalNode hashNode = useCtx.HASH(); + TerminalNode useNode = useCtx.PREPROC_USE_KEYWORD(); + + if (hashNode != null && useNode != null) { + addRange(entries, Ranges.create(hashNode, useNode), SemanticTokenTypes.Namespace); + } else if (hashNode != null) { + addRange(entries, Ranges.create(hashNode), SemanticTokenTypes.Namespace); + } else { + // no-op + } + + Optional.ofNullable(useCtx.usedLib()) + .map(BSLParser.UsedLibContext::PREPROC_IDENTIFIER) + .ifPresent(id -> addRange(entries, Ranges.create(id), SemanticTokenTypes.Variable)); + } + + private void addRange(List entries, Range range, String type) { + addRange(entries, range, type, NO_MODIFIERS); + } + + private void addRange(List entries, Range range, String type, String... modifiers) { + if (Ranges.isEmpty(range)) { + return; + } + int typeIdx = legend.getTokenTypes().indexOf(type); + if (typeIdx < 0) { + return; + } + int line = range.getStart().getLine(); + int start = range.getStart().getCharacter(); + int length = Math.max(0, range.getEnd().getCharacter() - range.getStart().getCharacter()); + if (length > 0) { + int modifierMask = 0; + if (modifiers != null) { + for (String mod : modifiers) { + if (mod == null) continue; + int idx = legend.getTokenModifiers().indexOf(mod); + if (idx >= 0) { + modifierMask |= (1 << idx); + } + } + } + entries.add(new TokenEntry(line, start, length, typeIdx, modifierMask)); + } + } + + // overload to add token with explicit precomputed length (used for multi-line tokens) + private void addRange(List entries, Range range, int explicitLength, String type, String... 
modifiers) { + if (Ranges.isEmpty(range)) { + return; + } + int typeIdx = legend.getTokenTypes().indexOf(type); + if (typeIdx < 0) { + return; + } + int line = range.getStart().getLine(); + int start = range.getStart().getCharacter(); + int length = Math.max(0, explicitLength); + if (length > 0) { + int modifierMask = 0; + if (modifiers != null) { + for (String mod : modifiers) { + if (mod == null) continue; + int idx = legend.getTokenModifiers().indexOf(mod); + if (idx >= 0) { + modifierMask |= (1 << idx); + } + } + } + entries.add(new TokenEntry(line, start, length, typeIdx, modifierMask)); + } + } + + private List toDeltaEncoded(List entries) { + // de-dup and sort + Set uniq = new HashSet<>(entries); + List sorted = new ArrayList<>(uniq); + sorted.sort(Comparator + .comparingInt(TokenEntry::line) + .thenComparingInt(TokenEntry::start)); + + List data = new ArrayList<>(sorted.size() * 5); + var prevLine = 0; + var prevChar = 0; + var first = true; + + for (TokenEntry tokenEntry : sorted) { + int deltaLine = first ? tokenEntry.line : (tokenEntry.line - prevLine); + int prevCharOrZero = (deltaLine == 0) ? prevChar : 0; + int deltaStart = first ? tokenEntry.start : (tokenEntry.start - prevCharOrZero); + + data.add(deltaLine); + data.add(deltaStart); + data.add(tokenEntry.length); + data.add(tokenEntry.type); + data.add(tokenEntry.modifiers); + + prevLine = tokenEntry.line; + prevChar = tokenEntry.start; + first = false; + } + return data; + } + + private void addMethodCallTokens(List entries, DocumentContext documentContext) { + for (var reference : referenceIndex.getReferencesFrom(documentContext.getUri(), SymbolKind.Method)) { + if (!reference.isSourceDefinedSymbolReference()) { + continue; + } + + reference.getSourceDefinedSymbol() + .ifPresent(symbol -> addRange(entries, reference.getSelectionRange(), SemanticTokenTypes.Method)); + } + } + + private record TokenEntry(int line, int start, int length, int type, int modifiers) {} +} diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/semantictokens/SemanticTokensLegendConfiguration.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/semantictokens/SemanticTokensLegendConfiguration.java new file mode 100644 index 00000000000..7c967b795db --- /dev/null +++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/semantictokens/SemanticTokensLegendConfiguration.java @@ -0,0 +1,62 @@ +/* + * This file is a part of BSL Language Server. + * + * Copyright (c) 2018-2025 + * Alexey Sosnoviy , Nikita Fedkin and contributors + * + * SPDX-License-Identifier: LGPL-3.0-or-later + * + * BSL Language Server is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3.0 of the License, or (at your option) any later version. + * + * BSL Language Server is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with BSL Language Server. 
+ */ +package com.github._1c_syntax.bsl.languageserver.semantictokens; + +import org.eclipse.lsp4j.SemanticTokenModifiers; +import org.eclipse.lsp4j.SemanticTokenTypes; +import org.eclipse.lsp4j.SemanticTokensLegend; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.util.List; + +@Configuration +public class SemanticTokensLegendConfiguration { + + @Bean + public SemanticTokensLegend semanticTokensLegend() { + // Types we actually emit from the provider + List tokenTypes = List.of( + SemanticTokenTypes.Keyword, + SemanticTokenTypes.String, + SemanticTokenTypes.Number, + SemanticTokenTypes.Comment, + SemanticTokenTypes.Function, + SemanticTokenTypes.Method, + SemanticTokenTypes.Variable, + SemanticTokenTypes.Parameter, + SemanticTokenTypes.Macro, + SemanticTokenTypes.Decorator, + SemanticTokenTypes.Operator, + SemanticTokenTypes.Namespace + ); + + // Add tokenModifiers we plan to emit + List tokenModifiers = List.of( + SemanticTokenModifiers.Documentation, + SemanticTokenModifiers.Definition + ); + + return new SemanticTokensLegend(tokenTypes, tokenModifiers); + } + +} diff --git a/src/main/java/com/github/_1c_syntax/bsl/languageserver/utils/Trees.java b/src/main/java/com/github/_1c_syntax/bsl/languageserver/utils/Trees.java index 78876ebab6a..f6612650ec2 100644 --- a/src/main/java/com/github/_1c_syntax/bsl/languageserver/utils/Trees.java +++ b/src/main/java/com/github/_1c_syntax/bsl/languageserver/utils/Trees.java @@ -64,9 +64,9 @@ public final class Trees { /** * Обертки Trees */ - - public static Collection findAllRuleNodes(ParseTree t, int ruleIndex) { - return org.antlr.v4.runtime.tree.Trees.findAllRuleNodes(t, ruleIndex); + @SuppressWarnings("unchecked") + public static Collection findAllRuleNodes(ParseTree t, int ruleIndex) { + return (Collection) org.antlr.v4.runtime.tree.Trees.findAllRuleNodes(t, ruleIndex); } public static List getChildren(Tree t) { @@ -163,13 +163,14 @@ private static List getDescendantsWithFilter(ParseTree parent, ParseT * BSLParserRuleContext parent = Trees.getAncestorByRuleIndex(ctx, BSLParser.RULE_statement); */ @Nullable - public static BSLParserRuleContext getAncestorByRuleIndex(BSLParserRuleContext element, int type) { + @SuppressWarnings("unchecked") + public static T getAncestorByRuleIndex(BSLParserRuleContext element, int type) { var parent = element.getParent(); if (parent == null) { return null; } if (parent.getRuleIndex() == type) { - return parent; + return (T) parent; } return getAncestorByRuleIndex(parent, type); } diff --git a/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java new file mode 100644 index 00000000000..7d04a317db9 --- /dev/null +++ b/src/test/java/com/github/_1c_syntax/bsl/languageserver/providers/SemanticTokensProviderTest.java @@ -0,0 +1,680 @@ +/* + * This file is a part of BSL Language Server. + * + * Copyright (c) 2018-2025 + * Alexey Sosnoviy , Nikita Fedkin and contributors + * + * SPDX-License-Identifier: LGPL-3.0-or-later + * + * BSL Language Server is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3.0 of the License, or (at your option) any later version. 
+ *
+ * BSL Language Server is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with BSL Language Server.
+ */
+package com.github._1c_syntax.bsl.languageserver.providers;
+
+import com.github._1c_syntax.bsl.languageserver.ClientCapabilitiesHolder;
+import com.github._1c_syntax.bsl.languageserver.context.DocumentContext;
+import com.github._1c_syntax.bsl.languageserver.util.TestUtils;
+import com.github._1c_syntax.bsl.parser.BSLLexer;
+import com.github._1c_syntax.bsl.languageserver.references.ReferenceIndex;
+import com.github._1c_syntax.bsl.languageserver.references.model.Reference;
+import com.github._1c_syntax.bsl.languageserver.context.symbol.MethodSymbol;
+import org.antlr.v4.runtime.Token;
+import org.eclipse.lsp4j.Position;
+import org.eclipse.lsp4j.Range;
+import org.eclipse.lsp4j.SemanticTokenTypes;
+import org.eclipse.lsp4j.SemanticTokens;
+import org.eclipse.lsp4j.SemanticTokensLegend;
+import org.eclipse.lsp4j.SemanticTokensParams;
+import org.eclipse.lsp4j.TextDocumentIdentifier;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.test.annotation.DirtiesContext;
+import org.eclipse.lsp4j.SymbolKind;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import org.eclipse.lsp4j.ClientCapabilities;
+import org.eclipse.lsp4j.SemanticTokensCapabilities;
+import org.eclipse.lsp4j.TextDocumentClientCapabilities;
+import org.springframework.test.context.bean.override.mockito.MockitoBean;
+
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+@SpringBootTest
+@DirtiesContext
+class SemanticTokensProviderTest {
+
+  @Autowired
+  private SemanticTokensProvider provider;
+
+  @Autowired
+  private SemanticTokensLegend legend;
+
+  @Autowired
+  private ClientCapabilitiesHolder clientCapabilitiesHolder;
+
+  @MockitoBean
+  private ReferenceIndex referenceIndex;
+
+  @Test
+  void emitsExpectedTokenTypes() {
+    // given: sample BSL with annotation, macro, method, parameter, string, number, comment, operators
+    String bsl = String.join("\n",
+      "&НаКлиенте",
+      "#Если Истина Тогда",
+      "Процедура Тест(Парам) Экспорт",
+      " // комментарий",
+      " Сообщить(\"строка\" + 123);",
+      "КонецПроцедуры",
+      "#КонецЕсли"
+    );
+
+    DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
+    TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri());
+
+    // when
+    var params = new SemanticTokensParams(textDocumentIdentifier);
+    SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, params);
+
+    // then: collect type indexes present
+    List<Integer> data = tokens.getData();
+    assertThat(data).isNotEmpty();
+
+    Set<Integer> presentTypes = indexesOfTypes(data);
+
+    // map desired types to indices and assert they're present
+    assertPresent(presentTypes, SemanticTokenTypes.Decorator);
+    assertPresent(presentTypes, SemanticTokenTypes.Macro);
+    assertPresent(presentTypes, SemanticTokenTypes.Method);
+    assertPresent(presentTypes, SemanticTokenTypes.Parameter);
+    assertPresent(presentTypes, SemanticTokenTypes.Keyword);
+    assertPresent(presentTypes, SemanticTokenTypes.String);
+    assertPresent(presentTypes, SemanticTokenTypes.Number);
+    assertPresent(presentTypes, SemanticTokenTypes.Comment);
+    assertPresent(presentTypes, SemanticTokenTypes.Operator);
+  }
+
+  @Test
+  void emitsMacroForAllPreprocTokens() {
+    // given: preprocessor variety to cover PREPROC_* tokens including regions
+    String bsl = String.join("\n",
+      "#Область Region1",
+      "#Если Сервер И НЕ Клиент Тогда",
+      "Процедура Пусто()",
+      "КонецПроцедуры",
+      "#ИначеЕсли Клиент Тогда",
+      "#Иначе",
+      "#КонецЕсли",
+      "#КонецОбласти"
+    );
+
+    DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
+    TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri());
+
+    // when
+    SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier));
+
+    // then: count how many lexer tokens are PREPROC_* (or HASH) on default channel
+    List<Token> defaultTokens = documentContext.getTokensFromDefaultChannel();
+
+    long totalPreproc = defaultTokens.stream()
+      .map(Token::getType)
+      .map(BSLLexer.VOCABULARY::getSymbolicName)
+      .filter(Objects::nonNull)
+      .filter(sym -> sym.equals("HASH") || sym.startsWith("PREPROC_"))
+      .count();
+
+    // count region directives and names
+    long regionDirectives = 0;
+    long regionNames = 0;
+    for (int i = 0; i + 1 < defaultTokens.size(); i++) {
+      Token t = defaultTokens.get(i);
+      Token n = defaultTokens.get(i + 1);
+      if (t.getType() == BSLLexer.HASH && n.getType() == BSLLexer.PREPROC_REGION) {
+        regionDirectives++;
+        // if name token follows, it is included into Namespace span and not counted as Macro
+        if (i + 2 < defaultTokens.size() && defaultTokens.get(i + 2).getType() == BSLLexer.PREPROC_IDENTIFIER) {
+          regionNames++;
+        }
+      } else if (t.getType() == BSLLexer.HASH && n.getType() == BSLLexer.PREPROC_END_REGION) {
+        regionDirectives++;
+      }
+    }
+
+    // expected macro tokens exclude region directives (HASH + PREPROC_*) and region names after PREPROC_REGION
+    long expectedMacro = totalPreproc - (regionDirectives * 2) - regionNames;
+
+    int macroIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Macro);
+    int nsIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Namespace);
+    assertThat(macroIdx).isGreaterThanOrEqualTo(0);
+    assertThat(nsIdx).isGreaterThanOrEqualTo(0);
+
+    long macroCount = countOfType(tokens.getData(), macroIdx);
+    long nsCount = countOfType(tokens.getData(), nsIdx);
+
+    // macros match non-region preproc tokens; namespace tokens match number of region directives
+    assertThat(macroCount).isEqualTo(expectedMacro);
+    assertThat(nsCount).isEqualTo(regionDirectives);
+  }
+
+  @Test
+  void emitsOperatorsForPunctuators() {
+    // given: code with many punctuators and operators
+    String bsl = String.join("\n",
+      "Процедура Опер()",
+      " Массив = Новый Массив();",
+      " Массив.Добавить(1 + 2);",
+      " Значение = Массив[0]?;",
+      " Если 1 <> 2 Тогда КонецЕсли;",
+      "КонецПроцедуры"
+    );
+
+    DocumentContext documentContext = TestUtils.getDocumentContext(bsl);
+    TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri());
+
+    // when
+    SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier));
+
+    int operatorIdx =
legend.getTokenTypes().indexOf(SemanticTokenTypes.Operator); + assertThat(operatorIdx).isGreaterThanOrEqualTo(0); + + // count lexer operator/punctuator tokens + Set opTypes = Set.of( + BSLLexer.LPAREN, + BSLLexer.RPAREN, + BSLLexer.LBRACK, + BSLLexer.RBRACK, + BSLLexer.COMMA, + BSLLexer.SEMICOLON, + BSLLexer.COLON, + BSLLexer.DOT, + BSLLexer.PLUS, + BSLLexer.MINUS, + BSLLexer.MUL, + BSLLexer.QUOTIENT, + BSLLexer.MODULO, + BSLLexer.ASSIGN, + BSLLexer.NOT_EQUAL, + BSLLexer.LESS, + BSLLexer.LESS_OR_EQUAL, + BSLLexer.GREATER, + BSLLexer.GREATER_OR_EQUAL, + BSLLexer.QUESTION, + BSLLexer.TILDA + ); + + long lexerOpCount = documentContext.getTokensFromDefaultChannel().stream() + .map(Token::getType) + .filter(opTypes::contains) + .count(); + + long operatorCount = countOfType(tokens.getData(), operatorIdx); + + // 1:1 mapping of lexer operator tokens to semantic Operator tokens + assertThat(operatorCount).isEqualTo(lexerOpCount); + } + + @Test + void annotationWithoutParams_isDecoratorOnly() { + // given + String annotation = "&НаКлиенте"; + String bsl = String.join("\n", + annotation, + "Процедура Тест()", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int decoratorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Decorator); + int operatorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Operator); + assertThat(decoratorIdx).isGreaterThanOrEqualTo(0); + assertThat(operatorIdx).isGreaterThanOrEqualTo(0); + + List firstLineTokens = decode(tokens.getData()).stream().filter(t -> t.line == 0).toList(); + + // then: on line 0 we should have exactly one Decorator token: merged '&НаКлиенте' + long decoratorsOnFirstLine = firstLineTokens.stream().filter(t -> t.type == decoratorIdx).count(); + assertThat(decoratorsOnFirstLine).isEqualTo(1); + + // and no operators or strings on that line + long operatorsOnFirstLine = firstLineTokens.stream().filter(t -> t.type == operatorIdx).count(); + assertThat(operatorsOnFirstLine).isEqualTo(0); + } + + @Test + void annotationWithStringParam_tokenizesNameParenAndString() { + // given + String bsl = String.join("\n", + "&Перед(\"Строка\")", + "Процедура Тест()", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int decoratorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Decorator); + int operatorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Operator); + int stringIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.String); + assertThat(decoratorIdx).isGreaterThanOrEqualTo(0); + assertThat(operatorIdx).isGreaterThanOrEqualTo(0); + assertThat(stringIdx).isGreaterThanOrEqualTo(0); + + List firstLineTokens = decode(tokens.getData()).stream().filter(t -> t.line == 0).toList(); + + // one decorator on line 0: merged '&Перед' + assertThat(firstLineTokens.stream().filter(t -> t.type == decoratorIdx).count()).isEqualTo(1); + + // operators present for parentheses + assertThat(firstLineTokens.stream().filter(t -> t.type == 
operatorIdx).count()).isGreaterThanOrEqualTo(2); + + // string present + assertThat(firstLineTokens.stream().filter(t -> t.type == stringIdx).count()).isGreaterThanOrEqualTo(1); + } + + @Test + void customAnnotationWithNamedStringParam_marksIdentifierAsParameter() { + // given + String bsl = String.join("\n", + "&КастомнаяАннотация(Значение = \"Параметр\")", + "Процедура Тест()", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int decoratorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Decorator); + int operatorIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Operator); + int stringIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.String); + int paramIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Parameter); + + assertThat(decoratorIdx).isGreaterThanOrEqualTo(0); + assertThat(operatorIdx).isGreaterThanOrEqualTo(0); + assertThat(stringIdx).isGreaterThanOrEqualTo(0); + assertThat(paramIdx).isGreaterThanOrEqualTo(0); + + List firstLineTokens = decode(tokens.getData()).stream().filter(t -> t.line == 0).toList(); + + // one decorator: merged '&КастомнаяАннотация' + assertThat(firstLineTokens.stream().filter(t -> t.type == decoratorIdx).count()).isEqualTo(1); + + // operators for '(' ')' and '=' + assertThat(firstLineTokens.stream().filter(t -> t.type == operatorIdx).count()).isGreaterThanOrEqualTo(3); + + // parameter identifier 'Значение' + assertThat(firstLineTokens.stream().filter(t -> t.type == paramIdx).count()).isGreaterThanOrEqualTo(1); + + // string literal + assertThat(firstLineTokens.stream().filter(t -> t.type == stringIdx).count()).isGreaterThanOrEqualTo(1); + } + + @Test + void useDirective_isNamespace() { + // given: several #Использовать directives + String bsl = String.join("\n", + "#Использовать А", + "#Использовать Б", + "#Использовать В" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int namespaceIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Namespace); + assertThat(namespaceIdx).isGreaterThanOrEqualTo(0); + + long nsCount = countOfType(tokens.getData(), namespaceIdx); + + // then: each use line produces one Namespace token + assertThat(nsCount).isEqualTo(3); + } + + @Test + void datetimeAndUndefinedTrueFalse_areHighlighted() { + // given: date literal and undefined/boolean literals + String bsl = String.join("\n", + "Процедура T()", + " Дата = '20010101';", + " X = Неопределено;", + " Если Истина Тогда", + " КонецЕсли;", + " Если Ложь Тогда", + " КонецЕсли;", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int stringIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.String); + int keywordIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Keyword); + 
assertThat(stringIdx).isGreaterThanOrEqualTo(0); + assertThat(keywordIdx).isGreaterThanOrEqualTo(0); + + long strings = countOfType(tokens.getData(), stringIdx); + long keywords = countOfType(tokens.getData(), keywordIdx); + + // then: at least one string (for DATETIME) and at least three keywords for undefined/true/false + assertThat(strings).isGreaterThanOrEqualTo(1); + + long expectedSpecialLiteralCount = documentContext.getTokensFromDefaultChannel().stream() + .map(Token::getType) + .filter(t -> t == BSLLexer.UNDEFINED || t == BSLLexer.TRUE || t == BSLLexer.FALSE) + .count(); + + assertThat(keywords).isGreaterThanOrEqualTo(expectedSpecialLiteralCount); + } + + @Test + void methodDescriptionComments_areMarkedWithDocumentationModifier() { + // given: leading description comments above a method and a non-doc comment in body + String bsl = String.join("\n", + "// Описание процедуры", + "// Параметры: Парам - Число", + "Процедура ДокТест(Парам)", + " // обычный комментарий", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int commentIdx = legend.getTokenTypes().indexOf("comment"); + int docModIdx = legend.getTokenModifiers().indexOf("documentation"); + assertThat(commentIdx).isGreaterThanOrEqualTo(0); + assertThat(docModIdx).isGreaterThanOrEqualTo(0); + int docMask = 1 << docModIdx; + + List decoded = decode(tokens.getData()); + // comments on lines 0 and 1 must have documentation modifier; line 3 comment must not + var line0 = decoded.stream().filter(t -> t.line == 0 && t.type == commentIdx).toList(); + var line1 = decoded.stream().filter(t -> t.line == 1 && t.type == commentIdx).toList(); + var line3 = decoded.stream().filter(t -> t.line == 3 && t.type == commentIdx).toList(); + + assertThat(line0).isNotEmpty(); + assertThat(line1).isNotEmpty(); + assertThat(line3).isNotEmpty(); + + assertThat(line0.stream().allMatch(t -> (t.modifiers & docMask) != 0)).isTrue(); + assertThat(line1.stream().allMatch(t -> (t.modifiers & docMask) != 0)).isTrue(); + assertThat(line3.stream().allMatch(t -> (t.modifiers & docMask) == 0)).isTrue(); + } + + @Test + void variableDescriptionLeadingAndTrailing_areMarkedWithDocumentationModifier() { + // given: leading description and trailing description for a variable + String bsl = String.join("\n", + "// Описание переменной", + "Перем Перем1; // трейл" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int commentIdx = legend.getTokenTypes().indexOf("comment"); + int docModIdx = legend.getTokenModifiers().indexOf("documentation"); + assertThat(commentIdx).isGreaterThanOrEqualTo(0); + assertThat(docModIdx).isGreaterThanOrEqualTo(0); + int docMask = 1 << docModIdx; + + List decoded = decode(tokens.getData()); + + // We expect two comment tokens: line 0 (leading) and line 1 (trailing). Both should have documentation modifier. 
+ var line0 = decoded.stream().filter(t -> t.line == 0 && t.type == commentIdx).toList(); + var line1 = decoded.stream().filter(t -> t.line == 1 && t.type == commentIdx).toList(); + + assertThat(line0).isNotEmpty(); + assertThat(line1).isNotEmpty(); + + assertThat(line0.stream().allMatch(t -> (t.modifiers & docMask) != 0)).isTrue(); + assertThat(line1.stream().allMatch(t -> (t.modifiers & docMask) != 0)).isTrue(); + } + + @Test + void multilineDocumentation_isMergedIntoSingleToken_whenClientSupportsIt() { + // set multilineTokenSupport via ClientCapabilitiesHolder directly + ClientCapabilities caps = new ClientCapabilities(); + TextDocumentClientCapabilities td = new TextDocumentClientCapabilities(); + SemanticTokensCapabilities st = new SemanticTokensCapabilities(); + st.setMultilineTokenSupport(true); + td.setSemanticTokens(st); + caps.setTextDocument(td); + clientCapabilitiesHolder.setCapabilities(caps); + + // given: two-line documentation followed by a method and a body comment + String bsl = String.join("\n", + "// Первая строка описания", + "// Вторая строка описания", + "Процедура ДокТест()", + " // не документация", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int commentIdx = legend.getTokenTypes().indexOf("comment"); + int docModIdx = legend.getTokenModifiers().indexOf("documentation"); + assertThat(commentIdx).isGreaterThanOrEqualTo(0); + assertThat(docModIdx).isGreaterThanOrEqualTo(0); + int docMask = 1 << docModIdx; + + List decoded = decode(tokens.getData()); + + // then: exactly one documentation comment token exists (merged), starting on line 0 + var docTokens = decoded.stream().filter(t -> t.type == commentIdx && (t.modifiers & docMask) != 0).toList(); + assertThat(docTokens).hasSize(1); + assertThat(docTokens.get(0).line).isEqualTo(0); + + // and there is no comment token on line 1 (second doc line) + var commentsLine1 = decoded.stream().filter(t -> t.line == 1 && t.type == commentIdx).toList(); + assertThat(commentsLine1).isEmpty(); + + // and a regular body comment exists on line 3 without the documentation modifier + var bodyComments = decoded.stream().filter(t -> t.line == 3 && t.type == commentIdx).toList(); + assertThat(bodyComments).isNotEmpty(); + assertThat(bodyComments.stream().allMatch(t -> (t.modifiers & docMask) == 0)).isTrue(); + + // reset capabilities to avoid side-effects on other tests + clientCapabilitiesHolder.setCapabilities(null); + } + + @Test + void regionName_isHighlightedAsVariable() { + // given: region with a name and its end + String bsl = String.join("\n", + "#Область МояСекция", + "Процедура Тест()\nКонецПроцедуры", + "#КонецОбласти" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int nsIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Namespace); + int varIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Variable); + assertThat(nsIdx).isGreaterThanOrEqualTo(0); + assertThat(varIdx).isGreaterThanOrEqualTo(0); + + List decoded = 
decode(tokens.getData()); + + // then: one Namespace token for region start and one for region end, and one Variable on line 0 for the name + long nsOnLine0 = decoded.stream().filter(t -> t.line == 0 && t.type == nsIdx).count(); + long nsOnLastLine = decoded.stream().filter(t -> t.line == 3 && t.type == nsIdx).count(); + long varsOnLine0 = decoded.stream().filter(t -> t.line == 0 && t.type == varIdx).count(); + + assertThat(nsOnLine0).isEqualTo(1); + assertThat(nsOnLastLine).isEqualTo(1); + assertThat(varsOnLine0).isEqualTo(1); + } + + @Test + void variableDefinition_hasDefinitionModifier() { + // given: module-level variable declaration + String bsl = String.join("\n", + "Перем Перем1;", + "Процедура T()", + " // тело", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int varIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Variable); + int defModIdx = legend.getTokenModifiers().indexOf("definition"); + assertThat(varIdx).isGreaterThanOrEqualTo(0); + assertThat(defModIdx).isGreaterThanOrEqualTo(0); + int defMask = 1 << defModIdx; + + // then: at least one Variable token has the definition modifier (for Перем1) + List decoded = decode(tokens.getData()); + long defs = decoded.stream() + .filter(t -> t.type == varIdx) + .filter(t -> (t.modifiers & defMask) != 0) + .count(); + + assertThat(defs).isGreaterThanOrEqualTo(1); + } + + @Test + void sameFileMethodCall_isHighlightedAsMethodTokenAtCallSite() { + // given: a method and a call to another method in the same file + String bsl = String.join("\n", + "Процедура Бар()", + " CallMe();", + "КонецПроцедуры" + ); + + DocumentContext documentContext = TestUtils.getDocumentContext(bsl); + TextDocumentIdentifier textDocumentIdentifier = TestUtils.getTextDocumentIdentifier(documentContext.getUri()); + + // compute selection range for 'CallMe' on line 1 + int callLine = 1; + int callStart = bsl.split("\n")[callLine].indexOf("CallMe"); + Range callRange = new Range(new Position(callLine, callStart), new Position(callLine, callStart + "CallMe".length())); + + // mock a same-file reference pointing to a method symbol owned by this document + Reference ref = mock(Reference.class, RETURNS_DEEP_STUBS); + MethodSymbol toSymbol = MethodSymbol.builder() + .name("CallMe") + .owner(documentContext) + .function(false) + .range(new Range(new Position(0, 0), new Position(0, 0))) + .subNameRange(new Range(new Position(0, 0), new Position(0, 0))) + .build(); + + when(ref.isSourceDefinedSymbolReference()).thenReturn(true); + when(ref.getSourceDefinedSymbol()).thenReturn(java.util.Optional.of(toSymbol)); + when(ref.getSelectionRange()).thenReturn(callRange); + + when(referenceIndex.getReferencesFrom(documentContext.getUri(), SymbolKind.Method)) + .thenReturn(List.of(ref)); + + // when + SemanticTokens tokens = provider.getSemanticTokensFull(documentContext, new SemanticTokensParams(textDocumentIdentifier)); + + int methodIdx = legend.getTokenTypes().indexOf(SemanticTokenTypes.Method); + assertThat(methodIdx).isGreaterThanOrEqualTo(0); + + // then: there is a Method token on the call line (line 1) + List decoded = decode(tokens.getData()); + long methodsOnCallLine = decoded.stream().filter(t -> t.line == callLine && t.type == methodIdx).count(); + 
assertThat(methodsOnCallLine).isGreaterThanOrEqualTo(1); + } + + // helpers + private record DecodedToken(int line, int start, int length, int type, int modifiers) {} + + private List decode(List data) { + List out = new ArrayList<>(); + int line = 0; + int start = 0; + for (int i = 0; i + 4 < data.size(); i += 5) { + int dLine = data.get(i); + int dStart = data.get(i + 1); + int length = data.get(i + 2); + int type = data.get(i + 3); + int mods = data.get(i + 4); + line = line + dLine; + start = (dLine == 0) ? start + dStart : dStart; + out.add(new DecodedToken(line, start, length, type, mods)); + } + return out; + } + + private Set indexesOfTypes(List data) { + // data: [deltaLine, deltaStart, length, tokenType, tokenModifiers] per token + Set res = new HashSet<>(); + for (int i = 0; i + 3 < data.size(); i += 5) { + res.add(data.get(i + 3)); + } + return res; + } + + private long countOfType(List data, int typeIdx) { + long cnt = 0; + for (int i = 0; i + 3 < data.size(); i += 5) { + if (data.get(i + 3) == typeIdx) cnt++; + } + return cnt; + } + + private void assertPresent(Set presentTypes, String tokenType) { + int idx = legend.getTokenTypes().indexOf(tokenType); + assertThat(idx).isGreaterThanOrEqualTo(0); + assertThat(presentTypes).contains(idx); + } +}
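
Note on the encoding: toDeltaEncoded in SemanticTokensProvider emits the relative form required by the LSP semantic-tokens specification, five integers per token (deltaLine, deltaStart, length, tokenType, tokenModifiers), where deltaStart is relative to the previous token only when both tokens sit on the same line. A minimal standalone sketch of that encoding; the names (AbsToken, encode) and the sample token values are illustrative, not taken from the patch:

```java
import java.util.ArrayList;
import java.util.List;

public class DeltaEncodingSketch {

  // absolute token: zero-based line/character plus legend indices
  record AbsToken(int line, int start, int length, int type, int modifiers) {}

  // each token becomes 5 integers: deltaLine, deltaStart, length, tokenType, tokenModifiers
  static List<Integer> encode(List<AbsToken> sorted) {
    List<Integer> data = new ArrayList<>(sorted.size() * 5);
    int prevLine = 0;
    int prevStart = 0;
    for (AbsToken t : sorted) {
      int deltaLine = t.line() - prevLine;
      // deltaStart is relative to the previous token only when both are on the same line
      int deltaStart = (deltaLine == 0) ? t.start() - prevStart : t.start();
      data.add(deltaLine);
      data.add(deltaStart);
      data.add(t.length());
      data.add(t.type());
      data.add(t.modifiers());
      prevLine = t.line();
      prevStart = t.start();
    }
    return data;
  }

  public static void main(String[] args) {
    // e.g. a keyword at (2, 0), a method name at (2, 10), another keyword at (3, 0)
    var tokens = List.of(
      new AbsToken(2, 0, 9, 0, 0),
      new AbsToken(2, 10, 4, 5, 0),
      new AbsToken(3, 0, 14, 0, 0)
    );
    // prints [2, 0, 9, 0, 0, 0, 10, 4, 5, 0, 1, 0, 14, 0, 0]
    System.out.println(encode(tokens));
  }
}
```

The sketch assumes the input is already de-duplicated and sorted by line and then start character, which is exactly what toDeltaEncoded does before encoding.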
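
The legend added in SemanticTokensLegendConfiguration is what gives those integers meaning: tokenType is an index into the legend's type list, and tokenModifiers is a bitmask where bit i corresponds to the i-th modifier, mirroring the `1 << idx` construction in addRange. A small sketch of that mapping in both directions, using only lsp4j types already referenced in the patch; the decode loop is an illustration, not an lsp4j API:

```java
import java.util.ArrayList;
import java.util.List;
import org.eclipse.lsp4j.SemanticTokenModifiers;
import org.eclipse.lsp4j.SemanticTokenTypes;
import org.eclipse.lsp4j.SemanticTokensLegend;

public class LegendSketch {

  public static void main(String[] args) {
    var legend = new SemanticTokensLegend(
      List.of(SemanticTokenTypes.Comment, SemanticTokenTypes.Variable),
      List.of(SemanticTokenModifiers.Documentation, SemanticTokenModifiers.Definition)
    );

    // encoding side: each modifier contributes one bit, 1 << indexInLegend
    int mask = 1 << legend.getTokenModifiers().indexOf(SemanticTokenModifiers.Documentation);

    // decoding side: the type index selects from tokenTypes, set bits select from tokenModifiers
    int typeIndex = 0;
    String type = legend.getTokenTypes().get(typeIndex);
    List<String> modifiers = new ArrayList<>();
    for (int bit = 0; bit < legend.getTokenModifiers().size(); bit++) {
      if ((mask & (1 << bit)) != 0) {
        modifiers.add(legend.getTokenModifiers().get(bit));
      }
    }
    System.out.println(type + " " + modifiers); // comment [documentation]
  }
}
```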
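
When the client does not report multilineTokenSupport, the provider cannot merge a multi-line description into a single comment token, so it marks the description's lines in a BitSet and classifies each comment token by its line. A rough sketch of that fallback; markLines mirrors the private helper in SemanticTokensProvider, everything else (class name, sample ranges) is illustrative:

```java
import java.util.BitSet;
import java.util.List;
import org.eclipse.lsp4j.Position;
import org.eclipse.lsp4j.Range;

public class DocumentationLinesSketch {

  static void markLines(BitSet lines, Range range) {
    // BitSet#set uses an exclusive upper bound, hence the +1 to include the end line
    lines.set(range.getStart().getLine(), range.getEnd().getLine() + 1);
  }

  public static void main(String[] args) {
    var documentationLines = new BitSet();
    // a two-line method description spanning lines 0..1
    markLines(documentationLines, new Range(new Position(0, 0), new Position(1, 27)));

    // a comment on line 0 gets the documentation modifier, a body comment on line 3 does not
    for (int commentLine : List.of(0, 3)) {
      boolean isDocumentation = documentationLines.get(commentLine);
      System.out.println("line " + commentLine + " documentation=" + isDocumentation);
    }
  }
}
```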