From 1718ce9e97f42d14a401f9ac795ce0b67512ee21 Mon Sep 17 00:00:00 2001
From: Mark Sujew
Date: Mon, 17 Jul 2023 16:09:47 +0200
Subject: [PATCH] Support completion for grammar imports (#1106)

---
 .../src/grammar/langium-grammar-module.ts     |   4 +-
 .../lsp/grammar-completion-provider.ts        |  93 ++++
 .../src/lsp/completion/completion-provider.ts | 273 ++++++++++--------
 packages/langium/src/utils/uri-util.ts        |   6 +-
 .../test/lsp/completion-provider.test.ts      |  51 +++-
 5 files changed, 305 insertions(+), 122 deletions(-)
 create mode 100644 packages/langium/src/grammar/lsp/grammar-completion-provider.ts

diff --git a/packages/langium/src/grammar/langium-grammar-module.ts b/packages/langium/src/grammar/langium-grammar-module.ts
index 922dcd551..89331f040 100644
--- a/packages/langium/src/grammar/langium-grammar-module.ts
+++ b/packages/langium/src/grammar/langium-grammar-module.ts
@@ -26,6 +26,7 @@ import { LangiumGrammarValidationResourcesCollector } from './validation/validat
 import { LangiumGrammarTypesValidator, registerTypeValidationChecks } from './validation/types-validator';
 import { interruptAndCheck } from '../utils/promise-util';
 import { DocumentState } from '../workspace/documents';
+import { LangiumGrammarCompletionProvider } from './lsp/grammar-completion-provider';
 
 export type LangiumGrammarAddedServices = {
     validation: {
@@ -49,7 +50,8 @@ export const LangiumGrammarModule: Module<LangiumGrammarServices, PartialLangium
         SemanticTokenProvider: (services) => new LangiumGrammarSemanticTokenProvider(services),
         Formatter: () => new LangiumGrammarFormatter(),
         DefinitionProvider: (services) => new LangiumGrammarDefinitionProvider(services),
-        CallHierarchyProvider: (services) => new LangiumGrammarCallHierarchyProvider(services)
+        CallHierarchyProvider: (services) => new LangiumGrammarCallHierarchyProvider(services),
+        CompletionProvider: (services) => new LangiumGrammarCompletionProvider(services)
     },
     references: {
         ScopeComputation: (services) => new LangiumGrammarScopeComputation(services),
diff --git a/packages/langium/src/grammar/lsp/grammar-completion-provider.ts b/packages/langium/src/grammar/lsp/grammar-completion-provider.ts
new file mode 100644
index 000000000..2a6a49ff9
--- /dev/null
+++ b/packages/langium/src/grammar/lsp/grammar-completion-provider.ts
@@ -0,0 +1,93 @@
+/******************************************************************************
+ * Copyright 2023 TypeFox GmbH
+ * This program and the accompanying materials are made available under the
+ * terms of the MIT License, which is available in the project root.
+ ******************************************************************************/
+
+import type { Range } from 'vscode-languageserver-types';
+import { CompletionItemKind } from 'vscode-languageserver-types';
+import type { NextFeature } from '../../lsp/completion/follow-element-computation';
+import type { CompletionAcceptor, CompletionContext } from '../../lsp/completion/completion-provider';
+import { DefaultCompletionProvider } from '../../lsp/completion/completion-provider';
+import type { LangiumServices } from '../../services';
+import type { MaybePromise } from '../../utils/promise-util';
+import { getContainerOfType } from '../../utils/ast-util';
+import { equalURI, relativeURI } from '../../utils/uri-util';
+import type { LangiumDocument, LangiumDocuments } from '../../workspace';
+import type { AbstractElement } from '../generated/ast';
+import { isAssignment } from '../generated/ast';
+import { Utils } from 'vscode-uri';
+
+export class LangiumGrammarCompletionProvider extends DefaultCompletionProvider {
+
+    private readonly documents: () => LangiumDocuments;
+
+    constructor(services: LangiumServices) {
+        super(services);
+        this.documents = () => services.shared.workspace.LangiumDocuments;
+    }
+
+    protected override completionFor(context: CompletionContext, next: NextFeature<AbstractElement>, acceptor: CompletionAcceptor): MaybePromise<void> {
+        const assignment = getContainerOfType(next.feature, isAssignment);
+        if (assignment?.feature === 'path') {
+            this.completeImportPath(context, acceptor);
+        } else {
+            return super.completionFor(context, next, acceptor);
+        }
+    }
+
+    private completeImportPath(context: CompletionContext, acceptor: CompletionAcceptor): void {
+        const text = context.textDocument.getText();
+        const existingText = text.substring(context.tokenOffset, context.offset);
+        let allPaths = this.getAllFiles(context.document);
+        let range: Range = {
+            start: context.position,
+            end: context.position
+        };
+        if (existingText.length > 0) {
+            const existingPath = existingText.substring(1);
+            allPaths = allPaths.filter(path => path.startsWith(existingPath));
+            // Completely replace the current token
+            const start = context.textDocument.positionAt(context.tokenOffset + 1);
+            const end = context.textDocument.positionAt(context.tokenEndOffset - 1);
+            range = {
+                start,
+                end
+            };
+        }
+        for (const path of allPaths) {
+            // Only insert quotes if there is no `path` token yet.
+            const delimiter = existingText.length > 0 ? '' : '"';
+            const completionValue = `${delimiter}${path}${delimiter}`;
+            acceptor(context, {
+                label: path,
+                textEdit: {
+                    newText: completionValue,
+                    range
+                },
+                kind: CompletionItemKind.File,
+                sortText: '0'
+            });
+        }
+    }
+
+    private getAllFiles(document: LangiumDocument): string[] {
+        const documents = this.documents().all;
+        const uri = document.uri.toString();
+        const dirname = Utils.dirname(document.uri).toString();
+        const paths: string[] = [];
+        for (const doc of documents) {
+            if (!equalURI(doc.uri, uri)) {
+                const docUri = doc.uri.toString();
+                const uriWithoutExt = docUri.substring(0, docUri.length - Utils.extname(doc.uri).length);
+                let relativePath = relativeURI(dirname, uriWithoutExt);
+                if (!relativePath.startsWith('.')) {
+                    relativePath = `./${relativePath}`;
+                }
+                paths.push(relativePath);
+            }
+        }
+        return paths;
+    }
+
+}
diff --git a/packages/langium/src/lsp/completion/completion-provider.ts b/packages/langium/src/lsp/completion/completion-provider.ts
index 36e6df72a..5d62fca84 100644
--- a/packages/langium/src/lsp/completion/completion-provider.ts
+++ b/packages/langium/src/lsp/completion/completion-provider.ts
@@ -6,7 +6,6 @@
 
 import type { CancellationToken, CompletionItem, CompletionParams } from 'vscode-languageserver';
 import type { TextDocument, TextEdit } from 'vscode-languageserver-textdocument';
-import type { GrammarConfig } from '../../grammar/grammar-config';
 import type { LangiumCompletionParser } from '../../parser/langium-parser';
 import type { NameProvider } from '../../references/name-provider';
 import type { ScopeProvider } from '../../references/scope-provider';
@@ -17,6 +16,8 @@ import type { LangiumDocument } from '../../workspace/documents';
 import type { NextFeature } from './follow-element-computation';
 import type { NodeKindProvider } from '../node-kind-provider';
 import type { FuzzyMatcher } from '../fuzzy-matcher';
+import type { Lexer } from '../../parser/lexer';
+import type { IToken } from 'chevrotain';
 import { CompletionItemKind, CompletionList, Position } from 'vscode-languageserver';
 import * as ast from '../../grammar/generated/ast';
 import { getExplicitRuleType } from '../../grammar/internal-grammar-util';
@@ -26,7 +27,7 @@ import { getEntryRule } from '../../utils/grammar-util';
 import { stream } from '../../utils/stream';
 import { findFirstFeatures, findNextFeatures } from './follow-element-computation';
 
-export type CompletionAcceptor = (value: CompletionValueItem) => void
+export type CompletionAcceptor = (context: CompletionContext, value: CompletionValueItem) => void
 
 export type CompletionValueItem = ({
     label?: string
@@ -40,6 +41,21 @@ export interface CompletionContext {
     node?: AstNode
     document: LangiumDocument
     textDocument: TextDocument
+    features: NextFeature[]
+    /**
+     * Index at the start of the token related to this context.
+     * If the context performs completion for a token that doesn't exist yet, it is equal to the `offset`.
+     */
+    tokenOffset: number
+    /**
+     * Index at the end of the token related to this context, even if it is behind the cursor position.
+     * Points at the first character after the last token.
+     * If the context performs completion for a token that doesn't exist yet, it is equal to the `offset`.
+     */
+    tokenEndOffset: number
+    /**
+     * Index of the requested completion position.
+     */
     offset: number
     position: Position
 }
@@ -66,6 +82,13 @@ export interface CompletionProviderOptions {
     allCommitCharacters?: string[];
 }
 
+export interface CompletionBacktrackingInformation {
+    previousTokenStart?: number;
+    previousTokenEnd?: number;
+    nextTokenStart: number;
+    nextTokenEnd: number;
+}
+
 export function mergeCompletionProviderOptions(options: Array<CompletionProviderOptions | undefined>): CompletionProviderOptions {
     const triggerCharacters = Array.from(new Set(options.flatMap(option => option?.triggerCharacters ?? [])));
     const allCommitCharacters = Array.from(new Set(options.flatMap(option => option?.allCommitCharacters ?? [])));
@@ -100,7 +123,7 @@ export class DefaultCompletionProvider implements CompletionProvider {
     protected readonly scopeProvider: ScopeProvider;
     protected readonly grammar: ast.Grammar;
     protected readonly nameProvider: NameProvider;
-    protected readonly grammarConfig: GrammarConfig;
+    protected readonly lexer: Lexer;
     protected readonly nodeKindProvider: NodeKindProvider;
     protected readonly fuzzyMatcher: FuzzyMatcher;
 
@@ -109,73 +132,22 @@ export class DefaultCompletionProvider implements CompletionProvider {
         this.grammar = services.Grammar;
         this.completionParser = services.parser.CompletionParser;
         this.nameProvider = services.references.NameProvider;
-        this.grammarConfig = services.parser.GrammarConfig;
+        this.lexer = services.parser.Lexer;
         this.nodeKindProvider = services.shared.lsp.NodeKindProvider;
         this.fuzzyMatcher = services.shared.lsp.FuzzyMatcher;
     }
 
     async getCompletion(document: LangiumDocument, params: CompletionParams): Promise<CompletionList | undefined> {
-        const root = document.parseResult.value;
-        const cst = root.$cstNode;
-        if (!cst) {
-            return undefined;
-        }
         const items: CompletionItem[] = [];
-        const textDocument = document.textDocument;
-        const text = textDocument.getText();
-        const offset = textDocument.offsetAt(params.position);
-        const acceptor: CompletionAcceptor = value => {
-            const completionItem = this.fillCompletionItem(textDocument, offset, value);
+        const contexts = this.buildContexts(document, params.position);
+
+        const acceptor: CompletionAcceptor = (context, value) => {
+            const completionItem = this.fillCompletionItem(context, value);
             if (completionItem) {
                 items.push(completionItem);
             }
         };
 
-        const lastTokenOffset = this.backtrackToAnyToken(text, offset);
-
-        const astNode = findLeafNodeAtOffset(cst, lastTokenOffset)?.element;
-
-        const context: CompletionContext = {
-            document,
-            textDocument,
-            node: astNode,
-            offset,
-            position: params.position
-        };
-
-        if (!astNode) {
-            const parserRule = getEntryRule(this.grammar)!;
-            await this.completionForRule(context, parserRule, acceptor);
-            return CompletionList.create(this.deduplicateItems(items), true);
-        }
-
-        const contexts: CompletionContext[] = [context];
-
-        // In some cases, the completion depends on the concrete AST node at the current position
-        // Often times, it is not clear whether the completion is related to the ast node at the current cursor position
-        // (i.e. associated with the character right after the cursor), or the one before the cursor.
-        // If these are different AST nodes, then we perform the completion twice
-        if (lastTokenOffset === offset && lastTokenOffset > 0) {
-            const previousAstNode = findLeafNodeAtOffset(cst, lastTokenOffset - 1)?.element;
-            if (previousAstNode !== astNode) {
-                contexts.push({
-                    document,
-                    textDocument,
-                    node: previousAstNode,
-                    offset,
-                    position: params.position
-                });
-            }
-        }
-
-        const parserStart = this.backtrackToTokenStart(text, offset);
-        const beforeFeatures = this.findFeaturesAt(textDocument, parserStart);
-        let afterFeatures: NextFeature[] = [];
-        const reparse = this.canReparse() && offset !== parserStart;
-        if (reparse) {
-            afterFeatures = this.findFeaturesAt(textDocument, offset);
-        }
-
         const distinctionFunction = (element: NextFeature) => {
             if (ast.isKeyword(element.feature)) {
                 return element.feature.value;
@@ -184,19 +156,20 @@
             }
         };
 
-        await Promise.all(
-            stream(beforeFeatures)
-                .distinct(distinctionFunction)
-                .map(e => this.completionForContexts(contexts, e, acceptor))
-        );
-
-        if (reparse) {
+        const completedFeatures: NextFeature[] = [];
+        for (const context of contexts) {
             await Promise.all(
-                stream(afterFeatures)
-                    .exclude(beforeFeatures, distinctionFunction)
+                stream(context.features)
                     .distinct(distinctionFunction)
-                    .map(e => this.completionForContexts(contexts, e, acceptor))
+                    .exclude(completedFeatures)
+                    .map(e => this.completionFor(context, e, acceptor))
             );
+            // Do not try to complete the same feature multiple times
+            completedFeatures.push(...context.features);
+            // We might want to stop computing completion results
+            if (!this.continueCompletion(items)) {
+                break;
+            }
         }
 
         return CompletionList.create(this.deduplicateItems(items), true);
@@ -206,22 +179,12 @@
      * The completion algorithm could yield the same reference/keyword multiple times.
      *
      * This methods deduplicates these items afterwards before returning to the client.
-     * Unique items are identified as a combination of `kind`, `label` and `detail`
+     * Unique items are identified as a combination of `kind`, `label` and `detail`.
      */
     protected deduplicateItems(items: CompletionItem[]): CompletionItem[] {
         return stream(items).distinct(item => `${item.kind}_${item.label}_${item.detail}`).toArray();
     }
 
-    /**
-     * Determines whether the completion parser will reparse the input at the point of completion.
-     * By default, this returns `false`, indicating that the completion will only look for completion results starting from the token at the cursor position.
-     * Override this and return `true` to indicate that the completion should parse the input a second time.
-     * This might add some missing completions at the cost at parsing the input twice.
-     */
-    protected canReparse(): boolean {
-        return false;
-    }
-
     protected findFeaturesAt(document: TextDocument, offset: number): NextFeature[] {
         const text = document.getText({
             start: Position.create(0, 0),
@@ -251,27 +214,116 @@
         return features;
     }
 
-    protected backtrackToAnyToken(text: string, offset: number): number {
-        if (offset >= text.length) {
-            offset = text.length - 1;
+    protected *buildContexts(document: LangiumDocument, position: Position): IterableIterator<CompletionContext> {
+        const cst = document.parseResult.value.$cstNode;
+        if (!cst) {
+            return;
+        }
+        const textDocument = document.textDocument;
+        const text = textDocument.getText();
+        const offset = textDocument.offsetAt(position);
+        const { nextTokenStart, nextTokenEnd, previousTokenStart, previousTokenEnd } = this.backtrackToAnyToken(text, offset);
+        const partialContext = {
+            document,
+            textDocument,
+            offset,
+            position
+        };
+        let astNode: AstNode | undefined;
+        if (previousTokenStart !== undefined && previousTokenEnd !== undefined && previousTokenEnd === offset) {
+            astNode = findLeafNodeAtOffset(cst, previousTokenStart)?.element;
+            const previousTokenFeatures = this.findFeaturesAt(textDocument, previousTokenStart);
+            yield {
+                ...partialContext,
+                node: astNode,
+                tokenOffset: previousTokenStart,
+                tokenEndOffset: previousTokenEnd,
+                features: previousTokenFeatures,
+            };
         }
-        while (offset > 0 && /\s/.test(text.charAt(offset))) {
-            offset--;
+        astNode = findLeafNodeAtOffset(cst, nextTokenStart)?.element
+            ?? (previousTokenStart === undefined ? undefined : findLeafNodeAtOffset(cst, previousTokenStart)?.element);
+
+        if (!astNode) {
+            const parserRule = getEntryRule(this.grammar)!;
+            const firstFeatures = findFirstFeatures(parserRule.definition);
+            yield {
+                ...partialContext,
+                tokenOffset: nextTokenStart,
+                tokenEndOffset: nextTokenEnd,
+                features: firstFeatures
+            };
+        } else {
+            const nextTokenFeatures = this.findFeaturesAt(textDocument, nextTokenStart);
+            yield {
+                ...partialContext,
+                node: astNode,
+                tokenOffset: nextTokenStart,
+                tokenEndOffset: nextTokenEnd,
+                features: nextTokenFeatures,
+            };
         }
-        return offset;
     }
 
-    protected backtrackToTokenStart(text: string, offset: number): number {
-        if (offset < 1) {
-            return offset;
+    /**
+     * Indicates whether the completion should continue to process the next completion context.
+     *
+     * The default implementation continues the completion only if there are currently no proposed completion items.
+     */
+    protected continueCompletion(items: CompletionItem[]): boolean {
+        return items.length === 0;
+    }
+
+    /**
+     * This method returns two sets of token offset information.
+     *
+     * The `nextToken*` offsets are related to the token at the cursor position.
+     * If there is none, both offsets are simply set to `offset`.
+     *
+     * The `previousToken*` offsets are related to the last token before the current token at the cursor position.
+     * They are `undefined`, if there is no token before the cursor position.
+     */
+    protected backtrackToAnyToken(text: string, offset: number): CompletionBacktrackingInformation {
+        const tokens = this.lexer.tokenize(text).tokens;
+        if (tokens.length === 0) {
+            // If we don't have any tokens in our document, just return the offset position
+            return {
+                nextTokenStart: offset,
+                nextTokenEnd: offset
+            };
         }
-        const wordRegex = this.grammarConfig.nameRegexp;
-        let lastCharacter = text.charAt(offset - 1);
-        while (offset > 0 && wordRegex.test(lastCharacter)) {
-            offset--;
-            lastCharacter = text.charAt(offset - 1);
+        let previousToken: IToken | undefined;
+        for (const token of tokens) {
+            if (token.startOffset >= offset) {
+                // We are between two tokens
+                // Return the current offset as the next token index
+                return {
+                    nextTokenStart: offset,
+                    nextTokenEnd: offset,
+                    previousTokenStart: previousToken ? previousToken.startOffset : undefined,
+                    previousTokenEnd: previousToken ? previousToken.endOffset! + 1 : undefined
+                };
+            }
+            if (token.endOffset! >= offset) {
+                // We are within a token
+                // Return the current and previous token offsets as normal
+                return {
+                    nextTokenStart: token.startOffset,
+                    nextTokenEnd: token.endOffset! + 1,
+                    previousTokenStart: previousToken ? previousToken.startOffset : undefined,
+                    previousTokenEnd: previousToken ? previousToken.endOffset! + 1 : undefined
+                };
+            }
+            previousToken = token;
         }
-        return offset;
+        // We have run into the end of the file
+        // Return the current offset as the next token index
+        return {
+            nextTokenStart: offset,
+            nextTokenEnd: offset,
+            previousTokenStart: previousToken ? previousToken.startOffset : undefined,
+            previousTokenEnd: previousToken ? previousToken.endOffset! + 1 : undefined
+        };
     }
 
     protected async completionForRule(context: CompletionContext, rule: ast.AbstractRule, acceptor: CompletionAcceptor): Promise<void> {
@@ -281,12 +333,6 @@
         }
     }
 
-    protected async completionForContexts(contexts: CompletionContext[], next: NextFeature, acceptor: CompletionAcceptor): Promise<void> {
-        for (const context of contexts) {
-            await this.completionFor(context, next, acceptor);
-        }
-    }
-
     protected completionFor(context: CompletionContext, next: NextFeature, acceptor: CompletionAcceptor): MaybePromise<void> {
         if (ast.isKeyword(next.feature)) {
            return this.completionForKeyword(context, next.feature, acceptor);
@@ -319,7 +365,7 @@
                 const duplicateStore = new Set<string>();
                 scope.getAllElements().forEach(e => {
                     if (!duplicateStore.has(e.name) && this.filterCrossReference(e)) {
-                        acceptor(this.createReferenceCompletionItem(e));
+                        acceptor(context, this.createReferenceCompletionItem(e));
                         duplicateStore.add(e.name);
                     }
                 });
@@ -331,7 +377,7 @@
 
     /**
      * Override this method to change how reference completion items are created.
-     * Most notably useful to change the `kind` property which indicates which icon to display on the client.
+     * To change the `kind` of a completion item, override the `NodeKindProvider` service instead.
      *
      * @param nodeDescription The description of a reference candidate
      * @returns A partial completion item
      */
     protected createReferenceCompletionItem(nodeDescription: AstNodeDescription): CompletionValueItem {
@@ -354,7 +400,7 @@
         if (!keyword.value.match(/[\w]/)) {
             return;
         }
-        acceptor({
+        acceptor(context, {
             label: keyword.value,
             kind: CompletionItemKind.Keyword,
             detail: 'Keyword',
             sortText: '1'
         });
     }
 
-    protected fillCompletionItem(document: TextDocument, offset: number, item: CompletionValueItem): CompletionItem | undefined {
+    protected fillCompletionItem(context: CompletionContext, item: CompletionValueItem): CompletionItem | undefined {
         let label: string;
         if (typeof item.label === 'string') {
             label = item.label;
@@ -385,7 +431,7 @@
         } else {
             insertText = label;
         }
-        const textEdit = item.textEdit ?? this.buildCompletionTextEdit(document, offset, label, insertText);
+        const textEdit = item.textEdit ?? this.buildCompletionTextEdit(context, label, insertText);
         if (!textEdit) {
             return undefined;
         }
@@ -413,13 +459,12 @@
         return completionItem;
     }
 
-    protected buildCompletionTextEdit(document: TextDocument, offset: number, label: string, newText: string): TextEdit | undefined {
-        const content = document.getText();
-        const tokenStart = this.backtrackToTokenStart(content, offset);
-        const identifier = content.substring(tokenStart, offset);
+    protected buildCompletionTextEdit(context: CompletionContext, label: string, newText: string): TextEdit | undefined {
+        const content = context.textDocument.getText();
+        const identifier = content.substring(context.tokenOffset, context.offset);
         if (this.fuzzyMatcher.match(identifier, label)) {
-            const start = document.positionAt(tokenStart);
-            const end = document.positionAt(offset);
+            const start = context.textDocument.positionAt(context.tokenOffset);
+            const end = context.position;
             return {
                 newText,
                 range: {
@@ -431,8 +476,4 @@
             return undefined;
         }
     }
-
-    protected isWordCharacterAt(content: string, index: number): boolean {
-        return this.grammarConfig.nameRegexp.test(content.charAt(index));
-    }
 }
diff --git a/packages/langium/src/utils/uri-util.ts b/packages/langium/src/utils/uri-util.ts
index 8f1975b05..1ef323c46 100644
--- a/packages/langium/src/utils/uri-util.ts
+++ b/packages/langium/src/utils/uri-util.ts
@@ -10,9 +10,9 @@ export function equalURI(a?: URI | string, b?: URI | string): boolean {
     return a?.toString() === b?.toString();
 }
 
-export function relativeURI(from: URI, to: URI): string {
-    const fromPath = from.path;
-    const toPath = to.path;
+export function relativeURI(from: URI | string, to: URI | string): string {
+    const fromPath = typeof from === 'string' ? from : from.path;
+    const toPath = typeof to === 'string' ? to : to.path;
     const fromParts = fromPath.split('/').filter(e => e.length > 0);
     const toParts = toPath.split('/').filter(e => e.length > 0);
     let i = 0;
diff --git a/packages/langium/test/lsp/completion-provider.test.ts b/packages/langium/test/lsp/completion-provider.test.ts
index e105a3255..55e1b78b7 100644
--- a/packages/langium/test/lsp/completion-provider.test.ts
+++ b/packages/langium/test/lsp/completion-provider.test.ts
@@ -4,9 +4,9 @@
  * terms of the MIT License, which is available in the project root.
  ******************************************************************************/
 
-import { describe, test } from 'vitest';
+import { beforeEach, describe, test } from 'vitest';
 import { createLangiumGrammarServices, createServicesForGrammar, EmptyFileSystem } from '../../src';
-import { expectCompletion } from '../../src/test';
+import { clearDocuments, expectCompletion, parseHelper } from '../../src/test';
 
 describe('Langium completion provider', () => {
 
@@ -155,3 +155,50 @@ describe('Completion within alternatives', () => {
         });
     });
 });
+
+describe('Path import completion', () => {
+
+    const grammarServices = createLangiumGrammarServices(EmptyFileSystem).grammar;
+    const completion = expectCompletion(grammarServices);
+    const parse = parseHelper(grammarServices);
+    let path = '';
+    let text = '';
+
+    beforeEach(async () => {
+        await clearDocuments(grammarServices);
+        const importedGrammar = "terminal ID: 'ID';";
+        const document = await parse(importedGrammar);
+        path = document.uri.path.substring(1);
+        path = './' + path.substring(0, path.length - '.langium'.length);
+        text = `
+        grammar g
+        import<|> <|>"<|>./<|>"
+        entry Main: value=ID;
+        `;
+    });
+
+    const testNames = [
+        'Completes import before string start',
+        'Completes import inside of string',
+        'Completes import at the end of string'
+    ];
+
+    for (let i = 0; i < 3; i++) {
+        const index = i + 1;
+        test(testNames[i], async () => {
+            await completion({
+                text,
+                index,
+                expectedItems: [path]
+            });
+        });
+    }
+
+    test('Completes import keyword as expected', async () => {
+        await completion({
+            text,
+            index: 0,
+            expectedItems: ['import']
+        });
+    });
+});
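
Usage sketch (not part of the commit above): a downstream Langium language can hook into the same extension point this patch uses for the grammar language, by binding its own CompletionProvider and overriding completionFor, which now receives a CompletionContext carrying the tokenOffset/tokenEndOffset fields added here. The MyDsl* names below are hypothetical, and the exact export surface of the 'langium' package may vary between versions; treat this as a minimal illustration rather than the library's prescribed API.

import type { CompletionAcceptor, CompletionContext, MaybePromise, NextFeature } from 'langium';
import { DefaultCompletionProvider } from 'langium';

// Hypothetical provider modelled on LangiumGrammarCompletionProvider from this patch.
export class MyDslCompletionProvider extends DefaultCompletionProvider {

    protected override completionFor(context: CompletionContext, next: NextFeature, acceptor: CompletionAcceptor): MaybePromise<void> {
        // context.tokenOffset and context.tokenEndOffset delimit the token under the cursor,
        // so a custom proposal can replace it precisely via a text edit before falling back
        // to the default behaviour.
        return super.completionFor(context, next, acceptor);
    }
}

// Registration in the language module, analogous to the LangiumGrammarModule change:
// lsp: {
//     CompletionProvider: (services) => new MyDslCompletionProvider(services)
// }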