From 33fc6647d4a655d305500410c15e23d1a3baf9e3 Mon Sep 17 00:00:00 2001 From: Quinn Slack Date: Fri, 28 Jul 2023 22:30:03 -0700 Subject: [PATCH] rm completions cache --- lib/shared/src/configuration.ts | 1 - vscode/package.json | 5 - vscode/src/completions/cache.test.ts | 97 ---------------- vscode/src/completions/cache.ts | 104 ----------------- .../src/completions/request-manager.test.ts | 25 +---- vscode/src/completions/request-manager.ts | 106 +----------------- ...vscodeInlineCompletionItemProvider.test.ts | 21 +--- .../vscodeInlineCompletionItemProvider.ts | 78 +------------ vscode/src/configuration.test.ts | 4 - vscode/src/configuration.ts | 1 - vscode/src/main.ts | 2 - .../test/completions/completions-dataset.ts | 2 - .../run-code-completions-on-dataset.ts | 1 - 13 files changed, 12 insertions(+), 435 deletions(-) delete mode 100644 vscode/src/completions/cache.test.ts delete mode 100644 vscode/src/completions/cache.ts diff --git a/lib/shared/src/configuration.ts b/lib/shared/src/configuration.ts index 076a28ae14b6..efc29a398e09 100644 --- a/lib/shared/src/configuration.ts +++ b/lib/shared/src/configuration.ts @@ -24,7 +24,6 @@ export interface Configuration { | 'unstable-azure-openai' autocompleteAdvancedServerEndpoint: string | null autocompleteAdvancedAccessToken: string | null - autocompleteAdvancedCache: boolean autocompleteAdvancedEmbeddings: boolean autocompleteExperimentalCompleteSuggestWidgetSelection?: boolean pluginsEnabled?: boolean diff --git a/vscode/package.json b/vscode/package.json index 7c54db4d3910..7740c067f0ae 100644 --- a/vscode/package.json +++ b/vscode/package.json @@ -885,11 +885,6 @@ "type": "string", "markdownDescription": "Overwrite the access token used for code autocomplete. This is only supported with a provider other than `anthropic`." }, - "cody.autocomplete.advanced.cache": { - "type": "boolean", - "default": true, - "markdownDescription": "Enables caching of code autocomplete." 
- }, "cody.autocomplete.advanced.embeddings": { "order": 99, "type": "boolean", diff --git a/vscode/src/completions/cache.test.ts b/vscode/src/completions/cache.test.ts deleted file mode 100644 index a78879ec9b8c..000000000000 --- a/vscode/src/completions/cache.test.ts +++ /dev/null @@ -1,97 +0,0 @@ -import { describe, expect, it } from 'vitest' - -import { CompletionsCache } from './cache' - -describe('CompletionsCache', () => { - it('returns the cached completion items', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo\n', content: 'bar' }]) - - expect(cache.get('foo\n')).toEqual({ - logId: 'id1', - isExactPrefix: true, - completions: [{ prefix: 'foo\n', content: 'bar' }], - }) - }) - - it('returns the cached items when the prefix includes characters from the completion', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo\n', content: 'bar' }]) - - expect(cache.get('foo\nb')).toEqual({ - logId: 'id1', - isExactPrefix: false, - completions: [{ prefix: 'foo\nb', content: 'ar' }], - }) - expect(cache.get('foo\nba')).toEqual({ - logId: 'id1', - isExactPrefix: false, - completions: [{ prefix: 'foo\nba', content: 'r' }], - }) - }) - - it('trims trailing whitespace on empty line', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo \n ', content: 'bar' }]) - - expect(cache.get('foo \n ', true)).toEqual({ - logId: 'id1', - isExactPrefix: false, - completions: [{ prefix: 'foo \n ', content: 'bar' }], - }) - expect(cache.get('foo \n ', true)).toEqual({ - logId: 'id1', - isExactPrefix: false, - completions: [{ prefix: 'foo \n ', content: 'bar' }], - }) - expect(cache.get('foo \n', true)).toEqual({ - logId: 'id1', - isExactPrefix: false, - completions: [{ prefix: 'foo \n', content: 'bar' }], - }) - expect(cache.get('foo ', true)).toEqual(undefined) - }) - - it('does not trim trailing whitespace on non-empty line', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo', content: 'bar' }]) - - expect(cache.get('foo', true)).toEqual({ - logId: 'id1', - isExactPrefix: true, - completions: [{ prefix: 'foo', content: 'bar' }], - }) - expect(cache.get('foo ', true)).toEqual(undefined) - expect(cache.get('foo ', true)).toEqual(undefined) - expect(cache.get('foo \n', true)).toEqual(undefined) - expect(cache.get('foo\n', true)).toEqual(undefined) - expect(cache.get('foo\t', true)).toEqual(undefined) - }) - - it('has a lookup function for untrimmed prefixes', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo\n ', content: 'baz' }]) - - expect(cache.get('foo\n ', false)).toEqual({ - logId: 'id1', - isExactPrefix: true, - completions: [ - { - prefix: 'foo\n ', - content: 'baz', - }, - ], - }) - expect(cache.get('foo\n ', false)).toEqual(undefined) - }) - - it('updates the log id for all cached entries', () => { - const cache = new CompletionsCache() - cache.add('id1', [{ prefix: 'foo \n ', content: 'bar' }]) - cache.updateLogId('id1', 'id2') - - expect(cache.get('foo \n ', true)?.logId).toBe('id2') - expect(cache.get('foo \n ', true)?.logId).toBe('id2') - expect(cache.get('foo \n', true)?.logId).toBe('id2') - }) -}) diff --git a/vscode/src/completions/cache.ts b/vscode/src/completions/cache.ts deleted file mode 100644 index 2b3396fc260d..000000000000 --- a/vscode/src/completions/cache.ts +++ /dev/null @@ -1,104 +0,0 @@ -import { LRUCache } from 'lru-cache' - -import { trimEndOnLastLineIfWhitespaceOnly } from './text-processing' -import { Completion } from 
'./types' - -export interface CachedCompletions { - logId: string - isExactPrefix: boolean - completions: Completion[] -} - -export class CompletionsCache { - private cache = new LRUCache({ - max: 500, // Maximum input prefixes in the cache. - }) - - public clear(): void { - this.cache.clear() - } - - // TODO: The caching strategy only takes the file content prefix into - // account. We need to add additional information like file path or suffix - // to make sure the cache does not return undesired results for other files - // in the same project. - public get(prefix: string, trim: boolean = true): CachedCompletions | undefined { - const trimmedPrefix = trim ? trimEndOnLastLineIfWhitespaceOnly(prefix) : prefix - const result = this.cache.get(trimmedPrefix) - - if (!result) { - return undefined - } - - const completions = result.completions.map(completion => { - if (trimmedPrefix.length === trimEndOnLastLineIfWhitespaceOnly(completion.prefix).length) { - return { ...completion, prefix, content: completion.content } - } - - // Cached results can be created by appending characters from a - // recommendation from a smaller input prompt. If that's the - // case, we need to slightly change the content and remove - // characters that are now part of the prefix. - const sliceChars = prefix.length - completion.prefix.length - return { - ...completion, - prefix, - content: completion.content.slice(sliceChars), - } - }) - - return { - ...result, - completions, - } - } - - public add(logId: string, completions: Completion[]): void { - for (const completion of completions) { - // Cache the exact prefix first and then append characters from the - // completion one after the other until the first line is exceeded. - // - // If the completion starts with a `\n`, this logic will append the - // second line instead. - let maxCharsAppended = completion.content.indexOf('\n', completion.content.at(0) === '\n' ? 
1 : 0) - if (maxCharsAppended === -1) { - maxCharsAppended = completion.content.length - } - - // We also cache the completion with the exact (= untrimmed) prefix - // for the separate lookup mode used for deletions - if (trimEndOnLastLineIfWhitespaceOnly(completion.prefix) !== completion.prefix) { - this.insertCompletion(completion.prefix, logId, completion, true) - } - - for (let i = 0; i <= maxCharsAppended; i++) { - const key = trimEndOnLastLineIfWhitespaceOnly(completion.prefix) + completion.content.slice(0, i) - this.insertCompletion(key, logId, completion, key === completion.prefix) - } - } - } - - public updateLogId(oldLogId: string, newLogId: string): void { - const entries = this.cache.values() - for (const value of entries) { - if (value && 'logId' in value && value.logId === oldLogId) { - value.logId = newLogId - } - } - } - - private insertCompletion(key: string, logId: string, completion: Completion, isExactPrefix: boolean): void { - let existingCompletions: Completion[] = [] - if (this.cache.has(key)) { - existingCompletions = this.cache.get(key)!.completions - } - - const cachedCompletion: CachedCompletions = { - logId, - isExactPrefix, - completions: existingCompletions.concat(completion), - } - - this.cache.set(key, cachedCompletion) - } -} diff --git a/vscode/src/completions/request-manager.test.ts b/vscode/src/completions/request-manager.test.ts index e3921228b366..0b2400f498cb 100644 --- a/vscode/src/completions/request-manager.test.ts +++ b/vscode/src/completions/request-manager.test.ts @@ -2,7 +2,6 @@ import { beforeEach, describe, expect, it, vi } from 'vitest' import { vsCodeMocks } from '../testutils/mocks' -import { CompletionsCache } from './cache' import { Provider } from './providers/provider' import { RequestManager } from './request-manager' import { Completion } from './types' @@ -51,8 +50,7 @@ function createProvider(prefix: string) { describe('RequestManager', () => { let createRequest: (prefix: string, provider: Provider) => Promise beforeEach(() => { - const cache = new CompletionsCache() - const requestManager = new RequestManager(cache) + const requestManager = new RequestManager() createRequest = (prefix: string, provider: Provider) => requestManager.request(DOCUMENT_URI, LOG_ID, prefix, [provider], [], new AbortController().signal) @@ -98,25 +96,4 @@ describe('RequestManager', () => { expect((await promise1)[0].content).toBe('log();') expect(provider1.didFinishNetworkRequest).toBe(true) }) - - it('serves request from cache when a prior request resolves', async () => { - const prefix1 = 'console.' 
- const provider1 = createProvider(prefix1) - const promise1 = createRequest(prefix1, provider1) - - const prefix2 = 'console.log(' - const provider2 = createProvider(prefix2) - const promise2 = createRequest(prefix2, provider2) - - provider1.resolveRequest(["log('hello')"]) - - expect((await promise1)[0].content).toBe("log('hello')") - expect((await promise2)[0].content).toBe("'hello')") - - expect(provider1.didFinishNetworkRequest).toBe(true) - expect(provider2.didFinishNetworkRequest).toBe(false) - - // Ensure that the completed network request does not cause issues - provider2.resolveRequest(["'world')"]) - }) }) diff --git a/vscode/src/completions/request-manager.ts b/vscode/src/completions/request-manager.ts index 3440f0b3f372..7ae61ee3c05d 100644 --- a/vscode/src/completions/request-manager.ts +++ b/vscode/src/completions/request-manager.ts @@ -1,16 +1,7 @@ -import { CompletionsCache } from './cache' import { ReferenceSnippet } from './context' -import { logCompletionEvent } from './logger' import { CompletionProviderTracer, Provider } from './providers/provider' import { Completion } from './types' -interface Request { - prefix: string - tracer?: CompletionProviderTracer - resolve(completions: Completion[]): void - reject(error: Error): void -} - /** * This class can handle concurrent requests for code completions. The idea is * that requests are not cancelled even when the user continues typing in the @@ -18,10 +9,6 @@ interface Request { * return them when the user triggers a completion again. */ export class RequestManager { - private readonly requests: Map = new Map() - - constructor(private completionsCache: CompletionsCache | null) {} - public async request( documentUri: string, logId: string, @@ -31,107 +18,20 @@ export class RequestManager { signal: AbortSignal, tracer?: CompletionProviderTracer ): Promise { - let resolve: Request['resolve'] = () => {} - let reject: Request['reject'] = () => {} - const requestPromise = new Promise((res, rej) => { - resolve = res - reject = rej - }) - - const request: Request = { - prefix, - resolve, - reject, - tracer, - } - this.startRequest(request, documentUri, logId, providers, context, signal) - - return requestPromise - } - - private startRequest( - request: Request, - documentUri: string, - logId: string, - providers: Provider[], - context: ReferenceSnippet[], - signal: AbortSignal - ): void { // We forward a different abort controller to the network request so we // can cancel the network request independently of the user cancelling // the completion. const networkRequestAbortController = new AbortController() - this.addRequest(documentUri, request) - - Promise.all( - providers.map(c => c.generateCompletions(networkRequestAbortController.signal, context, request.tracer)) + return Promise.all( + providers.map(c => c.generateCompletions(networkRequestAbortController.signal, context, tracer)) ) .then(res => res.flat()) .then(completions => { - // Add the completed results to the cache, even if the request - // was cancelled before or completed via a cache retest of a - // previous request. 
- this.completionsCache?.add(logId, completions) - if (signal.aborted) { throw new Error('aborted') } - - request.resolve(completions) - }) - .catch(error => { - request.reject(error) + return completions }) - .finally(() => { - this.removeRequest(documentUri, request) - this.retestCaches(documentUri) - }) - } - - /** - * When one network request completes and the item is being added to the - * completion cache, we check all pending requests for the same document to - * see if we can synthesize a completion response from the new cache. - */ - private retestCaches(documentUri: string): void { - const requests = this.requests.get(documentUri) - if (!requests) { - return - } - - for (const request of requests) { - const cachedCompletions = this.completionsCache?.get(request.prefix) - if (cachedCompletions) { - logCompletionEvent('synthesizedFromParallelRequest') - request.resolve(cachedCompletions.completions) - this.removeRequest(documentUri, request) - } - } - } - - private addRequest(documentUri: string, request: Request): void { - let requestsForDocument: Request[] = [] - if (this.requests.has(documentUri)) { - requestsForDocument = this.requests.get(documentUri)! - } else { - this.requests.set(documentUri, requestsForDocument) - } - requestsForDocument.push(request) - } - - private removeRequest(documentUri: string, request: Request): void { - const requestsForDocument = this.requests.get(documentUri) - const index = requestsForDocument?.indexOf(request) - - if (requestsForDocument === undefined || index === undefined || index === -1) { - return - } - - requestsForDocument.splice(index, 1) - - if (requestsForDocument.length === 0) { - this.requests.delete(documentUri) - } } } diff --git a/vscode/src/completions/vscodeInlineCompletionItemProvider.test.ts b/vscode/src/completions/vscodeInlineCompletionItemProvider.test.ts index 264ff76c3416..8185d375e829 100644 --- a/vscode/src/completions/vscodeInlineCompletionItemProvider.test.ts +++ b/vscode/src/completions/vscodeInlineCompletionItemProvider.test.ts @@ -12,7 +12,6 @@ import { import { CodyStatusBar } from '../services/StatusBar' import { vsCodeMocks } from '../testutils/mocks' -import { CompletionsCache } from './cache' import { DocumentHistory } from './history' import { createProviderConfig } from './providers/anthropic' import { completion, documentAndPosition } from './testHelpers' @@ -79,7 +78,7 @@ describe('Cody completions', () => { */ let complete: ( code: string, - responses?: CompletionResponse[] | 'stall', + responses?: CompletionResponse[], languageId?: string, context?: vscode.InlineCompletionContext ) => Promise<{ @@ -87,10 +86,9 @@ describe('Cody completions', () => { completions: vscode.InlineCompletionItem[] }> beforeEach(() => { - const cache = new CompletionsCache() complete = async ( code: string, - responses?: CompletionResponse[] | 'stall', + responses?: CompletionResponse[], languageId: string = 'typescript', context: vscode.InlineCompletionContext = { triggerKind: vsCodeMocks.InlineCompletionTriggerKind.Automatic, @@ -105,10 +103,6 @@ describe('Cody completions', () => { const completionsClient: Pick = { complete(params: CompletionParameters): Promise { requests.push(params) - if (responses === 'stall') { - // Creates a stalling request that never responds - return new Promise(() => {}) - } return Promise.resolve(responses?.[requestCounter++] || { completion: '', stopReason: 'unknown' }) }, } @@ -122,7 +116,6 @@ describe('Cody completions', () => { history: DUMMY_DOCUMENT_HISTORY, codebaseContext: 
DUMMY_CODEBASE_CONTEXT, disableTimeouts: true, - cache, }) if (!code.includes(CURSOR_MARKER)) { @@ -1037,14 +1030,4 @@ describe('Cody completions', () => { expect(completions[0].insertText).toBe("console.log('foo')") }) }) - - describe('completions cache', () => { - it('synthesizes a completion from a prior request', async () => { - await complete('console.█', [completion`log('Hello, world!');`]) - - const { completions } = await complete('console.log(█', 'stall') - - expect(completions[0].insertText).toBe("'Hello, world!');") - }) - }) }) diff --git a/vscode/src/completions/vscodeInlineCompletionItemProvider.ts b/vscode/src/completions/vscodeInlineCompletionItemProvider.ts index d9b7f972208a..e14563d303d6 100644 --- a/vscode/src/completions/vscodeInlineCompletionItemProvider.ts +++ b/vscode/src/completions/vscodeInlineCompletionItemProvider.ts @@ -8,7 +8,6 @@ import { CodebaseContext } from '@sourcegraph/cody-shared/src/codebase-context' import { debug } from '../log' import { CodyStatusBar } from '../services/StatusBar' -import { CachedCompletions, CompletionsCache } from './cache' import { getContext, GetContextOptions, GetContextResult } from './context' import { getCurrentDocContext } from './document' import { DocumentHistory } from './history' @@ -30,7 +29,6 @@ interface CodyCompletionItemProviderConfig { suffixPercentage?: number disableTimeouts?: boolean isEmbeddingsContextEnabled?: boolean - cache: CompletionsCache | null completeSuggestWidgetSelection?: boolean tracer?: ProvideInlineCompletionItemsTracer | null contextFetcher?: (options: GetContextOptions) => Promise @@ -49,7 +47,6 @@ export class InlineCompletionItemProvider implements vscode.InlineCompletionItem private readonly config: Required private requestManager: RequestManager - private previousCompletionLogId?: string constructor({ responsePercentage = 0.1, @@ -94,7 +91,7 @@ export class InlineCompletionItemProvider implements vscode.InlineCompletionItem this.maxPrefixChars = Math.floor(this.promptChars * this.config.prefixPercentage) this.maxSuffixChars = Math.floor(this.promptChars * this.config.suffixPercentage) - this.requestManager = new RequestManager(this.config.cache) + this.requestManager = new RequestManager() debug('CodyCompletionProvider:initialized', `provider: ${this.config.providerConfig.identifier}`) @@ -180,74 +177,11 @@ export class InlineCompletionItemProvider implements vscode.InlineCompletionItem return emptyCompletions() } - let cachedCompletions: CachedCompletions | undefined - - // Avoid showing completions when we're deleting code (Cody can only insert code at the - // moment) - const lastChange = this.lastContentChanges.get(document.fileName) ?? 'add' - if (lastChange === 'del') { - // When a line was deleted, only look up cached items and only include them if the - // untruncated prefix matches. This fixes some weird issues where the completion would - // render if you insert whitespace but not on the original place when you delete it - // again - cachedCompletions = this.config.cache?.get(docContext.prefix, false) - if (!cachedCompletions?.isExactPrefix) { - return emptyCompletions() - } - } - - // If cachedCompletions was already set by the above logic, we don't have to query the cache - // again. - cachedCompletions = cachedCompletions ?? this.config.cache?.get(docContext.prefix) - - // We create a log entry after determining if we have a potential cache hit. 
This is - // necessary to make sure that typing text of a displayed completion will not log a new - // completion on every keystroke - // - // However we only log a completion as started if it's either served from cache _or_ the - // debounce interval has passed to ensure we don't log too many start events where we end up - // not doing any work at all - const useLogIdFromPreviousCompletion = - cachedCompletions?.logId && cachedCompletions?.logId === this.previousCompletionLogId - if (!useLogIdFromPreviousCompletion) { - CompletionLogger.clear() - } - const logId = useLogIdFromPreviousCompletion - ? cachedCompletions!.logId - : CompletionLogger.create({ - multiline, - providerIdentifier: this.config.providerConfig.identifier, - languageId: document.languageId, - }) - this.previousCompletionLogId = logId - - if (cachedCompletions) { - // When we serve a completion from the cache and create a new log - // id, we want to ensure to only refer to the new id for future - // cache retrievals. If we don't do this, every subsequent cache hit - // would otherwise no longer match the previous completion ID and we - // would log a new completion each time, even if the user just - // continues typing on the currently displayed completion. - if (logId !== cachedCompletions.logId) { - this.config.cache?.updateLogId(cachedCompletions.logId, logId) - } - - tracer?.({ cacheHit: true }) - CompletionLogger.start(logId) - return this.prepareCompletions( - logId, - cachedCompletions.completions, - document, - context, - position, - docContext.prefix, - docContext.suffix, - multiline, - document.languageId, - true, - abortController.signal - ) - } + const logId = CompletionLogger.create({ + multiline, + providerIdentifier: this.config.providerConfig.identifier, + languageId: document.languageId, + }) tracer?.({ cacheHit: false }) const completers: Provider[] = [] diff --git a/vscode/src/configuration.test.ts b/vscode/src/configuration.test.ts index 676ff5aa0c39..26b13ec735f1 100644 --- a/vscode/src/configuration.test.ts +++ b/vscode/src/configuration.test.ts @@ -30,7 +30,6 @@ describe('getConfiguration', () => { autocompleteAdvancedProvider: 'anthropic', autocompleteAdvancedServerEndpoint: null, autocompleteAdvancedAccessToken: null, - autocompleteAdvancedCache: true, autocompleteAdvancedEmbeddings: true, autocompleteExperimentalCompleteSuggestWidgetSelection: false, }) @@ -77,8 +76,6 @@ describe('getConfiguration', () => { return 'https://example.com/llm' case 'cody.autocomplete.advanced.accessToken': return 'foobar' - case 'cody.autocomplete.advanced.cache': - return false case 'cody.autocomplete.advanced.embeddings': return false case 'cody.autocomplete.experimental.completeSuggestWidgetSelection': @@ -120,7 +117,6 @@ describe('getConfiguration', () => { autocompleteAdvancedProvider: 'unstable-codegen', autocompleteAdvancedServerEndpoint: 'https://example.com/llm', autocompleteAdvancedAccessToken: 'foobar', - autocompleteAdvancedCache: false, autocompleteAdvancedEmbeddings: false, autocompleteExperimentalCompleteSuggestWidgetSelection: false, }) diff --git a/vscode/src/configuration.ts b/vscode/src/configuration.ts index 8c9e396d1436..575113149fe1 100644 --- a/vscode/src/configuration.ts +++ b/vscode/src/configuration.ts @@ -75,7 +75,6 @@ export function getConfiguration(config: ConfigGetter): Configuration { null ), autocompleteAdvancedAccessToken: config.get(CONFIG_KEY.autocompleteAdvancedAccessToken, null), - autocompleteAdvancedCache: config.get(CONFIG_KEY.autocompleteAdvancedCache, true), 
autocompleteAdvancedEmbeddings: config.get(CONFIG_KEY.autocompleteAdvancedEmbeddings, true), autocompleteExperimentalCompleteSuggestWidgetSelection: config.get( CONFIG_KEY.autocompleteExperimentalCompleteSuggestWidgetSelection, diff --git a/vscode/src/main.ts b/vscode/src/main.ts index 7c2583bb2835..ab59fb27f8dd 100644 --- a/vscode/src/main.ts +++ b/vscode/src/main.ts @@ -10,7 +10,6 @@ import { ContextProvider } from './chat/ContextProvider' import { InlineChatViewManager } from './chat/InlineChatViewProvider' import { MessageProviderOptions } from './chat/MessageProvider' import { CODY_FEEDBACK_URL } from './chat/protocol' -import { CompletionsCache } from './completions/cache' import { VSCodeDocumentHistory } from './completions/history' import * as CompletionsLogger from './completions/logger' import { createProviderConfig } from './completions/providers/createProvider' @@ -423,7 +422,6 @@ function createCompletionsProvider( history, statusBar, codebaseContext, - cache: config.autocompleteAdvancedCache ? new CompletionsCache() : null, isEmbeddingsContextEnabled: config.autocompleteAdvancedEmbeddings, completeSuggestWidgetSelection: config.autocompleteExperimentalCompleteSuggestWidgetSelection, }) diff --git a/vscode/test/completions/completions-dataset.ts b/vscode/test/completions/completions-dataset.ts index 557843f45004..c3588b71b3fb 100644 --- a/vscode/test/completions/completions-dataset.ts +++ b/vscode/test/completions/completions-dataset.ts @@ -476,7 +476,6 @@ export const completionsDataset: Sample[] = [ import { vsCodeMocks } from '../testutils/mocks' import { CodyCompletionItemProvider } from '.' - import { CompletionsCache } from './cache' import { History } from './history' import { createProviderConfig } from './providers/anthropic' @@ -565,7 +564,6 @@ export const completionsDataset: Sample[] = [ completions: vscode.InlineCompletionItem[] }> beforeEach(() => { - const cache = new CompletionsCache() complete = async ( code: string, responses?: CompletionResponse[] | 'stall', diff --git a/vscode/test/completions/run-code-completions-on-dataset.ts b/vscode/test/completions/run-code-completions-on-dataset.ts index 9cb48ff96199..59e554840c9e 100644 --- a/vscode/test/completions/run-code-completions-on-dataset.ts +++ b/vscode/test/completions/run-code-completions-on-dataset.ts @@ -63,7 +63,6 @@ async function initCompletionsProvider(context: GetContextResult): Promise Promise.resolve(context), })
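
Reader's note on the simplified request path: the `+` hunks in vscode/src/completions/request-manager.ts above reduce RequestManager.request to a single pass over the providers, keeping only the abort handling (a separate AbortController is forwarded to the network layer so the user's cancellation does not tear down the in-flight request, and the caller's signal is checked once the results arrive). The following is a minimal, self-contained TypeScript sketch of that retained pattern only; FakeProvider, the request helper, and the string completions are illustrative stand-ins and not the repository's actual Provider API from vscode/src/completions/providers/provider.ts.

    // Sketch of the control flow kept in request-manager.ts after this patch.
    // The caller's AbortSignal only gates whether results are surfaced; the
    // network layer gets its own AbortController so an in-flight request is
    // not cancelled when the user abandons the completion. All names below
    // are illustrative, not the repository's real types.

    interface FakeProvider {
        generateCompletions(signal: AbortSignal): Promise<string[]>
    }

    async function request(providers: FakeProvider[], signal: AbortSignal): Promise<string[]> {
        // Independent controller forwarded to the providers' network requests.
        const networkRequestAbortController = new AbortController()

        const results = await Promise.all(
            providers.map(p => p.generateCompletions(networkRequestAbortController.signal))
        )
        const completions = results.flat()

        // Mirrors the `if (signal.aborted) { throw new Error('aborted') }` check above.
        if (signal.aborted) {
            throw new Error('aborted')
        }
        return completions
    }

    // Tiny usage example with a stub provider, echoing the updated test setup
    // (requestManager.request(DOCUMENT_URI, LOG_ID, prefix, [provider], [], signal)).
    const stub: FakeProvider = {
        generateCompletions: async () => ["log('hello')"],
    }

    request([stub], new AbortController().signal)
        .then(completions => console.log(completions)) // ["log('hello')"]
        .catch(console.error)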