Skip to content

Commit

Permalink
fix: improve performance in AI request logging (#14769)
Browse files Browse the repository at this point in the history
Instead of appending AI requests line by line to the output channel, we
now append them in a single step. Since appending to a channel is a
relatively expensive operation, this change significantly improves
performance for large requests.

This is particularly noticeable when AI inline code completions are
enabled for large files, as many requests may be sent during typing.

Also requests text responses from the LLM instead of streamed responses
in the autocomplete case.
  • Loading branch information
sdirix authored Jan 27, 2025
1 parent 42e1556 commit 7533c95
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,9 @@ export class CodeCompletionAgentImpl implements CodeCompletionAgent {
const requestId = generateUuid();
const request: LanguageModelRequest = {
messages: [{ type: 'text', actor: 'user', query: prompt }],
settings: {
stream: false
}
};
if (token.isCancellationRequested) {
return undefined;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -366,7 +366,7 @@ const languageModelOutputHandler = (
'Sending request:'
);
const formattedRequest = formatJsonWithIndentation(args[0]);
formattedRequest.forEach(line => outputChannel.appendLine(line));
outputChannel.append(formattedRequest.join('\n'));
if (args[1]) {
args[1] = new Proxy(args[1], {
get<CK extends keyof CancellationToken>(
Expand Down
2 changes: 1 addition & 1 deletion packages/ai-openai/src/node/openai-language-model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ export class OpenAiModel implements LanguageModel {
const settings = this.getSettings(request);
const openai = this.initializeOpenAi();

if (this.isNonStreamingModel(this.model)) {
if (this.isNonStreamingModel(this.model) || (typeof settings.stream === 'boolean' && !settings.stream)) {
return this.handleNonStreamingRequest(openai, request);
}

Expand Down

0 comments on commit 7533c95

Please sign in to comment.