Skip to content

Commit

Permalink
fix: import LanguageModelChat from @types/vscode (#80)
Browse files Browse the repository at this point in the history
* docs: update examples for rendering prompts

* fix: import `LanguageModelChat` from `@types/vscode`
  • Loading branch information
joyceerhl authored Sep 3, 2024
1 parent a618ce9 commit e0bc4ac
Show file tree
Hide file tree
Showing 6 changed files with 27 additions and 24 deletions.
27 changes: 15 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,10 @@ Note: if your codebase depends on both `@vscode/prompt-tsx` and another library

Next, your extension can use `renderPrompt` to render a TSX prompt. Here is an example of using TSX prompts in a Copilot chat participant that suggests SQL queries based on database context:
```ts
import { renderPrompt, Cl100KBaseTokenizer } from '@vscode/prompt-tsx';
import { renderPrompt } from '@vscode/prompt-tsx';
import * as vscode from 'vscode';
import { TestPrompt } from './prompt';

const tokenizer = new Cl100KBaseTokenizer();
const participant = vscode.chat.createChatParticipant(
"mssql",
async (
Expand All @@ -73,19 +72,22 @@ const participant = vscode.chat.createChatParticipant(
) => {
response.progress("Reading database context...");

const models = await vscode.lm.selectChatModels({ family: 'gpt-4' });
if (models.length === 0) {
// No models available, return early
return;
}
const chatModel = models[0];

// Render TSX prompt
const { messages } = await renderPrompt(
TestPrompt,
{ userQuery: request.prompt },
{ modelMaxPromptTokens: 4096 },
tokenizer
chatModel
);
const models = await vscode.lm.selectChatModels({ family: 'gpt-4' });
if (models.length === 0) {
// No models available, return early
return;
}
const chatRequest = await models[0].sendChatRequest(

const chatRequest = await chatModel.sendChatRequest(
messages,
{},
token
Expand All @@ -112,11 +114,12 @@ export interface PromptState {

export class TestPrompt extends PromptElement<PromptProps, PromptState> {
override async prepare() {
}

async render(state: PromptState, sizing: PromptSizing) {
const sqlExtensionApi = await vscode.extensions.getExtension('ms-mssql.mssql')?.activate();
return { creationScript: await sqlExtensionApi.getDatabaseCreateScript?.() };
}
const creationScript = await sqlExtensionApi.getDatabaseCreateScript?.();

render(state: PromptState, sizing: PromptSizing) {
return (
<>
<AssistantMessage>
Expand Down
12 changes: 6 additions & 6 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@vscode/prompt-tsx",
"version": "0.2.7-alpha",
"version": "0.2.8-alpha",
"description": "Declare LLM prompts with TSX",
"main": "./dist/base/index.js",
"types": "./dist/base/index.d.ts",
Expand All @@ -23,7 +23,7 @@
"@types/node": "^20.11.30",
"@vscode/test-cli": "^0.0.9",
"@vscode/test-electron": "^2.4.1",
"@types/vscode": "^1.89.0",
"@types/vscode": "^1.92.0",
"esbuild": "0.20.2",
"mocha": "^10.2.0",
"npm-dts": "^1.3.12",
Expand Down
4 changes: 2 additions & 2 deletions src/base/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
* Copyright (c) Microsoft Corporation and GitHub. All rights reserved.
*--------------------------------------------------------------------------------------------*/

import type { CancellationToken, Progress } from 'vscode';
import type { CancellationToken, ChatResponsePart, LanguageModelChat, Progress } from 'vscode';
import { PromptElementJSON } from './jsonTypes';
import { ChatMessage, ChatRole } from './openai';
import { MetadataMap, PromptRenderer } from './promptRenderer';
import { PromptReference } from './results';
import { AnyTokenizer, ITokenizer } from './tokenizer/tokenizer';
import { BasePromptElementProps, IChatEndpointInfo, PromptElementCtor } from './types';
import { ChatDocumentContext, ChatResponsePart, LanguageModelChat, LanguageModelChatMessage } from './vscodeTypes.d';
import { ChatDocumentContext, LanguageModelChatMessage } from './vscodeTypes.d';

export * as JSONTree from './jsonTypes';
export { AssistantChatMessage, ChatMessage, ChatRole, FunctionChatMessage, SystemChatMessage, ToolChatMessage, UserChatMessage } from './openai';
Expand Down
2 changes: 1 addition & 1 deletion src/base/tsx-globals.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* Copyright (c) Microsoft Corporation and GitHub. All rights reserved.
*--------------------------------------------------------------------------------------------*/

import { PieceJSON, PromptElementJSON } from './jsonTypes';
import { PromptElementJSON } from './jsonTypes';
import { PromptMetadata, PromptReference } from './results';
import { URI } from './util/vs/common/uri';
import { ChatDocumentContext } from './vscodeTypes';
Expand Down
2 changes: 1 addition & 1 deletion src/base/vscodeTypes.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* Copyright (c) Microsoft Corporation and GitHub. All rights reserved.
*--------------------------------------------------------------------------------------------*/

import type { CancellationToken, Command, Location, MarkdownString, ProviderResult, Range, ThemeIcon, Uri } from 'vscode';
import type { CancellationToken, Command, Location, MarkdownString, ProviderResult, ThemeIcon, Uri } from 'vscode';

/**
* Represents a part of a chat response that is formatted as Markdown.
Expand Down

0 comments on commit e0bc4ac

Please sign in to comment.