
Commit ae434a3

Remove implicit file handle caching from history.Chat
Preparatory refactoring for #34
1 parent 06565ed

9 files changed: +386 -444 lines
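
With the implicit caching removed, the prediction entry points below no longer upload pending local files on the caller's behalf. A minimal sketch of the explicit flow, using names that appear in this diff (the calling pattern itself is an assumption, not the SDK's documented public API):

from lmstudio.history import FileHandle, _LocalFileData

async def upload_for_chat(files_session) -> FileHandle:
    # Wrap the local file, then upload it explicitly; _fetch_file_handle() is
    # the _AsyncSessionFiles method shown in the diff below.
    file_data = _LocalFileData("report.pdf", None)  # (src, name), as in _add_temp_file
    handle = await files_session._fetch_file_handle(file_data)
    # The FileHandle is what belongs in the Chat history; after this commit,
    # _respond_stream() and _apply_prompt_template() no longer resolve pending
    # files themselves.
    return handle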

src/lmstudio/async_api.py

+10 -12
@@ -34,8 +34,8 @@
 from .history import (
     Chat,
     ChatHistoryDataDict,
-    _FileHandle,
-    _FileInputType,
+    FileHandle,
+    _FileCacheInputType,
     _LocalFileData,
 )
 from .json_api import (
@@ -581,18 +581,18 @@ class _AsyncSessionFiles(AsyncSession):
 
     API_NAMESPACE = "files"
 
-    async def _fetch_file_handle(self, file_data: _LocalFileData) -> _FileHandle:
+    async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
         handle = await self.remote_call("uploadFileBase64", file_data._as_fetch_param())
         # Returned dict provides the handle identifier, file type, and size in bytes
         # Add the extra fields needed for a FileHandle (aka ChatMessagePartFileData)
         handle["name"] = file_data.name
         handle["type"] = "file"
-        return load_struct(handle, _FileHandle)
+        return load_struct(handle, FileHandle)
 
     @sdk_public_api_async()
     async def _add_temp_file(
-        self, src: _FileInputType, name: str | None = None
-    ) -> _FileHandle:
+        self, src: _FileCacheInputType, name: str | None = None
+    ) -> FileHandle:
         """Add a file to the server."""
         # Private until LM Studio file handle support stabilizes
         file_data = _LocalFileData(src, name)
@@ -795,7 +795,7 @@ async def _load_new_instance(
         on_load_progress: ModelLoadingCallback | None,
     ) -> TAsyncModelHandle:
         channel_type = self._API_TYPES.REQUEST_NEW_INSTANCE
-        config_type = self._API_TYPES.MODEL_LOAD_CONFIG
+        config_type: type[TLoadConfig] = self._API_TYPES.MODEL_LOAD_CONFIG
         endpoint = LoadModelEndpoint(
             model_key,
             instance_identifier,
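
The explicit type[TLoadConfig] annotation presumably keeps the load-config class tied to the session's generic type parameter rather than being widened to a plain type. A standalone sketch of the pattern (class and attribute names here are illustrative assumptions, not taken from the SDK):

from typing import Generic, TypeVar

class LlmLoadModelConfig: ...
class EmbeddingLoadModelConfig: ...

# Stand-in for the SDK's TLoadConfig type variable
TLoadConfig = TypeVar("TLoadConfig", LlmLoadModelConfig, EmbeddingLoadModelConfig)

class ModelSession(Generic[TLoadConfig]):
    MODEL_LOAD_CONFIG: type[TLoadConfig]  # filled in by concrete subclasses

    def make_default_config(self) -> TLoadConfig:
        # Annotating the local keeps the checker's view as type[TLoadConfig];
        # without it the attribute lookup may be inferred more loosely.
        config_type: type[TLoadConfig] = self.MODEL_LOAD_CONFIG
        return config_type()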
@@ -847,7 +847,7 @@ async def list_downloaded(self) -> Sequence[TAsyncDownloadedModel]:
         models = await self._system_session.list_downloaded_models()
         return [m for m in models if self._is_relevant_model(m)]
 
-    async def _fetch_file_handle(self, file_data: _LocalFileData) -> _FileHandle:
+    async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
         return await self._files_session._fetch_file_handle(file_data)
 
 
@@ -1054,7 +1054,6 @@ async def _respond_stream(
         """Request a response in an ongoing assistant chat session and stream the generated tokens."""
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
-        await history._fetch_file_handles_async(self._fetch_file_handle)
         endpoint = ChatResponseEndpoint(
             model_specifier,
             history,
@@ -1078,7 +1077,6 @@ async def _apply_prompt_template(
         """Apply a prompt template to the given history."""
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
-        await history._fetch_file_handles_async(self._fetch_file_handle)
         if not isinstance(opts, LlmApplyPromptTemplateOpts):
             opts = LlmApplyPromptTemplateOpts.from_dict(opts)
         params = LlmRpcApplyPromptTemplateParameter._from_api_dict(
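
Both prediction entry points now send the history as-is. Roughly, the dropped call performed something like the following (an assumed sketch; the real Chat._fetch_file_handles_async lives in history.py and is not part of this diff):

class Chat:  # simplified stand-in for history.Chat
    async def _fetch_file_handles_async(self, fetch_file_handle) -> None:
        # Replace any still-local file data in the history with the server-side
        # FileHandle returned by the supplied callback (e.g. the sessions'
        # _fetch_file_handle methods above).
        for pending in self._iter_pending_files():       # hypothetical helper
            handle = await fetch_file_handle(pending)
            self._replace_pending_file(pending, handle)  # hypothetical helper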
@@ -1505,8 +1503,8 @@ def repository(self) -> AsyncSessionRepository:
     # Convenience methods
     @sdk_public_api_async()
     async def _add_temp_file(
-        self, src: _FileInputType, name: str | None = None
-    ) -> _FileHandle:
+        self, src: _FileCacheInputType, name: str | None = None
+    ) -> FileHandle:
         """Add a file to the server."""
         # Private until LM Studio file handle support stabilizes
         return await self._files._add_temp_file(src, name)
