 from .history import (
     Chat,
     ChatHistoryDataDict,
-    _FileHandle,
-    _FileInputType,
+    FileHandle,
+    _FileCacheInputType,
     _LocalFileData,
 )
 from .json_api import (
@@ -581,18 +581,18 @@ class _AsyncSessionFiles(AsyncSession):

     API_NAMESPACE = "files"

-    async def _fetch_file_handle(self, file_data: _LocalFileData) -> _FileHandle:
+    async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
         handle = await self.remote_call("uploadFileBase64", file_data._as_fetch_param())
         # Returned dict provides the handle identifier, file type, and size in bytes
         # Add the extra fields needed for a FileHandle (aka ChatMessagePartFileData)
         handle["name"] = file_data.name
         handle["type"] = "file"
-        return load_struct(handle, _FileHandle)
+        return load_struct(handle, FileHandle)

     @sdk_public_api_async()
     async def _add_temp_file(
-        self, src: _FileInputType, name: str | None = None
-    ) -> _FileHandle:
+        self, src: _FileCacheInputType, name: str | None = None
+    ) -> FileHandle:
         """Add a file to the server."""
         # Private until LM Studio file handle support stabilizes
         file_data = _LocalFileData(src, name)
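Note (not part of the diff): the comments above describe augmenting the uploadFileBase64 response before loading it into the renamed FileHandle struct. A minimal, self-contained sketch of that augmentation step follows; the response keys and the FileHandleSketch stand-in are illustrative assumptions, since the real FileHandle and load_struct come from the SDK's history and json_api modules.

    # Illustrative stand-ins only; the upload response field names are assumed,
    # and the SDK's real FileHandle/load_struct may differ in shape.
    from dataclasses import dataclass

    @dataclass
    class FileHandleSketch:
        name: str
        type: str
        identifier: str
        fileType: str
        sizeBytes: int

    def augment_upload_response(handle: dict, file_name: str) -> FileHandleSketch:
        # Mirror the diff above: fill in the client-side name and part type
        # before converting the raw upload response into a handle struct.
        handle["name"] = file_name
        handle["type"] = "file"
        return FileHandleSketch(**handle)

    example = augment_upload_response(
        {"identifier": "abc123", "fileType": "image", "sizeBytes": 2048},
        file_name="diagram.png",
    )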
@@ -795,7 +795,7 @@ async def _load_new_instance(
         on_load_progress: ModelLoadingCallback | None,
     ) -> TAsyncModelHandle:
         channel_type = self._API_TYPES.REQUEST_NEW_INSTANCE
-        config_type = self._API_TYPES.MODEL_LOAD_CONFIG
+        config_type: type[TLoadConfig] = self._API_TYPES.MODEL_LOAD_CONFIG
         endpoint = LoadModelEndpoint(
             model_key,
             instance_identifier,
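Note (not part of the diff): the added annotation spells out that the registry attribute is the class object for the generic load-config type. A small, self-contained sketch of the type[TLoadConfig] pattern, using purely illustrative class names rather than the SDK's own:

    from typing import TypeVar

    class BaseLoadConfigSketch:
        pass

    class LlmLoadConfigSketch(BaseLoadConfigSketch):
        pass

    TLoadConfig = TypeVar("TLoadConfig", bound=BaseLoadConfigSketch)

    def new_config(config_type: type[TLoadConfig]) -> TLoadConfig:
        # The parameter receives the class itself; annotating it as
        # type[TLoadConfig] lets a type checker tie the instantiated result
        # back to the caller's concrete config class.
        return config_type()

    config = new_config(LlmLoadConfigSketch)  # inferred as LlmLoadConfigSketch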
@@ -847,7 +847,7 @@ async def list_downloaded(self) -> Sequence[TAsyncDownloadedModel]:
         models = await self._system_session.list_downloaded_models()
         return [m for m in models if self._is_relevant_model(m)]

-    async def _fetch_file_handle(self, file_data: _LocalFileData) -> _FileHandle:
+    async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
         return await self._files_session._fetch_file_handle(file_data)


@@ -1054,7 +1054,6 @@ async def _respond_stream(
         """Request a response in an ongoing assistant chat session and stream the generated tokens."""
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
-        await history._fetch_file_handles_async(self._fetch_file_handle)
         endpoint = ChatResponseEndpoint(
             model_specifier,
             history,
@@ -1078,7 +1077,6 @@ async def _apply_prompt_template(
         """Apply a prompt template to the given history."""
         if not isinstance(history, Chat):
             history = Chat.from_history(history)
-        await history._fetch_file_handles_async(self._fetch_file_handle)
         if not isinstance(opts, LlmApplyPromptTemplateOpts):
             opts = LlmApplyPromptTemplateOpts.from_dict(opts)
         params = LlmRpcApplyPromptTemplateParameter._from_api_dict(
@@ -1505,8 +1503,8 @@ def repository(self) -> AsyncSessionRepository:
     # Convenience methods
     @sdk_public_api_async()
     async def _add_temp_file(
-        self, src: _FileInputType, name: str | None = None
-    ) -> _FileHandle:
+        self, src: _FileCacheInputType, name: str | None = None
+    ) -> FileHandle:
         """Add a file to the server."""
         # Private until LM Studio file handle support stabilizes
         return await self._files._add_temp_file(src, name)
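Note (not part of the diff): a hedged usage sketch of the renamed convenience method and its FileHandle result. The import path, context-manager usage, and filename below are assumptions for illustration; the method remains underscore-prefixed, so it is not yet a supported public API.

    import asyncio

    from lmstudio import AsyncClient  # assumed import path for the async client

    async def main() -> None:
        # Assumed construction/cleanup style; check the SDK docs for the
        # supported way to open and close an async client session.
        async with AsyncClient() as client:
            # src accepts the inputs covered by _FileCacheInputType (for
            # example a local path); "diagram.png" is purely illustrative.
            handle = await client._add_temp_file("diagram.png")
            print(handle)  # FileHandle describing the uploaded file

    asyncio.run(main())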