Skip to content

Commit 972f35b

Browse files
authored
Add image preparation APIs (#38)
Also make debug logging for file uploads less noisy. Implements the remaining components of #34.
1 parent a4c74a5 commit 972f35b

7 files changed

+140
-49
lines changed

src/lmstudio/async_api.py

+29-9
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,7 @@
8787
check_model_namespace,
8888
load_struct,
8989
_model_spec_to_api_dict,
90+
_redact_json,
9091
)
9192
from ._kv_config import TLoadConfig, TLoadConfigDict, dict_from_fields_key
9293
from ._sdk_models import (
@@ -368,7 +369,14 @@ async def _send_call(
368369
"""Initiate remote call to specified endpoint."""
369370
self._ensure_connected("send remote procedure call")
370371
call_message = rpc.get_rpc_message(endpoint, params)
371-
self._logger.debug("Sending RPC request", json=call_message)
372+
# TODO: Improve logging for large requests (such as file uploads)
373+
# without requiring explicit special casing here
374+
logged_message: DictObject
375+
if call_message.get("endpoint") == "uploadFileBase64":
376+
logged_message = _redact_json(call_message)
377+
else:
378+
logged_message = call_message
379+
self._logger.debug("Sending RPC request", json=logged_message)
372380
await self._send_json(call_message)
373381

374382
async def remote_call(
@@ -593,8 +601,15 @@ async def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
593601
async def prepare_file(
594602
self, src: LocalFileInput, name: str | None = None
595603
) -> FileHandle:
596-
"""Add a file to the server."""
597-
# Private until LM Studio file handle support stabilizes
604+
"""Add a file to the server. Returns a file handle for use in prediction requests."""
605+
file_data = _LocalFileData(src, name)
606+
return await self._fetch_file_handle(file_data)
607+
608+
@sdk_public_api_async()
609+
async def prepare_image(
610+
self, src: LocalFileInput, name: str | None = None
611+
) -> FileHandle:
612+
"""Add an image to the server. Returns a file handle for use in prediction requests."""
598613
file_data = _LocalFileData(src, name)
599614
return await self._fetch_file_handle(file_data)
600615

@@ -672,7 +687,7 @@ def _system_session(self) -> AsyncSessionSystem:
672687

673688
@property
674689
def _files_session(self) -> _AsyncSessionFiles:
675-
return self._client._files
690+
return self._client.files
676691

677692
async def _get_load_config(self, model_specifier: AnyModelSpecifier) -> DictObject:
678693
"""Get the model load config for the specified model."""
@@ -1490,9 +1505,8 @@ def system(self) -> AsyncSessionSystem:
14901505
return self._get_session(AsyncSessionSystem)
14911506

14921507
@property
1493-
def _files(self) -> _AsyncSessionFiles:
1508+
def files(self) -> _AsyncSessionFiles:
14941509
"""Return the files API client session."""
1495-
# Private until LM Studio file handle support stabilizes
14961510
return self._get_session(_AsyncSessionFiles)
14971511

14981512
@property
@@ -1505,9 +1519,15 @@ def repository(self) -> AsyncSessionRepository:
15051519
async def prepare_file(
15061520
self, src: LocalFileInput, name: str | None = None
15071521
) -> FileHandle:
1508-
"""Add a file to the server."""
1509-
# Private until LM Studio file handle support stabilizes
1510-
return await self._files.prepare_file(src, name)
1522+
"""Add a file to the server. Returns a file handle for use in prediction requests."""
1523+
return await self.files.prepare_file(src, name)
1524+
1525+
@sdk_public_api_async()
1526+
async def prepare_image(
1527+
self, src: LocalFileInput, name: str | None = None
1528+
) -> FileHandle:
1529+
"""Add an image to the server. Returns a file handle for use in prediction requests."""
1530+
return await self.files.prepare_image(src, name)
15111531

15121532
@sdk_public_api_async()
15131533
async def list_downloaded_models(

src/lmstudio/history.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252
ChatMessagePartToolCallResultData as ToolCallResultData,
5353
ChatMessagePartToolCallResultDataDict as ToolCallResultDataDict,
5454
FilesRpcUploadFileBase64Parameter,
55-
FileType as FileHandleType,
55+
FileType,
5656
ToolCallRequest as ToolCallRequest,
5757
FunctionToolCallRequestDict as ToolCallRequestDict,
5858
)
@@ -70,7 +70,7 @@
7070
"FileHandle",
7171
"FileHandleDict",
7272
"FileHandleInput",
73-
"FileHandleType",
73+
"FileType",
7474
"SystemPrompt",
7575
"SystemPromptContent",
7676
"ToolCallRequest",

src/lmstudio/json_api.py

+5
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
assert_never,
2727
cast,
2828
get_type_hints,
29+
overload,
2930
)
3031

3132

@@ -490,6 +491,10 @@ class ActResult:
490491
# fmt: on
491492

492493

494+
@overload
495+
def _redact_json(data: DictObject) -> DictObject: ...
496+
@overload
497+
def _redact_json(data: None) -> None: ...
493498
def _redact_json(data: DictObject | None) -> DictObject | None:
494499
"""Show top level structure without any substructure details."""
495500
if data is None:

src/lmstudio/sync_api.py

+31-10
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@
112112
check_model_namespace,
113113
load_struct,
114114
_model_spec_to_api_dict,
115+
_redact_json,
115116
)
116117
from ._kv_config import TLoadConfig, TLoadConfigDict, dict_from_fields_key
117118
from ._sdk_models import (
@@ -548,7 +549,14 @@ def _send_call(
548549
"""Initiate remote call to specified endpoint."""
549550
self._ensure_connected("send remote procedure call")
550551
call_message = rpc.get_rpc_message(endpoint, params)
551-
self._logger.debug("Sending RPC request", json=call_message)
552+
# TODO: Improve logging for large requests (such as file uploads)
553+
# without requiring explicit special casing here
554+
logged_message: DictObject
555+
if call_message.get("endpoint") == "uploadFileBase64":
556+
logged_message = _redact_json(call_message)
557+
else:
558+
logged_message = call_message
559+
self._logger.debug("Sending RPC request", json=logged_message)
552560
self._send_json(call_message)
553561

554562
def remote_call(
@@ -766,8 +774,13 @@ def _fetch_file_handle(self, file_data: _LocalFileData) -> FileHandle:
766774

767775
@sdk_public_api()
768776
def prepare_file(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
769-
"""Add a file to the server."""
770-
# Private until LM Studio file handle support stabilizes
777+
"""Add a file to the server. Returns a file handle for use in prediction requests."""
778+
file_data = _LocalFileData(src, name)
779+
return self._fetch_file_handle(file_data)
780+
781+
@sdk_public_api()
782+
def prepare_image(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
783+
"""Add an image to the server. Returns a file handle for use in prediction requests."""
771784
file_data = _LocalFileData(src, name)
772785
return self._fetch_file_handle(file_data)
773786

@@ -838,7 +851,7 @@ def _system_session(self) -> SyncSessionSystem:
838851

839852
@property
840853
def _files_session(self) -> _SyncSessionFiles:
841-
return self._client._files
854+
return self._client.files
842855

843856
def _get_load_config(self, model_specifier: AnyModelSpecifier) -> DictObject:
844857
"""Get the model load config for the specified model."""
@@ -1806,9 +1819,8 @@ def system(self) -> SyncSessionSystem:
18061819
return self._get_session(SyncSessionSystem)
18071820

18081821
@property
1809-
def _files(self) -> _SyncSessionFiles:
1822+
def files(self) -> _SyncSessionFiles:
18101823
"""Return the files API client session."""
1811-
# Private until LM Studio file handle support stabilizes
18121824
return self._get_session(_SyncSessionFiles)
18131825

18141826
@property
@@ -1819,9 +1831,13 @@ def repository(self) -> SyncSessionRepository:
18191831
# Convenience methods
18201832
@sdk_public_api()
18211833
def prepare_file(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
1822-
"""Add a file to the server."""
1823-
# Private until LM Studio file handle support stabilizes
1824-
return self._files.prepare_file(src, name)
1834+
"""Add a file to the server. Returns a file handle for use in prediction requests."""
1835+
return self.files.prepare_file(src, name)
1836+
1837+
@sdk_public_api()
1838+
def prepare_image(self, src: LocalFileInput, name: str | None = None) -> FileHandle:
1839+
"""Add an image to the server. Returns a file handle for use in prediction requests."""
1840+
return self.files.prepare_image(src, name)
18251841

18261842
@sdk_public_api()
18271843
def list_downloaded_models(
@@ -1893,10 +1909,15 @@ def embedding_model(
18931909
@sdk_public_api()
18941910
def prepare_file(src: LocalFileInput, name: str | None = None) -> FileHandle:
18951911
"""Add a file to the server using the default global client."""
1896-
# Private until LM Studio file handle support stabilizes
18971912
return get_default_client().prepare_file(src, name)
18981913

18991914

1915+
@sdk_public_api()
1916+
def prepare_image(src: LocalFileInput, name: str | None = None) -> FileHandle:
1917+
"""Add an image to the server using the default global client."""
1918+
return get_default_client().prepare_image(src, name)
1919+
1920+
19001921
@sdk_public_api()
19011922
def list_downloaded_models(
19021923
namespace: str | None = None,

tests/async/test_images_async.py

+31-13
Original file line numberDiff line numberDiff line change
@@ -23,37 +23,55 @@
2323
async def test_upload_from_pathlike_async(caplog: LogCap) -> None:
2424
caplog.set_level(logging.DEBUG)
2525
async with AsyncClient() as client:
26-
session = client._files
26+
session = client.files
2727
file = await session.prepare_file(IMAGE_FILEPATH)
2828
assert file
2929
assert isinstance(file, FileHandle)
3030
logging.info(f"Uploaded file: {file}")
31+
image = await session.prepare_image(IMAGE_FILEPATH)
32+
assert image
33+
assert isinstance(image, FileHandle)
34+
logging.info(f"Uploaded image: {image}")
35+
# Even with the same data uploaded, assigned identifiers should differ
36+
assert image != file
3137

3238

3339
@pytest.mark.asyncio
3440
@pytest.mark.lmstudio
3541
async def test_upload_from_file_obj_async(caplog: LogCap) -> None:
3642
caplog.set_level(logging.DEBUG)
3743
async with AsyncClient() as client:
38-
session = client._files
44+
session = client.files
3945
with open(IMAGE_FILEPATH, "rb") as f:
4046
file = await session.prepare_file(f)
4147
assert file
4248
assert isinstance(file, FileHandle)
4349
logging.info(f"Uploaded file: {file}")
50+
with open(IMAGE_FILEPATH, "rb") as f:
51+
image = await session.prepare_image(f)
52+
assert image
53+
assert isinstance(image, FileHandle)
54+
logging.info(f"Uploaded image: {image}")
55+
# Even with the same data uploaded, assigned identifiers should differ
56+
assert image != file
4457

4558

4659
@pytest.mark.asyncio
4760
@pytest.mark.lmstudio
4861
async def test_upload_from_bytesio_async(caplog: LogCap) -> None:
4962
caplog.set_level(logging.DEBUG)
5063
async with AsyncClient() as client:
51-
session = client._files
52-
with open(IMAGE_FILEPATH, "rb") as f:
53-
file = await session.prepare_file(BytesIO(f.read()))
64+
session = client.files
65+
file = await session.prepare_file(BytesIO(IMAGE_FILEPATH.read_bytes()))
5466
assert file
5567
assert isinstance(file, FileHandle)
5668
logging.info(f"Uploaded file: {file}")
69+
image = await session.prepare_image(BytesIO(IMAGE_FILEPATH.read_bytes()))
70+
assert image
71+
assert isinstance(image, FileHandle)
72+
logging.info(f"Uploaded image: {image}")
73+
# Even with the same data uploaded, assigned identifiers should differ
74+
assert image != file
5775

5876

5977
@pytest.mark.asyncio
@@ -64,9 +82,9 @@ async def test_vlm_predict_async(caplog: LogCap) -> None:
6482
caplog.set_level(logging.DEBUG)
6583
model_id = EXPECTED_VLM_ID
6684
async with AsyncClient() as client:
67-
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
85+
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
6886
history = Chat()
69-
history.add_user_message((prompt, file_handle))
87+
history.add_user_message((prompt, image_handle))
7088
vlm = await client.llm.model(model_id)
7189
response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
7290
logging.info(f"VLM response: {response!r}")
@@ -84,9 +102,9 @@ async def test_non_vlm_predict_async(caplog: LogCap) -> None:
84102
caplog.set_level(logging.DEBUG)
85103
model_id = "hugging-quants/llama-3.2-1b-instruct"
86104
async with AsyncClient() as client:
87-
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
105+
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
88106
history = Chat()
89-
history.add_user_message((prompt, file_handle))
107+
history.add_user_message((prompt, image_handle))
90108
llm = await client.llm.model(model_id)
91109
with pytest.raises(LMStudioServerError) as exc_info:
92110
await llm.respond(history)
@@ -101,9 +119,9 @@ async def test_vlm_predict_image_param_async(caplog: LogCap) -> None:
101119
caplog.set_level(logging.DEBUG)
102120
model_id = EXPECTED_VLM_ID
103121
async with AsyncClient() as client:
104-
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
122+
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
105123
history = Chat()
106-
history.add_user_message(prompt, images=[file_handle])
124+
history.add_user_message(prompt, images=[image_handle])
107125
vlm = await client.llm.model(model_id)
108126
response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
109127
logging.info(f"VLM response: {response!r}")
@@ -121,9 +139,9 @@ async def test_non_vlm_predict_image_param_async(caplog: LogCap) -> None:
121139
caplog.set_level(logging.DEBUG)
122140
model_id = "hugging-quants/llama-3.2-1b-instruct"
123141
async with AsyncClient() as client:
124-
file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
142+
image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
125143
history = Chat()
126-
history.add_user_message(prompt, images=[file_handle])
144+
history.add_user_message(prompt, images=[image_handle])
127145
llm = await client.llm.model(model_id)
128146
with pytest.raises(LMStudioServerError) as exc_info:
129147
await llm.respond(history)

0 commit comments

Comments (0)