@@ -23,37 +23,55 @@
 async def test_upload_from_pathlike_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     async with AsyncClient() as client:
-        session = client._files
+        session = client.files
         file = await session.prepare_file(IMAGE_FILEPATH)
         assert file
         assert isinstance(file, FileHandle)
         logging.info(f"Uploaded file: {file}")
+        image = await session.prepare_image(IMAGE_FILEPATH)
+        assert image
+        assert isinstance(image, FileHandle)
+        logging.info(f"Uploaded image: {image}")
+        # Even with the same data uploaded, assigned identifiers should differ
+        assert image != file
 
 
 @pytest.mark.asyncio
 @pytest.mark.lmstudio
 async def test_upload_from_file_obj_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     async with AsyncClient() as client:
-        session = client._files
+        session = client.files
         with open(IMAGE_FILEPATH, "rb") as f:
             file = await session.prepare_file(f)
         assert file
         assert isinstance(file, FileHandle)
         logging.info(f"Uploaded file: {file}")
+        with open(IMAGE_FILEPATH, "rb") as f:
+            image = await session.prepare_image(f)
+        assert image
+        assert isinstance(image, FileHandle)
+        logging.info(f"Uploaded image: {image}")
+        # Even with the same data uploaded, assigned identifiers should differ
+        assert image != file
 
 
 @pytest.mark.asyncio
 @pytest.mark.lmstudio
 async def test_upload_from_bytesio_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     async with AsyncClient() as client:
-        session = client._files
-        with open(IMAGE_FILEPATH, "rb") as f:
-            file = await session.prepare_file(BytesIO(f.read()))
+        session = client.files
+        file = await session.prepare_file(BytesIO(IMAGE_FILEPATH.read_bytes()))
         assert file
         assert isinstance(file, FileHandle)
         logging.info(f"Uploaded file: {file}")
+        image = await session.prepare_image(BytesIO(IMAGE_FILEPATH.read_bytes()))
+        assert image
+        assert isinstance(image, FileHandle)
+        logging.info(f"Uploaded image: {image}")
+        # Even with the same data uploaded, assigned identifiers should differ
+        assert image != file
 
 
 @pytest.mark.asyncio
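The upload tests above exercise three input forms (path-like, open binary file object, in-memory buffer) against the now-public files session. A minimal standalone sketch of that surface, assuming the SDK is importable as `lmstudio` and using a hypothetical asset path (both are assumptions, not shown in this diff):

from io import BytesIO
from pathlib import Path

from lmstudio import AsyncClient  # assumed import path

IMAGE_FILEPATH = Path("tests/files/example-image.png")  # hypothetical test asset

async def upload_examples() -> None:
    async with AsyncClient() as client:
        files = client.files  # public session, replacing the private client._files
        # 1. From a path-like object
        file_handle = await files.prepare_file(IMAGE_FILEPATH)
        # 2. From an open binary file object
        with open(IMAGE_FILEPATH, "rb") as f:
            image_handle = await files.prepare_image(f)
        # 3. From an in-memory buffer
        buffered_handle = await files.prepare_image(BytesIO(IMAGE_FILEPATH.read_bytes()))
        # Each upload is assigned its own identifier, even for identical bytes
        assert file_handle != image_handle != buffered_handle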
@@ -64,9 +82,9 @@ async def test_vlm_predict_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     model_id = EXPECTED_VLM_ID
     async with AsyncClient() as client:
-        file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
+        image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
         history = Chat()
-        history.add_user_message((prompt, file_handle))
+        history.add_user_message((prompt, image_handle))
         vlm = await client.llm.model(model_id)
         response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
         logging.info(f"VLM response: {response!r}")
@@ -84,9 +102,9 @@ async def test_non_vlm_predict_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     model_id = "hugging-quants/llama-3.2-1b-instruct"
     async with AsyncClient() as client:
-        file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
+        image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
         history = Chat()
-        history.add_user_message((prompt, file_handle))
+        history.add_user_message((prompt, image_handle))
         llm = await client.llm.model(model_id)
         with pytest.raises(LMStudioServerError) as exc_info:
             await llm.respond(history)
@@ -101,9 +119,9 @@ async def test_vlm_predict_image_param_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     model_id = EXPECTED_VLM_ID
     async with AsyncClient() as client:
-        file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
+        image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
         history = Chat()
-        history.add_user_message(prompt, images=[file_handle])
+        history.add_user_message(prompt, images=[image_handle])
         vlm = await client.llm.model(model_id)
         response = await vlm.respond(history, config=SHORT_PREDICTION_CONFIG)
         logging.info(f"VLM response: {response!r}")
@@ -121,9 +139,9 @@ async def test_non_vlm_predict_image_param_async(caplog: LogCap) -> None:
     caplog.set_level(logging.DEBUG)
     model_id = "hugging-quants/llama-3.2-1b-instruct"
     async with AsyncClient() as client:
-        file_handle = await client._files.prepare_file(IMAGE_FILEPATH)
+        image_handle = await client.files.prepare_image(IMAGE_FILEPATH)
         history = Chat()
-        history.add_user_message(prompt, images=[file_handle])
+        history.add_user_message(prompt, images=[image_handle])
         llm = await client.llm.model(model_id)
         with pytest.raises(LMStudioServerError) as exc_info:
             await llm.respond(history)
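The prediction tests reduce to the flow below: upload an image, attach the handle to a chat message, and ask a vision-capable model to respond (the non-VLM variants assert that text-only models reject image input with LMStudioServerError). A sketch under the same assumptions as above; the model identifier and image path are illustrative only:

import asyncio

from lmstudio import AsyncClient, Chat  # assumed import path

async def describe_image() -> None:
    async with AsyncClient() as client:
        # Upload returns a handle that can be attached to a chat message
        image_handle = await client.files.prepare_image("example-image.png")  # example path
        history = Chat()
        history.add_user_message("What is in this image?", images=[image_handle])
        vlm = await client.llm.model("example/vision-model")  # illustrative model id
        response = await vlm.respond(history)
        print(response)

asyncio.run(describe_image())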