Skip to content

Commit

Permalink
style_fixed
Browse files Browse the repository at this point in the history
  • Loading branch information
wqj2004 committed Jan 26, 2025
1 parent fca8e72 commit 46cd3cf
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 16 deletions.
8 changes: 3 additions & 5 deletions ding/worker/collector/tests/test_vllm_collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@
from loguru import logger
from ..vllm_collector import HuggingFaceModelGenerator
from vllm.assets.image import ImageAsset

from enum import Enum
import asyncio
import nest_asyncio
# set a temperature > 0 to get multiple responses
# note that HFModelGenerator's "mm_processor_kwargs" parameter is, by default, set to align with Qwen's settings
model = HuggingFaceModelGenerator('/mnt/afs/share/Qwen2-VL-7B', temperature=0.5)

from enum import Enum


class Modality(Enum):
IMAGE = "image"
Expand Down Expand Up @@ -81,8 +81,6 @@ def get_multi_modal_input(modality: Modality, filenames: list, questions: list)
question = mm_input["question"]
prompts, stop_token_ids = get_prompts_qwen(question, modality)

import asyncio
import nest_asyncio
nest_asyncio.apply()


Expand Down
16 changes: 5 additions & 11 deletions ding/worker/collector/tests/test_vllm_collector_multigpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@
import uuid
from loguru import logger
from vllm import AsyncLLMEngine, AsyncEngineArgs, SamplingParams, RequestOutput
from typing import List, Tuple, Optional
from vllm.assets.image import ImageAsset
from enum import Enum
import concurrent.futures
import asyncio


class VllmActor:
Expand Down Expand Up @@ -155,14 +160,6 @@ def chunk_list(original_list: list, t: int) -> List[list]:
return new_list


from typing import List, Tuple, Optional
import os
from loguru import logger
from vllm.assets.image import ImageAsset
from enum import Enum
import concurrent.futures


class Modality(Enum):
IMAGE = "image"
TEXT = "text"
Expand Down Expand Up @@ -229,9 +226,6 @@ async def run_vllm_collector(gpu_id: int, prompts: List, model_path: str, temper
return responses_list


import asyncio


def start_collector(gpu_id: int, prompts: list, model_path: str, temperature: float) -> List[str]:
# event loop in a process
results = asyncio.run(run_vllm_collector(gpu_id, prompts, model_path, temperature))
Expand Down

0 comments on commit 46cd3cf

Please sign in to comment.