vllm.v1.core.sched.output

CachedRequestData dataclass

Source code in vllm/v1/core/sched/output.py
@bc_linter_include
@dataclass
class CachedRequestData:
    req_ids: list[str]
    # If resumed_from_preemption is False, new_block_ids will be appended to
    # the request's block IDs. If True, new_block_ids will be used as the
    # request's block IDs instead of appending to the existing block IDs.
    resumed_from_preemption: list[bool]
    # NOTE(woosuk): new_token_ids is only used for pipeline parallelism.
    # When PP is not used, new_token_ids will be empty.
    new_token_ids: list[list[int]]
    new_block_ids: list[tuple[list[int], ...] | None]
    num_computed_tokens: list[int]
    num_output_tokens: list[int]

    @property
    def num_reqs(self) -> int:
        return len(self.req_ids)

    @classmethod
    def make_empty(cls) -> CachedRequestData:
        return cls(
            req_ids=[],
            resumed_from_preemption=[],
            new_token_ids=[],
            new_block_ids=[],
            num_computed_tokens=[],
            num_output_tokens=[],
        )
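
The lists above are parallel: index i of resumed_from_preemption, new_token_ids, new_block_ids, num_computed_tokens, and num_output_tokens all describe req_ids[i]. Below is a minimal sketch of how a worker might fold this diff into its locally cached block IDs; apply_cached_request_data and worker_block_ids are illustrative names for this sketch, not part of vLLM's API.

from vllm.v1.core.sched.output import CachedRequestData


def apply_cached_request_data(
    data: CachedRequestData,
    worker_block_ids: dict[str, tuple[list[int], ...]],
) -> None:
    # Walk the parallel lists; each index i describes the request req_ids[i].
    for i, req_id in enumerate(data.req_ids):
        new_ids = data.new_block_ids[i]
        if new_ids is None:
            # No new blocks were allocated for this request in this step.
            continue
        if data.resumed_from_preemption[i]:
            # The request was preempted and its blocks were freed, so the new
            # block IDs replace the cached ones instead of being appended.
            worker_block_ids[req_id] = tuple(list(group) for group in new_ids)
        else:
            # Normal case: append the newly allocated blocks, one list per
            # KV cache group.
            for cached_group, new_group in zip(worker_block_ids[req_id], new_ids):
                cached_group.extend(new_group)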

new_block_ids instance-attribute

new_block_ids: list[tuple[list[int], ...] | None]

new_token_ids instance-attribute

new_token_ids: list[list[int]]

num_computed_tokens instance-attribute

num_computed_tokens: list[int]

num_output_tokens instance-attribute

num_output_tokens: list[int]

num_reqs property

num_reqs: int

req_ids instance-attribute

req_ids: list[str]

resumed_from_preemption instance-attribute

resumed_from_preemption: list[bool]

__init__

__init__(
    req_ids: list[str],
    resumed_from_preemption: list[bool],
    new_token_ids: list[list[int]],
    new_block_ids: list[tuple[list[int], ...] | None],
    num_computed_tokens: list[int],
    num_output_tokens: list[int],
) -> None

make_empty classmethod

make_empty() -> CachedRequestData
Source code in vllm/v1/core/sched/output.py
@classmethod
def make_empty(cls) -> CachedRequestData:
    return cls(
        req_ids=[],
        resumed_from_preemption=[],
        new_token_ids=[],
        new_block_ids=[],
        num_computed_tokens=[],
        num_output_tokens=[],
    )
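
For illustration, the empty container still carries the full parallel-list shape, which lets callers treat a step with no cached requests uniformly:

from vllm.v1.core.sched.output import CachedRequestData

cached = CachedRequestData.make_empty()
assert cached.num_reqs == 0
assert cached.req_ids == [] and cached.new_block_ids == []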

NewRequestData dataclass

Source code in vllm/v1/core/sched/output.py
@bc_linter_include
@dataclass
class NewRequestData:
    req_id: str
    prompt_token_ids: list[int] | None
    mm_features: list[MultiModalFeatureSpec]
    sampling_params: SamplingParams | None
    pooling_params: PoolingParams | None
    block_ids: tuple[list[int], ...]
    num_computed_tokens: int
    lora_request: LoRARequest | None
    prompt_embeds: torch.Tensor | None = None

    @classmethod
    def from_request(
        cls,
        request: Request,
        block_ids: tuple[list[int], ...],
    ) -> NewRequestData:
        return cls(
            req_id=request.request_id,
            prompt_token_ids=request.prompt_token_ids,
            mm_features=request.mm_features,
            sampling_params=request.sampling_params,
            pooling_params=request.pooling_params,
            block_ids=block_ids,
            num_computed_tokens=request.num_computed_tokens,
            lora_request=request.lora_request,
            prompt_embeds=request.prompt_embeds,
        )

    def __repr__(self) -> str:
        prompt_embeds_shape = (
            self.prompt_embeds.shape if self.prompt_embeds is not None else None
        )
        return (
            f"NewRequestData("
            f"req_id={self.req_id},"
            f"prompt_token_ids={self.prompt_token_ids},"
            f"mm_features={self.mm_features},"
            f"sampling_params={self.sampling_params},"
            f"block_ids={self.block_ids},"
            f"num_computed_tokens={self.num_computed_tokens},"
            f"lora_request={self.lora_request},"
            f"prompt_embeds_shape={prompt_embeds_shape}"
            ")"
        )

    # Version of __repr__ with the prompt data obfuscated
    def anon_repr(self) -> str:
        prompt_token_ids_len = (
            len(self.prompt_token_ids) if self.prompt_token_ids is not None else None
        )
        prompt_embeds_shape = (
            self.prompt_embeds.shape if self.prompt_embeds is not None else None
        )
        return (
            f"NewRequestData("
            f"req_id={self.req_id},"
            f"prompt_token_ids_len={prompt_token_ids_len},"
            f"mm_features={self.mm_features},"
            f"sampling_params={self.sampling_params},"
            f"block_ids={self.block_ids},"
            f"num_computed_tokens={self.num_computed_tokens},"
            f"lora_request={self.lora_request},"
            f"prompt_embeds_shape={prompt_embeds_shape}"
            ")"
        )
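
The two representations differ only in how they report the prompt: __repr__ includes the raw token IDs, while anon_repr logs just their length (and the prompt_embeds shape), keeping prompt contents out of logs. A small sketch with illustrative values:

from vllm.v1.core.sched.output import NewRequestData

req = NewRequestData(
    req_id="req-0",
    prompt_token_ids=[1, 2, 3],
    mm_features=[],
    sampling_params=None,
    pooling_params=None,
    block_ids=([0, 1],),  # one list of block IDs per KV cache group
    num_computed_tokens=0,
    lora_request=None,
)
print(repr(req))        # contains prompt_token_ids=[1, 2, 3]
print(req.anon_repr())  # contains prompt_token_ids_len=3 instead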

block_ids instance-attribute

block_ids: tuple[list[int], ...]

lora_request instance-attribute

lora_request: LoRARequest | None

mm_features instance-attribute

mm_features: list[MultiModalFeatureSpec]

num_computed_tokens instance-attribute

num_computed_tokens: int

pooling_params instance-attribute

pooling_params: PoolingParams | None

prompt_embeds class-attribute instance-attribute

prompt_embeds: Tensor | None = None

prompt_token_ids instance-attribute

prompt_token_ids: list[int] | None

req_id instance-attribute

req_id: str

sampling_params instance-attribute

sampling_params: SamplingParams | None

__init__

__init__(
    req_id: str,
    prompt_token_ids: list[int] | None,
    mm_features: list[MultiModalFeatureSpec],
    sampling_params: SamplingParams | None,
    pooling_params: PoolingParams | None,
    block_ids: tuple[list[int], ...],
    num_computed_tokens: int,
    lora_request: LoRARequest | None,
    prompt_embeds: Tensor | None = None,
) -> None

__repr__

__repr__() -> str
Source code in vllm/v1/core/sched/output.py
def __repr__(self) -> str:
    prompt_embeds_shape = (
        self.prompt_embeds.shape if self.prompt_embeds is not None else None
    )
    return (
        f"NewRequestData("
        f"req_id={self.req_id},"
        f"prompt_token_ids={self.prompt_token_ids},"
        f"mm_features={self.mm_features},"
        f"sampling_params={self.sampling_params},"
        f"block_ids={self.block_ids},"
        f"num_computed_tokens={self.num_computed_tokens},"
        f"lora_request={self.lora_request},"
        f"prompt_embeds_shape={prompt_embeds_shape}"
        ")"
    )

anon_repr

anon_repr() -> str
Source code in vllm/v1/core/sched/output.py
def anon_repr(self) -> str:
    prompt_token_ids_len = (
        len(self.prompt_token_ids) if self.prompt_token_ids is not None else None
    )
    prompt_embeds_shape = (
        self.prompt_embeds.shape if self.prompt_embeds is not None else None
    )
    return (
        f"NewRequestData("
        f"req_id={self.req_id},"
        f"prompt_token_ids_len={prompt_token_ids_len},"
        f"mm_features={self.mm_features},"
        f"sampling_params={self.sampling_params},"
        f"block_ids={self.block_ids},"
        f"num_computed_tokens={self.num_computed_tokens},"
        f"lora_request={self.lora_request},"
        f"prompt_embeds_shape={prompt_embeds_shape}"
        ")"
    )

from_request classmethod

from_request(
    request: Request, block_ids: tuple[list[int], ...]
) -> NewRequestData
Source code in vllm/v1/core/sched/output.py
@classmethod
def from_request(
    cls,
    request: Request,
    block_ids: tuple[list[int], ...],
) -> NewRequestData:
    return cls(
        req_id=request.request_id,
        prompt_token_ids=request.prompt_token_ids,
        mm_features=request.mm_features,
        sampling_params=request.sampling_params,
        pooling_params=request.pooling_params,
        block_ids=block_ids,
        num_computed_tokens=request.num_computed_tokens,
        lora_request=request.lora_request,
        prompt_embeds=request.prompt_embeds,
    )

SchedulerOutput dataclass

Source code in vllm/v1/core/sched/output.py
@bc_linter_include
@dataclass
class SchedulerOutput:
    # list of the requests that are scheduled for the first time.
    # We cache the request's data in each worker process, so that we don't
    # need to re-send it every scheduling step.
    scheduled_new_reqs: list[NewRequestData]
    # list of the requests that have been scheduled before.
    # Since the request's data is already cached in the worker processes,
    # we only send the diff to minimize the communication cost.
    scheduled_cached_reqs: CachedRequestData

    # req_id -> num_scheduled_tokens
    # Number of tokens scheduled for each request.
    num_scheduled_tokens: dict[str, int]
    # Total number of tokens scheduled for all requests.
    # Equal to sum(num_scheduled_tokens.values())
    total_num_scheduled_tokens: int
    # req_id -> spec_token_ids
    # If a request does not have any spec decode tokens, it will not be
    # included in the dictionary.
    scheduled_spec_decode_tokens: dict[str, list[int]]
    # req_id -> encoder input indices that need processing.
    # E.g., if a request has [0, 1], it could mean the vision encoder needs
    # to process the request's 0th and 1st images in the current step.
    scheduled_encoder_inputs: dict[str, list[int]]
    # Number of common prefix blocks for all requests in each KV cache group.
    # This can be used for cascade attention.
    num_common_prefix_blocks: list[int]

    # Request IDs that are finished in between the previous and the current
    # steps. This is used to notify the workers about the finished requests
    # so that they can free the cached states for those requests.
    finished_req_ids: set[str]
    # list of mm_hash strings associated with the encoder outputs to be
    # freed from the encoder cache.
    free_encoder_mm_hashes: list[str]

    # Dict of request ids to their index within the batch
    # for filling the next token bitmask
    structured_output_request_ids: dict[str, int]
    # the bitmask for the whole batch
    grammar_bitmask: npt.NDArray[np.int32] | None

    # KV Cache Connector metadata.
    kv_connector_metadata: KVConnectorMetadata | None = None
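
Putting the pieces together, a worker process is expected to cache NewRequestData the first time a request is scheduled, apply CachedRequestData diffs on later steps, and drop state for finished_req_ids. The sketch below is a hedged illustration of that flow; process_scheduler_output and worker_cache are hypothetical names, not vLLM's worker API.

from vllm.v1.core.sched.output import NewRequestData, SchedulerOutput


def process_scheduler_output(
    output: SchedulerOutput,
    worker_cache: dict[str, NewRequestData],
) -> None:
    # Free locally cached state for requests that finished since the last step.
    for req_id in output.finished_req_ids:
        worker_cache.pop(req_id, None)
    # Requests scheduled for the first time arrive with their full data; cache
    # it so later steps only need the CachedRequestData diff.
    for new_req in output.scheduled_new_reqs:
        worker_cache[new_req.req_id] = new_req
    # Documented invariant: the total equals the per-request sum.
    assert output.total_num_scheduled_tokens == sum(
        output.num_scheduled_tokens.values()
    )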

finished_req_ids instance-attribute

finished_req_ids: set[str]

free_encoder_mm_hashes instance-attribute

free_encoder_mm_hashes: list[str]

grammar_bitmask instance-attribute

grammar_bitmask: NDArray[int32] | None

kv_connector_metadata class-attribute instance-attribute

kv_connector_metadata: KVConnectorMetadata | None = None

num_common_prefix_blocks instance-attribute

num_common_prefix_blocks: list[int]

num_scheduled_tokens instance-attribute

num_scheduled_tokens: dict[str, int]

scheduled_cached_reqs instance-attribute

scheduled_cached_reqs: CachedRequestData

scheduled_encoder_inputs instance-attribute

scheduled_encoder_inputs: dict[str, list[int]]

scheduled_new_reqs instance-attribute

scheduled_new_reqs: list[NewRequestData]

scheduled_spec_decode_tokens instance-attribute

scheduled_spec_decode_tokens: dict[str, list[int]]

structured_output_request_ids instance-attribute

structured_output_request_ids: dict[str, int]

total_num_scheduled_tokens instance-attribute

total_num_scheduled_tokens: int

__init__

__init__(
    scheduled_new_reqs: list[NewRequestData],
    scheduled_cached_reqs: CachedRequestData,
    num_scheduled_tokens: dict[str, int],
    total_num_scheduled_tokens: int,
    scheduled_spec_decode_tokens: dict[str, list[int]],
    scheduled_encoder_inputs: dict[str, list[int]],
    num_common_prefix_blocks: list[int],
    finished_req_ids: set[str],
    free_encoder_mm_hashes: list[str],
    structured_output_request_ids: dict[str, int],
    grammar_bitmask: NDArray[int32] | None,
    kv_connector_metadata: KVConnectorMetadata
    | None = None,
) -> None