From 6cf77d7eb684ff7dbc9f60c52782c06d9499bfba Mon Sep 17 00:00:00 2001
From: benniekiss <63211101+benniekiss@users.noreply.github.com>
Date: Sat, 13 Jul 2024 10:11:03 -0400
Subject: [PATCH 1/5] enable detokenizing special tokens

---
 llama_cpp/_internals.py      | 2 +-
 llama_cpp/llama.py           | 4 ++--
 llama_cpp/llama_tokenizer.py | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py
index 6dae88c8f..c708867c6 100644
--- a/llama_cpp/_internals.py
+++ b/llama_cpp/_internals.py
@@ -214,7 +214,7 @@ def token_to_piece(self, token: int, special: bool = False) -> bytes:
         llama_cpp.llama_token_to_piece(self.model, token, buf, 32, 0, special)
         return bytes(buf)
 
-    def detokenize(self, tokens: List[int], special: bool = False) -> bytes:
+    def detokenize(self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False) -> bytes:
         assert self.model is not None
         output = b""
         size = 32
diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 9b1a3d263..7298c17c9 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -588,7 +588,7 @@ def tokenize(
         return self.tokenizer_.tokenize(text, add_bos, special)
 
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False
     ) -> bytes:
         """Detokenize a list of tokens.
 
@@ -599,7 +599,7 @@ def detokenize(
         Returns:
             The detokenized string.
         """
-        return self.tokenizer_.detokenize(tokens, prev_tokens=prev_tokens)
+        return self.tokenizer_.detokenize(tokens, prev_tokens=prev_tokens, special=special)
 
     def set_cache(self, cache: Optional[BaseLlamaCache]):
         """Set the cache.
diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py
index 029bf2acc..9228738af 100644
--- a/llama_cpp/llama_tokenizer.py
+++ b/llama_cpp/llama_tokenizer.py
@@ -26,7 +26,7 @@ def tokenize(
 
     @abc.abstractmethod
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
     ) -> bytes:
         """Detokenize the tokens into text.
 
@@ -47,9 +47,9 @@ def tokenize(
         return self._model.tokenize(text, add_bos=add_bos, special=special)
 
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
     ) -> bytes:
-        return self._model.detokenize(tokens)
+        return self._model.detokenize(tokens, prev_tokens=prev_tokens, special=special)
 
     def encode(
         self, text: str, add_bos: bool = True, special: bool = True

From 992eb709a908d11c3efe90aeedbfd483664683f8 Mon Sep 17 00:00:00 2001
From: benniekiss <63211101+benniekiss@users.noreply.github.com>
Date: Sat, 13 Jul 2024 10:25:49 -0400
Subject: [PATCH 2/5] enable skipping_special_tokens in hf_tokenizer
 detokenize()

---
 llama_cpp/llama_tokenizer.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py
index 9228738af..6e49752e1 100644
--- a/llama_cpp/llama_tokenizer.py
+++ b/llama_cpp/llama_tokenizer.py
@@ -78,18 +78,19 @@ def tokenize(
         )
 
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
     ) -> bytes:
+        skip_special_tokens = not special
         if prev_tokens is not None:
-            text = self.hf_tokenizer.decode(prev_tokens + tokens).encode(
+            text = self.hf_tokenizer.decode(prev_tokens + tokens, skip_special_tokens=skip_special_tokens).encode(
                 "utf-8", errors="ignore"
             )
-            prev_text = self.hf_tokenizer.decode(prev_tokens).encode(
+            prev_text = self.hf_tokenizer.decode(prev_tokens, skip_special_tokens=skip_special_tokens).encode(
                 "utf-8", errors="ignore"
             )
             return text[len(prev_text) :]
         else:
-            return self.hf_tokenizer.decode(tokens).encode("utf-8", errors="ignore")
+            return self.hf_tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens).encode("utf-8", errors="ignore")
 
     @classmethod
     def from_pretrained(cls, pretrained_model_name_or_path: str) -> "LlamaHFTokenizer":

From 212f3f715a4c6702bd70bf9cacb46981d8348d43 Mon Sep 17 00:00:00 2001
From: benniekiss <63211101+benniekiss@users.noreply.github.com>
Date: Sat, 13 Jul 2024 10:32:04 -0400
Subject: [PATCH 3/5] process prev_tokens

---
 llama_cpp/_internals.py      | 2 +-
 llama_cpp/llama_tokenizer.py | 7 ++++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py
index c708867c6..6dae88c8f 100644
--- a/llama_cpp/_internals.py
+++ b/llama_cpp/_internals.py
@@ -214,7 +214,7 @@ def token_to_piece(self, token: int, special: bool = False) -> bytes:
         llama_cpp.llama_token_to_piece(self.model, token, buf, 32, 0, special)
         return bytes(buf)
 
-    def detokenize(self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False) -> bytes:
+    def detokenize(self, tokens: List[int], special: bool = False) -> bytes:
         assert self.model is not None
         output = b""
         size = 32
diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py
index 6e49752e1..03c2571b6 100644
--- a/llama_cpp/llama_tokenizer.py
+++ b/llama_cpp/llama_tokenizer.py
@@ -49,7 +49,12 @@ def tokenize(
     def detokenize(
         self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
     ) -> bytes:
-        return self._model.detokenize(tokens, prev_tokens=prev_tokens, special=special)
+        if prev_tokens is not None:
+            text = self._model.detokenize(prev_tokens + tokens, special=special)
+            prev_text = self._model.detokenize(prev_tokens, special=special)
+            return text[len(prev_text) :]
+        else:
+            return self._model.detokenize(tokens, special=special)
 
     def encode(
         self, text: str, add_bos: bool = True, special: bool = True

From e1cc07ddfed835210fe733362e9986a9cd2783d1 Mon Sep 17 00:00:00 2001
From: benniekiss <63211101+benniekiss@users.noreply.github.com>
Date: Sat, 13 Jul 2024 10:36:37 -0400
Subject: [PATCH 4/5] fix doc strings

---
 llama_cpp/llama.py           |  5 ++++-
 llama_cpp/llama_tokenizer.py | 10 ++++++----
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 7298c17c9..45cbd7bca 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -578,6 +578,8 @@ def tokenize(
 
         Args:
             text: The utf-8 encoded string to tokenize.
+            add_bos: Whether to add a beginning of sequence token.
+            special: Whether to tokenize special tokens.
 
         Raises:
             RuntimeError: If the tokenization failed.
@@ -594,7 +596,8 @@ def detokenize(
 
         Args:
             tokens: The list of tokens to detokenize.
-            prev_tokens: The list of previous tokens. Offset mapping will be performed if provided
+            prev_tokens: The list of previous tokens. Offset mapping will be performed if provided.
+            special: Whether to detokenize special tokens.
 
         Returns:
             The detokenized string.
diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py
index 03c2571b6..da7a8619f 100644
--- a/llama_cpp/llama_tokenizer.py
+++ b/llama_cpp/llama_tokenizer.py
@@ -19,9 +19,10 @@ def tokenize(
         """Tokenize the text into tokens.
 
         Args:
-            text: The text to tokenize.
+            text: The utf-8 encoded string to tokenize.
             add_bos: Whether to add a beginning of sequence token.
-            special: Whether to tokenize text literally or as special tokens."""
+            special: Whether to tokenize special tokens.
+        """
         raise NotImplementedError
 
     @abc.abstractmethod
@@ -31,8 +32,9 @@ def detokenize(
         """Detokenize the tokens into text.
 
         Args:
-            tokens: The tokens to detokenize.
-            prev_tokens: If tokens is a continuation of a previous sequence, the previous tokens.
+            tokens: The list of tokens to detokenize.
+            prev_tokens: The list of previous tokens. Offset mapping will be performed if provided.
+            special: Whether to detokenize special tokens.
         """
         raise NotImplementedError

From 783e10998425653ab89bcaf78fbc02b7342ea384 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Thu, 29 Aug 2024 13:01:19 -0400
Subject: [PATCH 5/5] Revert changes to LlamaTokenizer prev_tokens and set
 special to False by default

---
 llama_cpp/llama_tokenizer.py | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/llama_cpp/llama_tokenizer.py b/llama_cpp/llama_tokenizer.py
index da7a8619f..2e7590d14 100644
--- a/llama_cpp/llama_tokenizer.py
+++ b/llama_cpp/llama_tokenizer.py
@@ -27,7 +27,7 @@ def tokenize(
 
     @abc.abstractmethod
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False
     ) -> bytes:
         """Detokenize the tokens into text.
 
@@ -49,14 +49,9 @@ def tokenize(
         return self._model.tokenize(text, add_bos=add_bos, special=special)
 
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False
     ) -> bytes:
-        if prev_tokens is not None:
-            text = self._model.detokenize(prev_tokens + tokens, special=special)
-            prev_text = self._model.detokenize(prev_tokens, special=special)
-            return text[len(prev_text) :]
-        else:
-            return self._model.detokenize(tokens, special=special)
+        return self._model.detokenize(tokens, special=special)
 
     def encode(
         self, text: str, add_bos: bool = True, special: bool = True
@@ -85,7 +80,7 @@ def tokenize(
         )
 
     def detokenize(
-        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = True
+        self, tokens: List[int], prev_tokens: Optional[List[int]] = None, special: bool = False
     ) -> bytes:
         skip_special_tokens = not special
         if prev_tokens is not None:
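
Net effect of the series: Llama.detokenize() gains a `special` flag (False by
default after PATCH 5/5) that is threaded down to the concrete tokenizers. The
built-in LlamaTokenizer forwards it to the low-level model detokenizer, while
LlamaHFTokenizer maps it onto the transformers decoder as
skip_special_tokens=not special. Below is a minimal usage sketch of the
resulting API; the model path is a placeholder and the byte outputs in the
comments are illustrative, since the exact special-token text depends on the
model's vocabulary:

    # Sketch assuming the post-PATCH-5/5 API; model path is hypothetical.
    from llama_cpp import Llama

    # vocab_only=True loads just the tokenizer data, enough for this demo.
    llm = Llama(model_path="./models/model.gguf", vocab_only=True)

    # special=True parses special markers (e.g. BOS) into their token ids.
    tokens = llm.tokenize(b"<s>Hello", add_bos=False, special=True)

    # Default special=False: special tokens are omitted from the output bytes.
    print(llm.detokenize(tokens))                # e.g. b"Hello"

    # special=True: special-token text is rendered back into the output bytes.
    print(llm.detokenize(tokens, special=True))  # e.g. b"<s>Hello"

Note on the prev_tokens path: PATCH 3/5 briefly gave LlamaTokenizer the same
offset-mapping trick LlamaHFTokenizer uses (decode prev_tokens + tokens, decode
prev_tokens alone, and return the remaining byte suffix), which keeps streamed
output stable across token boundaries; PATCH 5/5 reverts that for
LlamaTokenizer, so offset mapping remains only in the HF tokenizer.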