Skip to content

Commit 5eb4ebb

Browse files
authored
Merge branch 'main' into fix-state-pickle
2 parents 877ca6d + 04d9218 commit 5eb4ebb

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

llama_cpp/llama.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -407,6 +407,7 @@ def eval(self, tokens: Sequence[int]):
407407
"""
408408
assert self.ctx is not None
409409
n_ctx = self._n_ctx
410+
scores = []
410411
for i in range(0, len(tokens), self.n_batch):
411412
batch = tokens[i : min(len(tokens), i + self.n_batch)]
412413
n_past = min(n_ctx - len(batch), len(self._input_ids))
@@ -432,9 +433,8 @@ def eval(self, tokens: Sequence[int]):
432433
logits_view = llama_cpp.llama_get_logits(self.ctx)
433434
logits = [logits_view[i * cols : (i + 1) * cols] for i in range(rows)]
434435
self.eval_logits.extend(logits)
435-
self._scores: npt.NDArray[np.single] = np.concatenate(
436-
(self._scores, np.array(logits, dtype=np.single)), axis=0
437-
)
436+
scores.append(np.array(logits, dtype=np.single))
437+
self._scores = np.concatenate(scores)
438438

439439
def _sample(
440440
self,

0 commit comments

Comments (0)