Commit d1ae8c5

feat(llama.cpp): update (#229)
Signed-off-by: mudler <[email protected]>
1 parent: d9f6176

File tree: binding.cpp, llama.cpp (2 files changed, +4 -3 lines)

binding.cpp (+3 -2)

@@ -98,7 +98,8 @@ int eval(void* params_ptr,void* state_pr,char *text) {
     auto last_n_tokens_data = std::vector<llama_token>(params_p->repeat_last_n, 0);
 
     auto tokens = std::vector<llama_token>(params_p->n_ctx);
-    auto n_prompt_tokens = llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);
+    std::string str = std::string(text);
+    auto n_prompt_tokens = llama_tokenize(ctx, str.data(), str.length(), tokens.data(), tokens.size(), true);
 
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
@@ -814,7 +815,7 @@ int llama_tokenize_string(void* params_ptr, void* state_pr, int* result) {
 
     const bool add_bos = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
 
-    return llama_tokenize(ctx, params_p->prompt.c_str(), result, params_p->n_ctx, add_bos);
+    return llama_tokenize(ctx, params_p->prompt.data(), params_p->prompt.length(), result, params_p->n_ctx, add_bos);
 }

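Both call sites change because the updated llama.cpp expects llama_tokenize to receive the input text together with an explicit length rather than only a NUL-terminated C string: eval() wraps the raw char* in a std::string so a length is available, and llama_tokenize_string switches from c_str() to data()/length(). Below is a minimal sketch of that calling convention, assuming the signature implied by the call sites above; the helper name tokenize_prompt is hypothetical and not part of the binding.

#include <cstdio>
#include <string>
#include <vector>
#include "llama.h"   // provides llama_context, llama_token, llama_tokenize

// Assumed signature, inferred from the call sites in this diff:
//   int llama_tokenize(struct llama_context * ctx,
//                      const char * text, int text_len,
//                      llama_token * tokens, int n_max_tokens,
//                      bool add_bos);

// Hypothetical helper illustrating the text + length convention.
static std::vector<llama_token> tokenize_prompt(llama_context * ctx,
                                                const std::string & prompt,
                                                int n_ctx,
                                                bool add_bos) {
    std::vector<llama_token> tokens(n_ctx);
    // Pass the buffer pointer and its length explicitly instead of
    // relying on a terminating NUL byte.
    int n = llama_tokenize(ctx, prompt.data(), prompt.length(),
                           tokens.data(), tokens.size(), add_bos);
    if (n < 1) {
        fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
        return {};
    }
    tokens.resize(n);
    return tokens;
}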
llama.cpp (+1 -1)
