feat: sync llama.cpp
jhen0409 committed Nov 2, 2023
1 parent ce7f9e4 commit 6e4c9bc
Showing 23 changed files with 6,369 additions and 6,989 deletions.
4 changes: 2 additions & 2 deletions android/src/main/CMakeLists.txt
@@ -12,7 +12,7 @@ set(
${RNLLAMA_LIB_DIR}/ggml-alloc.c
${RNLLAMA_LIB_DIR}/ggml-backend.c
${RNLLAMA_LIB_DIR}/ggml.c
${RNLLAMA_LIB_DIR}/k_quants.c
${RNLLAMA_LIB_DIR}/ggml-quants.c
${RNLLAMA_LIB_DIR}/common.cpp
${RNLLAMA_LIB_DIR}/grammar-parser.cpp
${RNLLAMA_LIB_DIR}/sampling.cpp
@@ -36,7 +36,7 @@ function(build_library target_name)
target_compile_options(${target_name} PRIVATE -march=armv8.2-a+fp16+dotprod)
endif ()

target_compile_options(${target_name} PRIVATE -DLM_GGML_USE_K_QUANTS -pthread)
target_compile_options(${target_name} PRIVATE -pthread)

if (${target_name} STREQUAL "rnllama_v8fp16_va")
target_compile_options(${target_name} PRIVATE -march=armv8.2-a+fp16+dotprod)
4 changes: 2 additions & 2 deletions cpp/build-info.h
@@ -1,8 +1,8 @@
#ifndef BUILD_INFO_H
#define BUILD_INFO_H

#define BUILD_NUMBER 1429
#define BUILD_COMMIT "34b2a5e"
#define BUILD_NUMBER 1461
#define BUILD_COMMIT "c43c2da"
#define BUILD_COMPILER ""
#define BUILD_TARGET "unknown"

54 changes: 37 additions & 17 deletions cpp/common.cpp
@@ -103,9 +103,24 @@ void process_escapes(std::string& input) {
}

bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
bool result = true;
try {
if (!gpt_params_parse_ex(argc, argv, params)) {
gpt_print_usage(argc, argv, gpt_params());
exit(0);
}
}
catch (const std::invalid_argument & ex) {
fprintf(stderr, "%s\n", ex.what());
gpt_print_usage(argc, argv, gpt_params());
exit(1);
}
return result;
}

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
bool invalid_param = false;
std::string arg;
gpt_params default_params;
const std::string arg_prefix = "--";
llama_sampling_params & sparams = params.sparams;

@@ -218,12 +233,19 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
sparams.top_p = std::stof(argv[i]);
} else if (arg == "--min-p") {
if (++i >= argc) {
invalid_param = true;
break;
}
sparams.min_p = std::stof(argv[i]);
} else if (arg == "--temp") {
if (++i >= argc) {
invalid_param = true;
break;
}
sparams.temp = std::stof(argv[i]);
sparams.temp = std::max(sparams.temp, 0.0f);
} else if (arg == "--tfs") {
if (++i >= argc) {
invalid_param = true;
@@ -547,11 +569,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
} else if (arg == "-h" || arg == "--help") {
gpt_print_usage(argc, argv, default_params);
#ifndef LOG_DISABLE_LOGS
log_print_usage();
#endif // LOG_DISABLE_LOGS
exit(0);
return false;

} else if (arg == "--random-prompt") {
params.random_prompt = true;
} else if (arg == "--in-prefix-bos") {
@@ -610,22 +629,17 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
// End of Parse args for logging parameters
#endif // LOG_DISABLE_LOGS
} else {
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
gpt_print_usage(argc, argv, default_params);
exit(1);
throw std::invalid_argument("error: unknown argument: " + arg);
}
}
if (invalid_param) {
fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
gpt_print_usage(argc, argv, default_params);
exit(1);
throw std::invalid_argument("error: invalid parameter for argument: " + arg);
}
if (params.prompt_cache_all &&
(params.interactive || params.interactive_first ||
params.instruct)) {
fprintf(stderr, "error: --prompt-cache-all not supported in interactive mode yet\n");
gpt_print_usage(argc, argv, default_params);
exit(1);

throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
}

if (params.escape) {
@@ -644,6 +658,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
const llama_sampling_params & sparams = params.sparams;

printf("\n");
printf("usage: %s [options]\n", argv[0]);
printf("\n");
printf("options:\n");
@@ -678,6 +693,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k);
printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p);
printf(" --min-p N min-p sampling (default: %.1f, 0.0 = disabled)\n", (double)sparams.min_p);
printf(" --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)sparams.tfs_z);
printf(" --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)sparams.typical_p);
printf(" --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", sparams.penalty_last_n);
@@ -743,7 +759,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
#endif // LM_GGML_USE_CUBLAS
#endif
printf(" --verbose-prompt print prompt before generation\n");
fprintf(stderr, " --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n");
printf(" --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n");
printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
printf(" --lora-scaled FNAME S apply LoRA adapter with user defined scaling S (implies --no-mmap)\n");
printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
@@ -754,6 +770,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -ld LOGDIR, --logdir LOGDIR\n");
printf(" path under which to save YAML logs (no logging if unset)\n");
printf("\n");
#ifndef LOG_DISABLE_LOGS
log_print_usage();
#endif // LOG_DISABLE_LOGS
}

std::string get_system_info(const gpt_params & params) {
@@ -888,7 +907,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par

std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
llama_kv_cache_tokens_rm(lctx, -1, -1);
llama_kv_cache_clear(lctx);
llama_reset_timings(lctx);
}

@@ -1274,6 +1293,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
fprintf(stream, "threads: %d # default: %d\n", params.n_threads, std::thread::hardware_concurrency());
fprintf(stream, "top_k: %d # default: 40\n", sparams.top_k);
fprintf(stream, "top_p: %f # default: 0.95\n", sparams.top_p);
fprintf(stream, "min_p: %f # default: 0.0\n", sparams.min_p);
fprintf(stream, "typical_p: %f # default: 1.0\n", sparams.typical_p);
fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
}
2 changes: 2 additions & 0 deletions cpp/common.h
@@ -110,6 +110,8 @@ struct gpt_params {
std::string image = ""; // path to an image file
};

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
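
Again not part of the commit, just a hypothetical self-test illustrating two behavioural changes visible in the hunks above: the new --min-p option is parsed with std::stof into sparams.min_p, and a negative --temp is now clamped to zero by std::max.

#include <cstdio>

#include "common.h"

int main() {
    // Hypothetical argv exercising the new flag and the temperature clamp.
    const char * args[] = { "test", "--min-p", "0.05", "--temp", "-1.0" };
    gpt_params params;
    gpt_params_parse_ex(5, const_cast<char **>(args), params);

    // Expected output: min_p = 0.05, temp = 0.00
    printf("min_p = %.2f, temp = %.2f\n",
           (double) params.sparams.min_p, (double) params.sparams.temp);
    return 0;
}
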
