diff --git a/android/src/main/jni.cpp b/android/src/main/jni.cpp
index 71662c5..5377aea 100644
--- a/android/src/main/jni.cpp
+++ b/android/src/main/jni.cpp
@@ -168,7 +168,7 @@ Java_com_rnllama_LlamaContext_initContext(
     const char *lora_chars = env->GetStringUTFChars(lora_str, nullptr);
     if (lora_chars != nullptr && lora_chars[0] != '\0') {
-        defaultParams.lora_adapter.push_back({lora_chars, lora_scaled});
+        defaultParams.lora_adapters.push_back({lora_chars, lora_scaled});
         defaultParams.use_mmap = false;
     }

diff --git a/cpp/rn-llama.hpp b/cpp/rn-llama.hpp
index 00a04c3..537f0a1 100644
--- a/cpp/rn-llama.hpp
+++ b/cpp/rn-llama.hpp
@@ -219,7 +219,9 @@ struct llama_rn_context
     bool loadModel(gpt_params &params_)
     {
         params = params_;
-        std::tie(model, ctx) = llama_init_from_gpt_params(params);
+        llama_init_result llama_init = llama_init_from_gpt_params(params);
+        model = llama_init.model;
+        ctx = llama_init.context;
         if (model == nullptr)
         {
            LOG_ERROR("unable to load model: %s", params_.model.c_str());

diff --git a/ios/RNLlamaContext.mm b/ios/RNLlamaContext.mm
index ef494aa..f7fbc95 100644
--- a/ios/RNLlamaContext.mm
+++ b/ios/RNLlamaContext.mm
@@ -59,7 +59,7 @@ + (instancetype)initWithParams:(NSDictionary *)params {
     if (params[@"lora"]) {
         float lora_scaled = 1.0f;
         if (params[@"lora_scaled"]) lora_scaled = [params[@"lora_scaled"] floatValue];
-        defaultParams.lora_adapter.push_back({[params[@"lora"] UTF8String], lora_scaled});
+        defaultParams.lora_adapters.push_back({[params[@"lora"] UTF8String], lora_scaled});
         defaultParams.use_mmap = false;
     }
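
For context, the upstream llama.cpp change this diff tracks did two things: `llama_init_from_gpt_params()` now returns a `llama_init_result` struct (with `model` and `context` members) instead of a tuple unpacked via `std::tie`, and the `gpt_params` field was renamed from `lora_adapter` to `lora_adapters`. Below is a minimal sketch of the updated call pattern, assuming the `common.h` header from the llama.cpp revision this diff syncs against; the `load_model_with_lora` helper and the adapter path are illustrative, not part of this change:

```cpp
// Sketch only: gpt_params, llama_init_result, and llama_init_from_gpt_params
// are provided by llama.cpp's common.h at the synced revision.
#include "common.h"

// Hypothetical helper showing the updated call pattern used in the diff above.
static bool load_model_with_lora(gpt_params & params) {
    // Field renamed upstream: lora_adapter -> lora_adapters.
    // Each entry pairs an adapter path with its scale factor.
    params.lora_adapters.push_back({"/path/to/adapter.gguf", 1.0f});
    params.use_mmap = false; // mmap is turned off whenever a LoRA adapter is set

    // llama_init_from_gpt_params() now returns a struct rather than a tuple,
    // so the old `std::tie(model, ctx) = ...` form no longer compiles.
    llama_init_result init = llama_init_from_gpt_params(params);
    llama_model   * model = init.model;
    llama_context * ctx   = init.context;

    return model != nullptr && ctx != nullptr;
}
```

Presumably `use_mmap` is forced off here because applying a LoRA adapter in this code path modifies the base weights, which a read-only memory mapping would not permit.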