From a9dc12bfbcc2663cc7c8c5faa6240e14bc4f8b3a Mon Sep 17 00:00:00 2001
From: Jhen-Jie Hong
Date: Thu, 6 Feb 2025 11:51:25 +0800
Subject: [PATCH] feat(ios): impl toggleNativeLog

---
 ios/RNLlama.h         |  8 --------
 ios/RNLlama.mm        | 13 ++++++++++++-
 ios/RNLlamaContext.h  |  1 +
 ios/RNLlamaContext.mm | 27 +++++++++++++++++++++++++++
 4 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/ios/RNLlama.h b/ios/RNLlama.h
index e58cf614..db735792 100644
--- a/ios/RNLlama.h
+++ b/ios/RNLlama.h
@@ -1,11 +1,3 @@
-#ifdef __cplusplus
-#if RNLLAMA_BUILD_FROM_SOURCE
-#import "rn-llama.h"
-#else
-#import <rnllama/rn-llama.h>
-#endif
-#endif
-
 #import <React/RCTBridgeModule.h>
 #import <React/RCTEventEmitter.h>
 
diff --git a/ios/RNLlama.mm b/ios/RNLlama.mm
index 91de6498..cdd89b4e 100644
--- a/ios/RNLlama.mm
+++ b/ios/RNLlama.mm
@@ -13,6 +13,16 @@ @implementation RNLlama
 
 RCT_EXPORT_MODULE()
 
+RCT_EXPORT_METHOD(toggleNativeLog:(BOOL)enabled) {
+  void (^onEmitLog)(NSString *level, NSString *text) = nil;
+  if (enabled) {
+    onEmitLog = ^(NSString *level, NSString *text) {
+      [self sendEventWithName:@"@RNLlama_onNativeLog" body:@{ @"level": level, @"text": text }];
+    };
+  }
+  [RNLlamaContext toggleNativeLog:enabled onEmitLog:onEmitLog];
+}
+
 RCT_EXPORT_METHOD(setContextLimit:(double)limit
                  withResolver:(RCTPromiseResolveBlock)resolve
                  withRejecter:(RCTPromiseRejectBlock)reject)
@@ -41,7 +51,7 @@ @implementation RNLlama
   }
 
   if (llamaDQueue == nil) {
-    llamaDQueue = dispatch_queue_create("com.rnllama", DISPATCH_QUEUE_SERIAL); 
+    llamaDQueue = dispatch_queue_create("com.rnllama", DISPATCH_QUEUE_SERIAL);
  }
 
   if (llamaContexts == nil) {
@@ -159,6 +169,7 @@ - (NSArray *)supportedEvents {
  return@[
     @"@RNLlama_onInitContextProgress",
     @"@RNLlama_onToken",
+    @"@RNLlama_onNativeLog",
   ];
 }
 
diff --git a/ios/RNLlamaContext.h b/ios/RNLlamaContext.h
index 937b6d64..63380187 100644
--- a/ios/RNLlamaContext.h
+++ b/ios/RNLlamaContext.h
@@ -25,6 +25,7 @@
     rnllama::llama_rn_context * llama;
 }
 
++ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog;
 + (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip;
 + (instancetype)initWithParams:(NSDictionary *)params onProgress:(void (^)(unsigned int progress))onProgress;
 - (void)interruptLoad;
diff --git a/ios/RNLlamaContext.mm b/ios/RNLlamaContext.mm
index 6d4d2294..873a2cc6 100644
--- a/ios/RNLlamaContext.mm
+++ b/ios/RNLlamaContext.mm
@@ -3,6 +3,33 @@
 
 @implementation RNLlamaContext
 
++ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog {
+    if (enabled) {
+        void (^copiedBlock)(NSString *, NSString *) = [onEmitLog copy];
+        llama_log_set([](lm_ggml_log_level level, const char * text, void * data) {
+            llama_log_callback_default(level, text, data);
+            NSString *levelStr = @"";
+            if (level == LM_GGML_LOG_LEVEL_ERROR) {
+                levelStr = @"error";
+            } else if (level == LM_GGML_LOG_LEVEL_INFO) {
+                levelStr = @"info";
+            } else if (level == LM_GGML_LOG_LEVEL_WARN) {
+                levelStr = @"warn";
+            }
+
+            NSString *textStr = [NSString stringWithUTF8String:text];
+            // NOTE: Convert to UTF-8 string may fail
+            if (!textStr) {
+                return;
+            }
+            void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))(data);
+            block(levelStr, textStr);
+        }, copiedBlock);
+    } else {
+        llama_log_set(llama_log_callback_default, nullptr);
+    }
+}
+
 + (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip {
     struct lm_gguf_init_params params = {
         /*.no_alloc = */ false,