feat(ios): impl toggleNativeLog
jhen0409 committed Feb 6, 2025
1 parent ea7e945 commit a9dc12b
Showing 4 changed files with 40 additions and 9 deletions.
8 changes: 0 additions & 8 deletions ios/RNLlama.h
@@ -1,11 +1,3 @@
-#ifdef __cplusplus
-#if RNLLAMA_BUILD_FROM_SOURCE
-#import "rn-llama.h"
-#else
-#import <rnllama/rn-llama.h>
-#endif
-#endif
-
#import <React/RCTEventEmitter.h>
#import <React/RCTBridgeModule.h>

13 changes: 12 additions & 1 deletion ios/RNLlama.mm
@@ -13,6 +13,16 @@ @implementation RNLlama

RCT_EXPORT_MODULE()

+RCT_EXPORT_METHOD(toggleNativeLog:(BOOL)enabled) {
+  void (^onEmitLog)(NSString *level, NSString *text) = nil;
+  if (enabled) {
+    onEmitLog = ^(NSString *level, NSString *text) {
+      [self sendEventWithName:@"@RNLlama_onNativeLog" body:@{ @"level": level, @"text": text }];
+    };
+  }
+  [RNLlamaContext toggleNativeLog:enabled onEmitLog:onEmitLog];
+}
+
RCT_EXPORT_METHOD(setContextLimit:(double)limit
withResolver:(RCTPromiseResolveBlock)resolve
withRejecter:(RCTPromiseRejectBlock)reject)
@@ -41,7 +51,7 @@ @implementation RNLlama
}

if (llamaDQueue == nil) {
-llamaDQueue = dispatch_queue_create("com.rnllama", DISPATCH_QUEUE_SERIAL);
+llamaDQueue = dispatch_queue_create("com.rnllama", DISPATCH_QUEUE_SERIAL);
}

if (llamaContexts == nil) {
@@ -159,6 +169,7 @@ - (NSArray *)supportedEvents {
  return @[
    @"@RNLlama_onInitContextProgress",
    @"@RNLlama_onToken",
+    @"@RNLlama_onNativeLog",
  ];
}

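Side note: `sendEventWithName:body:` only delivers events whose names are returned from `supportedEvents`, which is why this commit both emits `@RNLlama_onNativeLog` and registers it in the list above. A minimal sketch of that contract, using a hypothetical `LogEmitterSketch` module that is not part of this commit:

#import <React/RCTEventEmitter.h>

@interface LogEmitterSketch : RCTEventEmitter
@end

@implementation LogEmitterSketch

RCT_EXPORT_MODULE();

// React Native delivers an event only if its name is listed here.
- (NSArray<NSString *> *)supportedEvents {
  return @[ @"@RNLlama_onNativeLog" ];
}

// Any native code path can then forward a log line to JS listeners.
- (void)emitLog:(NSString *)level text:(NSString *)text {
  [self sendEventWithName:@"@RNLlama_onNativeLog"
                     body:@{ @"level": level, @"text": text }];
}

@end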
1 change: 1 addition & 0 deletions ios/RNLlamaContext.h
@@ -25,6 +25,7 @@
rnllama::llama_rn_context * llama;
}

++ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog;
+ (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip;
+ (instancetype)initWithParams:(NSDictionary *)params onProgress:(void (^)(unsigned int progress))onProgress;
- (void)interruptLoad;
27 changes: 27 additions & 0 deletions ios/RNLlamaContext.mm
@@ -3,6 +3,33 @@

@implementation RNLlamaContext

++ (void)toggleNativeLog:(BOOL)enabled onEmitLog:(void (^)(NSString *level, NSString *text))onEmitLog {
+  if (enabled) {
+    void (^copiedBlock)(NSString *, NSString *) = [onEmitLog copy];
+    llama_log_set([](lm_ggml_log_level level, const char * text, void * data) {
+      llama_log_callback_default(level, text, data);
+      NSString *levelStr = @"";
+      if (level == LM_GGML_LOG_LEVEL_ERROR) {
+        levelStr = @"error";
+      } else if (level == LM_GGML_LOG_LEVEL_INFO) {
+        levelStr = @"info";
+      } else if (level == LM_GGML_LOG_LEVEL_WARN) {
+        levelStr = @"warn";
+      }
+
+      NSString *textStr = [NSString stringWithUTF8String:text];
+      // NOTE: Conversion may fail if the C string is not valid UTF-8
+      if (!textStr) {
+        return;
+      }
+      void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))(data);
+      block(levelStr, textStr);
+    }, copiedBlock);
+  } else {
+    llama_log_set(llama_log_callback_default, nullptr);
+  }
+}
+
+ (NSDictionary *)modelInfo:(NSString *)path skip:(NSArray *)skip {
struct lm_gguf_init_params params = {
/*.no_alloc = */ false,
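The implementation above hands a copied Objective-C block to `llama_log_set` through its opaque `void *` user-data slot and recovers it inside a captureless C++ lambda, which converts to the plain C function pointer the API expects. Below is a standalone sketch of that pattern under stated assumptions: `my_log_set`/`my_log` are hypothetical stand-ins for llama.cpp's logger, and the sketch uses `CFBridgingRetain` to keep the block alive for the C side, whereas the diff's direct handoff relies on the block being retained elsewhere.

// Sketch only: my_log_set/my_log stand in for llama_log_set and the
// library's internal logging calls; they are not real llama.cpp APIs.
#import <Foundation/Foundation.h>

typedef void (*my_log_cb)(int level, const char *text, void *data);

static my_log_cb g_cb = NULL;
static void *g_data = NULL;

static void my_log_set(my_log_cb cb, void *data) { g_cb = cb; g_data = data; }
static void my_log(int level, const char *text) { if (g_cb) g_cb(level, text, g_data); }

int main(void) {
  @autoreleasepool {
    void (^onEmitLog)(NSString *, NSString *) = ^(NSString *level, NSString *text) {
      NSLog(@"[%@] %@", level, text);
    };
    // CFBridgingRetain transfers ownership to the C side so the block
    // outlives this scope; balance it with CFBridgingRelease later.
    void *data = (void *)CFBridgingRetain([onEmitLog copy]);
    my_log_set([](int level, const char *text, void *d) {
      // __bridge: borrow the block back without transferring ownership.
      void (^block)(NSString *, NSString *) = (__bridge void (^)(NSString *, NSString *))d;
      NSString *textStr = [NSString stringWithUTF8String:text];
      if (textStr) block(level == 0 ? @"info" : @"error", textStr);
    }, data);
    my_log(0, "hello from the C side");
    // Uninstall the callback, then release the block retained above.
    my_log_set(NULL, NULL);
    CFBridgingRelease(data);
  }
  return 0;
}

(Compile as Objective-C++ with ARC, e.g. clang++ -fobjc-arc sketch.mm -framework Foundation.)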
