diff --git a/android/CMakeLists.txt b/android/CMakeLists.txt index 4613e4a..121761c 100644 --- a/android/CMakeLists.txt +++ b/android/CMakeLists.txt @@ -4,8 +4,7 @@ project(BarkRn) set (CMAKE_VERBOSE_MAKEFILE ON) set (CMAKE_CXX_STANDARD 17) -set(CMAKE_C_FLAGS_RELEASE "-O3") -set(CMAKE_CXX_FLAGS_RELEASE "-O3") + if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma -mf16c -mavx -mavx2") @@ -28,9 +27,21 @@ add_library(bark-rn SHARED cpp-adapter.cpp ) +if (CMAKE_SYSTEM_PROCESSOR MATCHES "arm64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + target_compile_options(bark-rn PRIVATE -march=armv8.2-a+fp16) +endif() + +target_compile_options(bark-rn PRIVATE -O3 -DNDEBUG -pthread) +# target_compile_options(bark-rn PRIVATE -fvisibility=hidden -fvisibility-inlines-hidden) +# target_compile_options(bark-rn PRIVATE -ffunction-sections -fdata-sections) + +target_link_options(bark-rn PRIVATE -Wl,--gc-sections) +target_link_options(bark-rn PRIVATE -Wl,--exclude-libs,ALL) +target_link_options(bark-rn PRIVATE -flto) + # Specifies a path to native header files. include_directories( ../cpp ) -target_link_libraries(bark-rn PUBLIC m) +target_link_libraries(bark-rn PUBLIC log) diff --git a/android/cpp-adapter.cpp b/android/cpp-adapter.cpp index 202dfa7..396a7f0 100644 --- a/android/cpp-adapter.cpp +++ b/android/cpp-adapter.cpp @@ -3,6 +3,7 @@ #include #include #include +#include template T get_map_value(JNIEnv *env, jobject params, const char *key) { @@ -104,7 +105,8 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_barkrn_BarkContext_nativeInitContext if (has_map_key(env, jParams, "seed")) { seed = get_map_value(env, jParams, "seed"); } - auto model_path = env->GetStringUTFChars(jPath, nullptr); + const char *model_path = env->GetStringUTFChars(jPath, nullptr); + __android_log_print(ANDROID_LOG_INFO, "BarkRN", "Loading model from %s", model_path); bark_context *context = bark_load_model(model_path, params, seed); env->ReleaseStringUTFChars(jPath, model_path); return reinterpret_cast(context); @@ -116,23 +118,25 @@ extern "C" JNIEXPORT jobject JNICALL Java_com_barkrn_BarkContext_nativeGenerate( auto context = reinterpret_cast(jCtx); int threads = jThreads; if (threads < 0) { - threads = std::thread::hardware_concurrency() << 1; + threads = std::thread::hardware_concurrency() >> 1; } else if (threads == 0) { threads = 1; } - auto text = env->GetStringUTFChars(jText, nullptr); + const char *text = env->GetStringUTFChars(jText, nullptr); + __android_log_print(ANDROID_LOG_INFO, "BarkRN", "Generating %s with %d threads", text, threads); auto success = bark_generate_audio(context, text, threads); env->ReleaseStringUTFChars(jText, text); - const float *audio_data = bark_get_audio_data(context); - const int audio_samples = bark_get_audio_data_size(context); - if (success) { - auto dest_path = env->GetStringUTFChars(jOutPath, nullptr); - std::vector audio_data_vec(audio_data, audio_data + audio_samples); - barkrn::pcmToWav(audio_data_vec, sample_rate, dest_path); - env->ReleaseStringUTFChars(jOutPath, dest_path); - } + float *audio_data = bark_get_audio_data(context); + int audio_samples = bark_get_audio_data_size(context); + const char *dest_path = env->GetStringUTFChars(jOutPath, nullptr); + __android_log_print(ANDROID_LOG_INFO, "BarkRN", "Generated %d audio samples", audio_samples); + if (success && audio_samples > 0) { + barkrn::pcmToWav(audio_data, audio_samples, sample_rate, dest_path); + } + env->ReleaseStringUTFChars(jOutPath, dest_path); const auto load_time = 
bark_get_load_time(context); const auto eval_time = bark_get_eval_time(context); + __android_log_print(ANDROID_LOG_INFO, "BarkRN", "Load time: %f, Eval time: %f", load_time, eval_time); auto result_class = env->FindClass("com/barkrn/BarkContext$BarkResult"); jobject result = env->NewObject( result_class, env->GetMethodID(result_class, "", "(ZII)V"), success, diff --git a/android/src/main/java/com/barkrn/BarkRnModule.kt b/android/src/main/java/com/barkrn/BarkRnModule.kt index 08d7e1c..04240b1 100644 --- a/android/src/main/java/com/barkrn/BarkRnModule.kt +++ b/android/src/main/java/com/barkrn/BarkRnModule.kt @@ -33,29 +33,33 @@ class BarkRnModule internal constructor(context: ReactApplicationContext) : @ReactMethod override fun init_context(model_path: String, params: ReadableMap, promise: Promise) { - try { - val id = next_id - next_id += 1 - contexts[id] = BarkContext(model_path, params.toHashMap()) - promise.resolve(id) - } catch (e: Exception) { - promise.reject(e) - } + Thread { + try { + val id = next_id + next_id += 1 + contexts[id] = BarkContext(model_path, params.toHashMap()) + promise.resolve(id) + } catch (e: Exception) { + promise.reject(e) + } + }.start() } @ReactMethod override fun generate(id: Int, text: String, audio_path: String, promise: Promise) { contexts[id]?.let { context -> - try { - val result = context.generate(text, audio_path) - val resultMap = Arguments.createMap() - resultMap.putBoolean("success", result.success) - resultMap.putInt("load_time", result.load_time) - resultMap.putInt("eval_time", result.eval_time) - promise.resolve(resultMap) - } catch (e: Exception) { - promise.reject(e) - } + Thread { + try { + val result = context.generate(text, audio_path) + val resultMap = Arguments.createMap() + resultMap.putBoolean("success", result.success) + resultMap.putInt("load_time", result.load_time) + resultMap.putInt("eval_time", result.eval_time) + promise.resolve(resultMap) + } catch (e: Exception) { + promise.reject(e) + } + }.start() } ?: promise.reject("Context not found") } diff --git a/bark-rn.podspec b/bark-rn.podspec index c8c84a2..0151ac3 100644 --- a/bark-rn.podspec +++ b/bark-rn.podspec @@ -3,6 +3,15 @@ require "json" package = JSON.parse(File.read(File.join(__dir__, "package.json"))) folly_compiler_flags = '-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1 -Wno-comma -Wno-shorten-64-to-32' +base_compiler_flags = "-DWSP_GGML_USE_ACCELERATE -Wno-shorten-64-to-32" +folly_compiler_flags = "-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1 -Wno-comma" + +# Use base_optimizer_flags = "" for debug builds +# base_optimizer_flags = "" +base_optimizer_flags = "-O3 -DNDEBUG" + + " -fvisibility=hidden -fvisibility-inlines-hidden" + + " -ffunction-sections -fdata-sections" + Pod::Spec.new do |s| s.name = "bark-rn" s.version = package["version"] @@ -11,7 +20,7 @@ Pod::Spec.new do |s| s.license = package["license"] s.authors = package["author"] - s.platforms = { :ios => "14.0", :tvos => "14.0" } + s.platforms = { :ios => "11.0", :tvos => "11.0" } s.source = { :git => "https://github.com/mybigday/bark.rn.git", :tag => "#{s.version}" } s.source_files = "ios/**/*.{h,m,mm}", @@ -22,6 +31,12 @@ Pod::Spec.new do |s| "cpp/bark.{cpp,h}", "cpp/dr_wav.h", "cpp/utils.{cpp,h}" + + s.compiler_flags = base_compiler_flags + s.pod_target_xcconfig = { + "OTHER_CFLAGS" => base_optimizer_flags, + "OTHER_CPLUSPLUSFLAGS" => base_optimizer_flags + } # Use install_modules_dependencies helper to install the dependencies if React Native version >=0.71.0. 
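Reviewer note on the cpp-adapter.cpp hunk above: the changes follow the standard JNI pattern of borrowing a jstring's UTF-8 buffer, logging through the NDK logging API from <android/log.h>, and releasing the buffer once the native call returns. A minimal sketch of that pattern, assuming a hypothetical entry point (the class, method, and tag names below are illustrative, not part of this diff):

#include <jni.h>
#include <android/log.h>

// Hypothetical JNI entry point mirroring the Get/Release + logging pattern
// used by nativeInitContext and nativeGenerate in this diff.
extern "C" JNIEXPORT jboolean JNICALL
Java_com_barkrn_Example_nativeHello(JNIEnv *env, jobject /*thiz*/, jstring jText) {
  const char *text = env->GetStringUTFChars(jText, nullptr);
  if (text == nullptr) {
    return JNI_FALSE;  // GetStringUTFChars already threw OutOfMemoryError
  }
  __android_log_print(ANDROID_LOG_INFO, "BarkRN", "Received: %s", text);
  // ... call into the native library here ...
  env->ReleaseStringUTFChars(jText, text);  // always pair with GetStringUTFChars
  return JNI_TRUE;
}

Releasing after use matters because GetStringUTFChars may pin or copy the Java string; both native methods in the diff follow this same discipline.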
# See https://github.com/facebook/react-native/blob/febf6b7f33fdb4904669f99d795eba4c0f95d7bf/scripts/cocoapods/new_architecture.rb#L79. @@ -32,7 +47,7 @@ Pod::Spec.new do |s| # Don't install the dependencies when we run `pod install` in the old architecture. if ENV['RCT_NEW_ARCH_ENABLED'] == '1' then - s.compiler_flags = folly_compiler_flags + " -DRCT_NEW_ARCH_ENABLED=1" + s.compiler_flags = base_compiler_flags + " " + folly_compiler_flags + " -DRCT_NEW_ARCH_ENABLED=1" s.pod_target_xcconfig = { "HEADER_SEARCH_PATHS" => "\"$(PODS_ROOT)/boost\"", "OTHER_CPLUSPLUSFLAGS" => "-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1", diff --git a/cpp/bark.cpp b/cpp/bark.cpp index bc589e2..94754fa 100644 --- a/cpp/bark.cpp +++ b/cpp/bark.cpp @@ -2,11 +2,11 @@ #include "ggml-backend.h" #include "ggml.h" -#ifdef GGML_USE_CUBLAS +#ifdef BARK_GGML_USE_CUBLAS #include "ggml-cuda.h" #endif -#ifdef GGML_USE_METAL +#ifdef BARK_GGML_USE_METAL #include "ggml-metal.h" #endif @@ -61,52 +61,52 @@ struct gpt_hparams { struct gpt_layer { // normalization - struct ggml_tensor* ln_1_g; - struct ggml_tensor* ln_1_b; + struct bark_ggml_tensor* ln_1_g; + struct bark_ggml_tensor* ln_1_b; - struct ggml_tensor* ln_2_g; - struct ggml_tensor* ln_2_b; + struct bark_ggml_tensor* ln_2_g; + struct bark_ggml_tensor* ln_2_b; // attention - struct ggml_tensor* c_attn_attn_w; - struct ggml_tensor* c_attn_attn_b; + struct bark_ggml_tensor* c_attn_attn_w; + struct bark_ggml_tensor* c_attn_attn_b; - struct ggml_tensor* c_attn_proj_w; - struct ggml_tensor* c_attn_proj_b; + struct bark_ggml_tensor* c_attn_proj_w; + struct bark_ggml_tensor* c_attn_proj_b; // mlp - struct ggml_tensor* c_mlp_fc_w; - struct ggml_tensor* c_mlp_fc_b; + struct bark_ggml_tensor* c_mlp_fc_w; + struct bark_ggml_tensor* c_mlp_fc_b; - struct ggml_tensor* c_mlp_proj_w; - struct ggml_tensor* c_mlp_proj_b; + struct bark_ggml_tensor* c_mlp_proj_w; + struct bark_ggml_tensor* c_mlp_proj_b; }; struct gpt_model { gpt_hparams hparams; // normalization - struct ggml_tensor* ln_f_g; - struct ggml_tensor* ln_f_b; + struct bark_ggml_tensor* ln_f_g; + struct bark_ggml_tensor* ln_f_b; - struct ggml_tensor* wpe; // position embedding - std::vector wtes; // token embedding - std::vector lm_heads; // language model head + struct bark_ggml_tensor* wpe; // position embedding + std::vector wtes; // token embedding + std::vector lm_heads; // language model head std::vector layers; // key + value memory - struct ggml_tensor* memory_k; - struct ggml_tensor* memory_v; + struct bark_ggml_tensor* memory_k; + struct bark_ggml_tensor* memory_v; - struct ggml_context* ctx; + struct bark_ggml_context* ctx; - ggml_backend_t backend = NULL; + bark_ggml_backend_t backend = NULL; - ggml_backend_buffer_t buffer_w; - ggml_backend_buffer_t buffer_kv; + bark_ggml_backend_buffer_t buffer_w; + bark_ggml_backend_buffer_t buffer_kv; - std::map tensors; + std::map tensors; // int64_t t_sample_us = 0; @@ -136,10 +136,10 @@ struct bark_context { struct encodec_context* encodec_ctx; // buffer for model evaluation - ggml_backend_buffer_t buf_compute; + bark_ggml_backend_buffer_t buf_compute; // custom allocator - struct ggml_allocr* allocr = NULL; + struct bark_ggml_allocr* allocr = NULL; int n_gpu_layers = 0; std::mt19937 rng; @@ -174,11 +174,11 @@ static void write_safe(std::ofstream& fout, T& dest) { } static void bark_print_statistics(gpt_model* model) { - printf("\n\n"); - printf("%s: sample time = %8.2f ms / %lld tokens\n", __func__, model->t_sample_us / 1000.0f, model->n_sample); - printf("%s: predict 
time = %8.2f ms / %.2f ms per token\n", __func__, model->t_predict_us / 1000.0f, model->t_predict_us / model->n_sample / 1000.0f); - printf("%s: total time = %8.2f ms\n", __func__, model->t_main_us / 1000.0f); - printf("\n"); + LOGI("\n\n"); + LOGI("%s: sample time = %8.2f ms / %lld tokens\n", __func__, model->t_sample_us / 1000.0f, model->n_sample); + LOGI("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, model->t_predict_us / 1000.0f, model->t_predict_us / model->n_sample / 1000.0f); + LOGI("%s: total time = %8.2f ms\n", __func__, model->t_main_us / 1000.0f); + LOGI("\n"); } static void softmax(std::vector& logits) { @@ -254,7 +254,7 @@ static bark_token gpt_sample( float* eos_p, int64_t* t_sample_us, int64_t* n_sample) { - int64_t t_sample_start_us = ggml_time_us(); + int64_t t_sample_start_us = bark_ggml_time_us(); bark_token res; if (temp == 0.0f) { @@ -263,53 +263,53 @@ static bark_token gpt_sample( res = gpt_multinomial_sample(logits, rng, temp, eos_p); } - int64_t t_sample_end_us = ggml_time_us(); + int64_t t_sample_end_us = bark_ggml_time_us(); *t_sample_us += (t_sample_end_us - t_sample_start_us); *n_sample += 1; return res; } -static bool ggml_quantize_weights( +static bool bark_ggml_quantize_weights( std::ifstream& fin, std::ofstream& fout, - const ggml_ftype ftype, + const bark_ggml_ftype ftype, const std::vector& to_quant, const std::vector& to_skip) { - ggml_type qtype = GGML_TYPE_F32; + bark_ggml_type qtype = BARK_GGML_TYPE_F32; switch (ftype) { - case GGML_FTYPE_MOSTLY_Q4_0: - qtype = GGML_TYPE_Q4_0; + case BARK_GGML_FTYPE_MOSTLY_Q4_0: + qtype = BARK_GGML_TYPE_Q4_0; break; - case GGML_FTYPE_MOSTLY_Q4_1: - qtype = GGML_TYPE_Q4_1; + case BARK_GGML_FTYPE_MOSTLY_Q4_1: + qtype = BARK_GGML_TYPE_Q4_1; break; - case GGML_FTYPE_MOSTLY_Q5_0: - qtype = GGML_TYPE_Q5_0; + case BARK_GGML_FTYPE_MOSTLY_Q5_0: + qtype = BARK_GGML_TYPE_Q5_0; break; - case GGML_FTYPE_MOSTLY_Q5_1: - qtype = GGML_TYPE_Q5_1; + case BARK_GGML_FTYPE_MOSTLY_Q5_1: + qtype = BARK_GGML_TYPE_Q5_1; break; - case GGML_FTYPE_MOSTLY_Q8_0: - qtype = GGML_TYPE_Q8_0; + case BARK_GGML_FTYPE_MOSTLY_Q8_0: + qtype = BARK_GGML_TYPE_Q8_0; break; - case GGML_FTYPE_UNKNOWN: - case GGML_FTYPE_ALL_F32: - case GGML_FTYPE_MOSTLY_F16: - case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: - case GGML_FTYPE_MOSTLY_Q2_K: - case GGML_FTYPE_MOSTLY_Q3_K: - case GGML_FTYPE_MOSTLY_Q4_K: - case GGML_FTYPE_MOSTLY_Q5_K: - case GGML_FTYPE_MOSTLY_Q6_K: { - fprintf(stderr, "%s: invalid model type %d\n", __func__, ftype); + case BARK_GGML_FTYPE_UNKNOWN: + case BARK_GGML_FTYPE_ALL_F32: + case BARK_GGML_FTYPE_MOSTLY_F16: + case BARK_GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: + case BARK_GGML_FTYPE_MOSTLY_Q2_K: + case BARK_GGML_FTYPE_MOSTLY_Q3_K: + case BARK_GGML_FTYPE_MOSTLY_Q4_K: + case BARK_GGML_FTYPE_MOSTLY_Q5_K: + case BARK_GGML_FTYPE_MOSTLY_Q6_K: { + LOGE("%s: invalid model type %d\n", __func__, ftype); return false; } }; - if (!ggml_is_quantized(qtype)) { - fprintf(stderr, "%s: invalid quantization type %d (%s)\n", __func__, qtype, ggml_type_name(qtype)); + if (!bark_ggml_is_quantized(qtype)) { + LOGE("%s: invalid quantization type %d (%s)\n", __func__, qtype, bark_ggml_type_name(qtype)); return false; } @@ -319,7 +319,7 @@ static bool ggml_quantize_weights( std::vector work; std::vector data_u8; - std::vector data_f16; + std::vector data_f16; std::vector data_f32; std::vector hist_all(1 << 4, 0); @@ -347,7 +347,7 @@ static bool ggml_quantize_weights( std::string name(length, 0); fin.read(&name[0], length); - printf("%64s - [%5d, %5d, %5d], type = %6s ", 
name.data(), ne[0], ne[1], ne[2], ggml_type_name((ggml_type)ttype)); + LOGI("%64s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], bark_ggml_type_name((bark_ggml_type)ttype)); bool quantize = false; @@ -371,17 +371,17 @@ static bool ggml_quantize_weights( quantize &= (n_dims == 2); if (quantize) { - if (ttype != GGML_TYPE_F32 && ttype != GGML_TYPE_F16) { - fprintf(stderr, "%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, ggml_type_name((ggml_type)ttype)); + if (ttype != BARK_GGML_TYPE_F32 && ttype != BARK_GGML_TYPE_F16) { + LOGE("%s: unsupported ttype %d (%s) for integer quantization\n", __func__, ttype, bark_ggml_type_name((bark_ggml_type)ttype)); return false; } - if (ttype == GGML_TYPE_F16) { + if (ttype == BARK_GGML_TYPE_F16) { data_f16.resize(nelements); - fin.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); + fin.read(reinterpret_cast(data_f16.data()), nelements * sizeof(bark_ggml_fp16_t)); data_f32.resize(nelements); for (int i = 0; i < nelements; ++i) { - data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); + data_f32[i] = bark_ggml_fp16_to_fp32(data_f16[i]); } } else { data_f32.resize(nelements); @@ -410,36 +410,36 @@ static bool ggml_quantize_weights( size_t cur_size = 0; std::vector hist_cur(1 << 4, 0); - switch ((ggml_type)ttype) { - case GGML_TYPE_Q4_0: { - cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); + switch ((bark_ggml_type)ttype) { + case BARK_GGML_TYPE_Q4_0: { + cur_size = bark_ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; - case GGML_TYPE_Q4_1: { - cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); + case BARK_GGML_TYPE_Q4_1: { + cur_size = bark_ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; - case GGML_TYPE_Q5_0: { - cur_size = ggml_quantize_q5_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); + case BARK_GGML_TYPE_Q5_0: { + cur_size = bark_ggml_quantize_q5_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; - case GGML_TYPE_Q5_1: { - cur_size = ggml_quantize_q5_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); + case BARK_GGML_TYPE_Q5_1: { + cur_size = bark_ggml_quantize_q5_1(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; - case GGML_TYPE_Q8_0: { - cur_size = ggml_quantize_q8_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); + case BARK_GGML_TYPE_Q8_0: { + cur_size = bark_ggml_quantize_q8_0(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data()); } break; - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_Q8_K: - case GGML_TYPE_COUNT: { - fprintf(stderr, "%s: unsupported quantization type %d (%s)\n", __func__, ttype, ggml_type_name((ggml_type)ttype)); + case BARK_GGML_TYPE_F32: + case BARK_GGML_TYPE_F16: + case BARK_GGML_TYPE_I8: + case BARK_GGML_TYPE_I16: + case BARK_GGML_TYPE_I32: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q8_K: + case BARK_GGML_TYPE_COUNT: { + LOGE("%s: unsupported quantization type %d (%s)\n", __func__, ttype, 
bark_ggml_type_name((bark_ggml_type)ttype)); return false; } } @@ -447,17 +447,17 @@ static bool ggml_quantize_weights( fout.write(reinterpret_cast(work.data()), cur_size); total_size_new += cur_size; - printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float) / 1024.0 / 1024.0, cur_size / 1024.0 / 1024.0); + LOGI("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float) / 1024.0 / 1024.0, cur_size / 1024.0 / 1024.0); for (int i = 0; i < (int)hist_cur.size(); ++i) { hist_all[i] += hist_cur[i]; } for (int i = 0; i < (int)hist_cur.size(); ++i) { - printf("%5.3f ", hist_cur[i] / (float)nelements); + LOGI("%5.3f ", hist_cur[i] / (float)nelements); } - printf("\n"); + LOGI("\n"); } else { - printf("size = %8.3f MB\n", data_u8.size() / 1024.0 / 1024.0); + LOGI("size = %8.3f MB\n", data_u8.size() / 1024.0 / 1024.0); fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); total_size_new += data_u8.size(); } @@ -465,8 +465,8 @@ static bool ggml_quantize_weights( total_size_org += nelements * sizeof(float); } - printf("%s: model size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); - printf("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new / 1024.0 / 1024.0, ftype, ggml_type_name(qtype)); + LOGI("%s: model size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); + LOGI("%s: quant size = %8.2f MB | ftype = %d (%s)\n", __func__, total_size_new / 1024.0 / 1024.0, ftype, bark_ggml_type_name(qtype)); { int64_t sum_all = 0; @@ -474,11 +474,11 @@ static bool ggml_quantize_weights( sum_all += hist_all[i]; } - printf("%s: hist: ", __func__); + LOGI("%s: hist: ", __func__); for (int i = 0; i < (int)hist_all.size(); ++i) { - printf("%5.3f ", hist_all[i] / (float)sum_all); + LOGI("%5.3f ", hist_all[i] / (float)sum_all); } - printf("\n"); + LOGI("\n"); } return true; @@ -616,7 +616,7 @@ void bert_tokenize( --j; } if (j == i) { - fprintf(stderr, "%s: unknown token '%s'\n", __func__, word.substr(i, 1).data()); + LOGE("%s: unknown token '%s'\n", __func__, word.substr(i, 1).data()); prefix = "##"; ++i; } @@ -646,7 +646,7 @@ static void bark_tokenize_input(struct bark_context* bctx, const std::string& te for (int i = n_tokens; i < max_ctx_size; i++) tokens[i] = params.text_pad_token; } else if (n_tokens > max_ctx_size) { - fprintf(stderr, "%s: input sequence is too long (%d > 256), truncating sequence", __func__, n_tokens); + LOGE("%s: input sequence is too long (%d > 256), truncating sequence", __func__, n_tokens); } tokens.resize(max_ctx_size); @@ -660,12 +660,12 @@ static void bark_tokenize_input(struct bark_context* bctx, const std::string& te bctx->tokens = tokens; - printf("%s: prompt: '%s'\n", __func__, text.c_str()); - printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, bctx->tokens.size()); + LOGI("%s: prompt: '%s'\n", __func__, text.c_str()); + LOGI("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, bctx->tokens.size()); for (int i = 0; i < std::min(8, (int)bctx->tokens.size()); i++) { - printf("%d ", bctx->tokens[i]); + LOGI("%d ", bctx->tokens[i]); } - printf("\n\n"); + LOGI("\n\n"); } static bool bark_vocab_load(std::ifstream& fin, bark_vocab* vocab) { @@ -712,30 +712,30 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye read_safe(fin, hparams.n_wtes); read_safe(fin, hparams.ftype); - const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; + const int32_t qntvr = hparams.ftype / BARK_GGML_QNT_VERSION_FACTOR; if (verbosity == bark_verbosity_level::MEDIUM 
|| verbosity == bark_verbosity_level::HIGH) { - printf("%s: n_in_vocab = %d\n", __func__, hparams.n_in_vocab); - printf("%s: n_out_vocab = %d\n", __func__, hparams.n_out_vocab); - printf("%s: block_size = %d\n", __func__, hparams.block_size); - printf("%s: bias = %d\n", __func__, hparams.bias); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: n_lm_heads = %d\n", __func__, hparams.n_lm_heads); - printf("%s: n_wtes = %d\n", __func__, hparams.n_wtes); - printf("%s: ftype = %d\n", __func__, hparams.ftype); - printf("%s: qntvr = %d\n", __func__, qntvr); + LOGI("%s: n_in_vocab = %d\n", __func__, hparams.n_in_vocab); + LOGI("%s: n_out_vocab = %d\n", __func__, hparams.n_out_vocab); + LOGI("%s: block_size = %d\n", __func__, hparams.block_size); + LOGI("%s: bias = %d\n", __func__, hparams.bias); + LOGI("%s: n_embd = %d\n", __func__, hparams.n_embd); + LOGI("%s: n_head = %d\n", __func__, hparams.n_head); + LOGI("%s: n_layer = %d\n", __func__, hparams.n_layer); + LOGI("%s: n_lm_heads = %d\n", __func__, hparams.n_lm_heads); + LOGI("%s: n_wtes = %d\n", __func__, hparams.n_wtes); + LOGI("%s: ftype = %d\n", __func__, hparams.ftype); + LOGI("%s: qntvr = %d\n", __func__, qntvr); } - hparams.ftype %= GGML_QNT_VERSION_FACTOR; + hparams.ftype %= BARK_GGML_QNT_VERSION_FACTOR; } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation - ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype)); - if (wtype == GGML_TYPE_COUNT) { - fprintf(stderr, "%s: invalid model file (bad ftype value %d)\n", + bark_ggml_type wtype = bark_ggml_ftype_to_bark_ggml_type((bark_ggml_ftype)(model.hparams.ftype)); + if (wtype == BARK_GGML_TYPE_COUNT) { + LOGE("%s: invalid model file (bad ftype value %d)\n", __func__, model.hparams.ftype); return false; } @@ -758,32 +758,32 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye const int n_wtes = hparams.n_wtes; const int bias = hparams.bias; - buffer_size += n_embd * ggml_type_size(GGML_TYPE_F32); // ln_f_g + buffer_size += n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32); // ln_f_g - buffer_size += n_wtes * n_in_vocab * n_embd * ggml_type_size(wtype); // wtes - buffer_size += block_size * n_embd * ggml_type_size(GGML_TYPE_F32); // wpe - buffer_size += n_lm_heads * n_out_vocab * n_embd * ggml_type_size(wtype); // lm_head + buffer_size += n_wtes * n_in_vocab * n_embd * bark_ggml_type_size(wtype); // wtes + buffer_size += block_size * n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32); // wpe + buffer_size += n_lm_heads * n_out_vocab * n_embd * bark_ggml_type_size(wtype); // lm_head - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // ln_1_g - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // ln_2_g + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // ln_1_g + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // ln_2_g - buffer_size += n_layer * (3 * n_embd * n_embd * ggml_type_size(wtype)); // c_attn_attn_w - buffer_size += n_layer * (n_embd * n_embd * ggml_type_size(wtype)); // c_attn_proj_w + buffer_size += n_layer * (3 * n_embd * n_embd * bark_ggml_type_size(wtype)); // c_attn_attn_w + buffer_size += n_layer * (n_embd * n_embd * bark_ggml_type_size(wtype)); // c_attn_proj_w - buffer_size += n_layer * (4 * n_embd 
* n_embd * ggml_type_size(wtype)); // c_mlp_fc_w - buffer_size += n_layer * (4 * n_embd * n_embd * ggml_type_size(wtype)); // c_mlp_proj_w + buffer_size += n_layer * (4 * n_embd * n_embd * bark_ggml_type_size(wtype)); // c_mlp_fc_w + buffer_size += n_layer * (4 * n_embd * n_embd * bark_ggml_type_size(wtype)); // c_mlp_proj_w if (bias) { - buffer_size += n_embd * ggml_type_size(GGML_TYPE_F32); // ln_f_b + buffer_size += n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32); // ln_f_b - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // ln_1_b - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // ln_2_b + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // ln_1_b + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // ln_2_b - buffer_size += n_layer * (3 * n_embd * ggml_type_size(GGML_TYPE_F32)); // c_attn_attn_b - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // c_attn_proj_b + buffer_size += n_layer * (3 * n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // c_attn_attn_b + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // c_attn_proj_b - buffer_size += n_layer * (4 * n_embd * ggml_type_size(GGML_TYPE_F32)); // c_mlp_fc_b - buffer_size += n_layer * (n_embd * ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b + buffer_size += n_layer * (4 * n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // c_mlp_fc_b + buffer_size += n_layer * (n_embd * bark_ggml_type_size(BARK_GGML_TYPE_F32)); // c_mlp_proj_b } buffer_size += 10ull * MB; // object overhead @@ -804,42 +804,42 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye } if (verbosity == bark_verbosity_level::HIGH) { - printf("%s: ggml tensor size = %d bytes\n", __func__, (int)sizeof(ggml_tensor)); - printf("%s: ggml ctx size = %6.2f MB\n", __func__, buffer_size / (1024.0 * 1024.0)); + LOGI("%s: ggml tensor size = %d bytes\n", __func__, (int)sizeof(bark_ggml_tensor)); + LOGI("%s: ggml ctx size = %6.2f MB\n", __func__, buffer_size / (1024.0 * 1024.0)); } } // create the ggml context { - struct ggml_init_params params = { - /*.mem_size =*/ggml_tensor_overhead() * n_tensors, + struct bark_ggml_init_params params = { + /*.mem_size =*/bark_ggml_tensor_overhead() * n_tensors, /*.mem_buffer =*/NULL, /*.no_alloc =*/true, }; - model.ctx = ggml_init(params); + model.ctx = bark_ggml_init(params); if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); + LOGE("%s: bark_ggml_init() failed\n", __func__); return false; } } -#ifdef GGML_USE_CUBLAS +#ifdef BARK_GGML_USE_CUBLAS if (n_gpu_layers > 0) { - fprintf(stderr, "%s: using CUDA backend\n", __func__); - model.backend = ggml_backend_cuda_init(); + LOGE("%s: using CUDA backend\n", __func__); + model.backend = bark_ggml_backend_cuda_init(); if (!model.backend) { - fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); + LOGE("%s: bark_ggml_backend_cuda_init() failed\n", __func__); } } #endif -#ifdef GGML_USE_METAL +#ifdef BARK_GGML_USE_METAL if (n_gpu_layers > 0) { - fprintf(stderr, "%s: using Metal backend\n", __func__); - model.backend = ggml_backend_metal_init(); + LOGE("%s: using Metal backend\n", __func__); + model.backend = bark_ggml_backend_metal_init(); if (!model.backend) { - fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); + LOGE("%s: bark_ggml_backend_metal_init() failed\n", __func__); } } #endif @@ -847,21 +847,21 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, 
int n_gpu_laye if (!model.backend) { // fallback to CPU backend if (verbosity == bark_verbosity_level::HIGH) { - fprintf(stderr, "%s: no backend specified, using CPU backend\n", __func__); + LOGE("%s: no backend specified, using CPU backend\n", __func__); } - model.backend = ggml_backend_cpu_init(); + model.backend = bark_ggml_backend_cpu_init(); } if (!model.backend) { if (verbosity == bark_verbosity_level::HIGH) { - fprintf(stderr, "%s: failed to initialize CPU backend\n", __func__); + LOGE("%s: failed to initialize CPU backend\n", __func__); } return false; } // allocate weights buffer - model.buffer_w = ggml_backend_alloc_buffer(model.backend, buffer_size); + model.buffer_w = bark_ggml_backend_alloc_buffer(model.backend, buffer_size); // prepare memory for the weights { @@ -880,21 +880,21 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye model.lm_heads.resize(n_lm_heads); model.wtes.resize(n_wtes); - model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + model.ln_f_g = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); if (bias) { - model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + model.ln_f_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); } - model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, block_size); + model.wpe = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, n_embd, block_size); for (int i = 0; i < n_wtes; i++) { - model.wtes[i] = ggml_new_tensor_2d(ctx, wtype, n_embd, n_in_vocab); + model.wtes[i] = bark_ggml_new_tensor_2d(ctx, wtype, n_embd, n_in_vocab); model.tensors["model/wte/" + std::to_string(i)] = model.wtes[i]; } for (int i = 0; i < n_lm_heads; i++) { - model.lm_heads[i] = ggml_new_tensor_2d(ctx, wtype, n_embd, n_out_vocab); + model.lm_heads[i] = bark_ggml_new_tensor_2d(ctx, wtype, n_embd, n_out_vocab); model.tensors["model/lm_head/" + std::to_string(i)] = model.lm_heads[i]; } @@ -906,24 +906,24 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye for (int i = 0; i < n_layer; ++i) { auto& layer = model.layers[i]; - layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.ln_1_g = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); + layer.ln_2_g = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); - layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd); - layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.c_attn_attn_w = bark_ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd); + layer.c_attn_proj_w = bark_ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd); - layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd); + layer.c_mlp_fc_w = bark_ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd); + layer.c_mlp_proj_w = bark_ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd); if (bias) { - layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.ln_1_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); + layer.ln_2_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); - layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3 * n_embd); - layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.c_attn_attn_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 3 * n_embd); + layer.c_attn_proj_b = 
bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); - layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * n_embd); - layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.c_mlp_fc_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * n_embd); + layer.c_mlp_proj_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_embd); } // map by name @@ -965,36 +965,36 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye // hack: if one LM head and one token embedding layer, we are loading weights // of the text and coarse encoder. In this case, we need KV cache. // for fine encoder, no need for KV cache, skip this part. - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); + model.memory_k = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_elements); + model.memory_v = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, n_elements); - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); + const size_t memory_size = bark_ggml_nbytes(model.memory_k) + bark_ggml_nbytes(model.memory_v); if (verbosity == bark_verbosity_level::HIGH) { - printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size / 1024.0 / 1024.0, n_mem); + LOGI("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size / 1024.0 / 1024.0, n_mem); } // create a backend buffer (can be in host or device memory) - model.buffer_kv = ggml_backend_alloc_buffer(model.backend, memory_size + 256); + model.buffer_kv = bark_ggml_backend_alloc_buffer(model.backend, memory_size + 256); // allocate the tensors into the backend buffer { - ggml_allocr* alloc = ggml_allocr_new_from_buffer(model.buffer_kv); + bark_ggml_allocr* alloc = bark_ggml_allocr_new_from_buffer(model.buffer_kv); // this updates the pointers in the tensors to point to the correct location in the buffer - // this is necessary since the ggml_context is .no_alloc == true + // this is necessary since the bark_ggml_context is .no_alloc == true // note that the buffer can actually be a device buffer, depending on the backend - ggml_allocr_alloc(alloc, model.memory_k); - ggml_allocr_alloc(alloc, model.memory_v); + bark_ggml_allocr_alloc(alloc, model.memory_k); + bark_ggml_allocr_alloc(alloc, model.memory_v); - ggml_allocr_free(alloc); + bark_ggml_allocr_free(alloc); } } } // load weights { - ggml_allocr* alloc = ggml_allocr_new_from_buffer(model.buffer_w); + bark_ggml_allocr* alloc = bark_ggml_allocr_new_from_buffer(model.buffer_w); size_t total_size = 0; @@ -1004,7 +1004,7 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye read_safe(fin, n_tensors); if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - printf("%s: loading %d tensors\n", __func__, n_tensors); + LOGI("%s: loading %d tensors\n", __func__, n_tensors); } for (int i = 0; i < n_tensors; i++) { @@ -1027,59 +1027,59 @@ static bool bark_model_load(std::ifstream& fin, gpt_model& model, int n_gpu_laye fin.read(&name[0], length); if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); + LOGE("%s: unknown tensor '%s' in model file\n", __func__, name.data()); return false; } auto tensor = model.tensors[name]; - ggml_set_name(tensor, name.c_str()); + bark_ggml_set_name(tensor, name.c_str()); if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: 
tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", + LOGE("%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", __func__, name.data(), (int)tensor->ne[0], (int)tensor->ne[1], ne[0], ne[1]); return false; } - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); + if (bark_ggml_nelements(tensor) != nelements) { + LOGE("%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); return false; } - const size_t bpe = ggml_type_size(ggml_type(ttype)); + const size_t bpe = bark_ggml_type_size(bark_ggml_type(ttype)); - if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements * bpe); + if ((nelements * bpe) / bark_ggml_blck_size(tensor->type) != bark_ggml_nbytes(tensor)) { + LOGE("%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", + __func__, name.data(), bark_ggml_nbytes(tensor), nelements * bpe); return false; } - ggml_allocr_alloc(alloc, tensor); + bark_ggml_allocr_alloc(alloc, tensor); - if (ggml_backend_is_cpu(model.backend) -#ifdef GGML_USE_METAL - || ggml_backend_is_metal(model.backend) + if (bark_ggml_backend_is_cpu(model.backend) +#ifdef BARK_GGML_USE_METAL + || bark_ggml_backend_is_metal(model.backend) #endif ) { // for the CPU and Metal backends, we can read directly into the device memory - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + fin.read(reinterpret_cast(tensor->data), bark_ggml_nbytes(tensor)); } else { // read into a temporary buffer first, then copy to device memory - read_buf.resize(ggml_nbytes(tensor)); - fin.read(read_buf.data(), ggml_nbytes(tensor)); - ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor)); + read_buf.resize(bark_ggml_nbytes(tensor)); + fin.read(read_buf.data(), bark_ggml_nbytes(tensor)); + bark_ggml_backend_tensor_set(tensor, read_buf.data(), 0, bark_ggml_nbytes(tensor)); } if (verbosity == bark_verbosity_level::HIGH) { - printf("%48s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], "float", ggml_nbytes(tensor) / 1024.0 / 1024.0); + LOGI("%48s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], "float", bark_ggml_nbytes(tensor) / 1024.0 / 1024.0); } - total_size += ggml_nbytes(tensor); + total_size += bark_ggml_nbytes(tensor); } - ggml_allocr_free(alloc); + bark_ggml_allocr_free(alloc); if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - printf("%s: model size = %8.2f MB\n", __func__, total_size / 1024.0 / 1024.0); + LOGI("%s: model size = %8.2f MB\n", __func__, total_size / 1024.0 / 1024.0); } model.memsize = total_size; @@ -1093,12 +1093,12 @@ static bool bark_load_model_from_file( struct bark_context* bctx, bark_verbosity_level verbosity) { if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - printf("%s: loading model from '%s'\n", __func__, fname.c_str()); + LOGI("%s: loading model from '%s'\n", __func__, fname.c_str()); } auto fin = std::ifstream(fname, std::ios::binary); if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); + LOGE("%s: failed to open '%s'\n", __func__, fname.c_str()); return false; } @@ -1106,8 +1106,8 @@ static bool bark_load_model_from_file( { uint32_t magic; fin.read((char*)&magic, sizeof(magic)); - if (magic != 
GGML_FILE_MAGIC) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); + if (magic != BARK_GGML_FILE_MAGIC) { + LOGE("%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); return false; } } @@ -1115,11 +1115,11 @@ static bool bark_load_model_from_file( // vocab { if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - printf("%s: reading bark vocab\n", __func__); + LOGI("%s: reading bark vocab\n", __func__); } if (!bark_vocab_load(fin, &bctx->text_model.vocab)) { - fprintf(stderr, "%s: failed to load vocab\n", __func__); + LOGE("%s: failed to load vocab\n", __func__); return false; } } @@ -1129,11 +1129,11 @@ static bool bark_load_model_from_file( // text { if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - printf("%s: reading bark text model\n", __func__); + LOGI("%s: reading bark text model\n", __func__); } if (!bark_model_load(fin, bctx->text_model.semantic_model, n_gpu_layers, verbosity)) { - fprintf(stderr, "%s: invalid model file '%s' (bad text)\n", __func__, fname.c_str()); + LOGE("%s: invalid model file '%s' (bad text)\n", __func__, fname.c_str()); return false; } } @@ -1141,7 +1141,7 @@ static bool bark_load_model_from_file( // coarse { if (!bark_model_load(fin, bctx->text_model.coarse_model, n_gpu_layers, verbosity)) { - fprintf(stderr, "%s: invalid model file '%s' (bad coarse)\n", __func__, fname.c_str()); + LOGE("%s: invalid model file '%s' (bad coarse)\n", __func__, fname.c_str()); return false; } } @@ -1149,7 +1149,7 @@ static bool bark_load_model_from_file( // fine { if (!bark_model_load(fin, bctx->text_model.fine_model, n_gpu_layers, verbosity)) { - fprintf(stderr, "%s: invalid model file '%s' (bad fine)\n", __func__, fname.c_str()); + LOGE("%s: invalid model file '%s' (bad fine)\n", __func__, fname.c_str()); return false; } } @@ -1163,45 +1163,45 @@ static bool bark_load_model_from_file( bctx->encodec_ctx = encodec_load_model(fname.c_str(), offset, n_gpu_layers); if (!bctx->encodec_ctx) { - fprintf(stderr, "%s: invalid model file '%s' (bad encodec)\n", __func__, fname.c_str()); + LOGE("%s: invalid model file '%s' (bad encodec)\n", __func__, fname.c_str()); return false; } } - printf("\n"); + LOGI("\n"); return true; } struct bark_context* bark_load_model(const char* model_path, struct bark_context_params params, uint32_t seed) { - int64_t t_load_start_us = ggml_time_us(); + int64_t t_load_start_us = bark_ggml_time_us(); struct bark_context* bctx = new bark_context(); bctx->text_model = bark_model(); std::string model_path_str(model_path); if (!bark_load_model_from_file(model_path_str, bctx, params.verbosity)) { - fprintf(stderr, "%s: failed to load model weights from '%s'\n", __func__, model_path); + LOGE("%s: failed to load model weights from '%s'\n", __func__, model_path); return nullptr; } bctx->rng = std::mt19937(seed); bctx->params = params; - bctx->stats.t_load_us = ggml_time_us() - t_load_start_us; + bctx->stats.t_load_us = bark_ggml_time_us() - t_load_start_us; return bctx; } -static struct ggml_cgraph* bark_build_gpt_graph( +static struct bark_ggml_cgraph* bark_build_gpt_graph( gpt_model* model, - ggml_allocr* allocr, + bark_ggml_allocr* allocr, bark_sequence& tokens, int* n_past, bool merge_ctx, int n_threads) { if (!n_past) { - fprintf(stderr, "%s: n_past is null\n", __func__); + LOGE("%s: n_past is null\n", __func__); return NULL; } @@ -1216,32 +1216,32 @@ static struct ggml_cgraph* bark_build_gpt_graph( const int n_vocab = 
hparams.n_out_vocab; const int bias = hparams.bias; - static size_t buf_size = ggml_tensor_overhead() * GGML_MAX_NODES + ggml_graph_overhead(); + static size_t buf_size = bark_ggml_tensor_overhead() * BARK_GGML_MAX_NODES + bark_ggml_graph_overhead(); static std::vector buf(buf_size); - struct ggml_init_params ggml_params = { + struct bark_ggml_init_params bark_ggml_params = { /*.mem_size =*/buf_size, /*.mem_buffer =*/buf.data(), /*.no_alloc =*/true, }; - struct ggml_context* ctx0 = ggml_init(ggml_params); + struct bark_ggml_context* ctx0 = bark_ggml_init(bark_ggml_params); - struct ggml_cgraph* gf = ggml_new_graph(ctx0); + struct bark_ggml_cgraph* gf = bark_ggml_new_graph(ctx0); - struct ggml_tensor* input = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - ggml_allocr_alloc(allocr, input); + struct bark_ggml_tensor* input = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_I32, N); + bark_ggml_allocr_alloc(allocr, input); // avoid writing to tensors if we are only measuring the memory usage - if (!ggml_allocr_is_measure(allocr)) { - ggml_backend_tensor_set(input, tokens.data(), 0, N * ggml_element_size(input)); + if (!bark_ggml_allocr_is_measure(allocr)) { + bark_ggml_backend_tensor_set(input, tokens.data(), 0, N * bark_ggml_element_size(input)); } - struct ggml_tensor* tok_emb; + struct bark_ggml_tensor* tok_emb; if (*n_past > 0) { assert(N == 1); - tok_emb = ggml_get_rows(ctx0, model->wtes[0], input); + tok_emb = bark_ggml_get_rows(ctx0, model->wtes[0], input); } else { if (merge_ctx) { assert(N == 256 + 256 + 1); @@ -1251,197 +1251,197 @@ static struct ggml_cgraph* bark_build_gpt_graph( } if (merge_ctx) { - struct ggml_tensor* seq_embd = ggml_get_rows(ctx0, model->wtes[0], ggml_view_1d(ctx0, input, 256, 0)); - struct ggml_tensor* ctx_embd = ggml_get_rows(ctx0, model->wtes[0], ggml_view_1d(ctx0, input, 256, 256 * ggml_element_size(input))); - struct ggml_tensor* rem_embd = ggml_get_rows(ctx0, model->wtes[0], ggml_view_1d(ctx0, input, 1, 512 * ggml_element_size(input))); + struct bark_ggml_tensor* seq_embd = bark_ggml_get_rows(ctx0, model->wtes[0], bark_ggml_view_1d(ctx0, input, 256, 0)); + struct bark_ggml_tensor* ctx_embd = bark_ggml_get_rows(ctx0, model->wtes[0], bark_ggml_view_1d(ctx0, input, 256, 256 * bark_ggml_element_size(input))); + struct bark_ggml_tensor* rem_embd = bark_ggml_get_rows(ctx0, model->wtes[0], bark_ggml_view_1d(ctx0, input, 1, 512 * bark_ggml_element_size(input))); - struct ggml_tensor* cat_emb = ggml_add(ctx0, seq_embd, ctx_embd); + struct bark_ggml_tensor* cat_emb = bark_ggml_add(ctx0, seq_embd, ctx_embd); - tok_emb = ggml_new_tensor_2d(ctx0, cat_emb->type, cat_emb->ne[0], cat_emb->ne[1] + rem_embd->ne[1]); - ggml_allocr_alloc(allocr, tok_emb); + tok_emb = bark_ggml_new_tensor_2d(ctx0, cat_emb->type, cat_emb->ne[0], cat_emb->ne[1] + rem_embd->ne[1]); + bark_ggml_allocr_alloc(allocr, tok_emb); - tok_emb = ggml_set_1d(ctx0, tok_emb, cat_emb, 0); - tok_emb = ggml_set_1d(ctx0, tok_emb, rem_embd, cat_emb->ne[0] * cat_emb->ne[1] * ggml_element_size(cat_emb)); + tok_emb = bark_ggml_set_1d(ctx0, tok_emb, cat_emb, 0); + tok_emb = bark_ggml_set_1d(ctx0, tok_emb, rem_embd, cat_emb->ne[0] * cat_emb->ne[1] * bark_ggml_element_size(cat_emb)); } else { - tok_emb = ggml_get_rows(ctx0, model->wtes[0], input); + tok_emb = bark_ggml_get_rows(ctx0, model->wtes[0], input); } } - struct ggml_tensor* position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - ggml_allocr_alloc(allocr, position); - if (!ggml_allocr_is_measure(allocr)) { + struct bark_ggml_tensor* position = 
bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_I32, N); + bark_ggml_allocr_alloc(allocr, position); + if (!bark_ggml_allocr_is_measure(allocr)) { for (int i = 0; i < N; ++i) { int32_t v = *n_past + i; - ggml_backend_tensor_set(position, &v, i * sizeof(int32_t), sizeof(v)); + bark_ggml_backend_tensor_set(position, &v, i * sizeof(int32_t), sizeof(v)); } } - struct ggml_tensor* KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_allocr_alloc(allocr, KQ_scale); - if (!ggml_allocr_is_measure(allocr)) { + struct bark_ggml_tensor* KQ_scale = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, 1); + bark_ggml_allocr_alloc(allocr, KQ_scale); + if (!bark_ggml_allocr_is_measure(allocr)) { float s = 1.0f / sqrtf(float(n_embd) / n_head); - ggml_backend_tensor_set(KQ_scale, &s, 0, sizeof(s)); + bark_ggml_backend_tensor_set(KQ_scale, &s, 0, sizeof(s)); } // wte + wpe - struct ggml_tensor* inpL = ggml_add(ctx0, tok_emb, ggml_get_rows(ctx0, model->wpe, position)); + struct bark_ggml_tensor* inpL = bark_ggml_add(ctx0, tok_emb, bark_ggml_get_rows(ctx0, model->wpe, position)); for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor* cur; + struct bark_ggml_tensor* cur; // norm { - cur = ggml_norm(ctx0, inpL, EPS_NORM); + cur = bark_ggml_norm(ctx0, inpL, EPS_NORM); // cur = ln_1_g*cur + ln_1_b - cur = ggml_mul(ctx0, cur, model->layers[il].ln_1_g); + cur = bark_ggml_mul(ctx0, cur, model->layers[il].ln_1_g); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].ln_1_b); + cur = bark_ggml_add(ctx0, cur, model->layers[il].ln_1_b); } } // attn { - cur = ggml_mul_mat(ctx0, + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_attn_attn_w, cur); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].c_attn_attn_b); + cur = bark_ggml_add(ctx0, cur, model->layers[il].c_attn_attn_b); } } // self-attention { - struct ggml_tensor* Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd); - struct ggml_tensor* Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd); - struct ggml_tensor* Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Qcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Kcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Vcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd); // store key and value to memory if (N >= 1) { - struct ggml_tensor* k = ggml_view_1d(ctx0, model->memory_k, N * n_embd, (ggml_element_size(model->memory_k) * n_embd) * (il * n_ctx + *n_past)); - struct ggml_tensor* v = ggml_view_1d(ctx0, model->memory_v, N * n_embd, (ggml_element_size(model->memory_v) * n_embd) * (il * n_ctx + *n_past)); + struct bark_ggml_tensor* k = bark_ggml_view_1d(ctx0, model->memory_k, N * n_embd, (bark_ggml_element_size(model->memory_k) * n_embd) * (il * n_ctx + *n_past)); + struct bark_ggml_tensor* v = bark_ggml_view_1d(ctx0, model->memory_v, N * n_embd, (bark_ggml_element_size(model->memory_v) * n_embd) * (il * n_ctx + *n_past)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); + bark_ggml_build_forward_expand(gf, bark_ggml_cpy(ctx0, Kcur, k)); + bark_ggml_build_forward_expand(gf, bark_ggml_cpy(ctx0, Vcur, v)); } - struct ggml_tensor* Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, + struct bark_ggml_tensor* Q = + bark_ggml_permute(ctx0, + bark_ggml_cpy(ctx0, Qcur, - 
ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), + bark_ggml_new_tensor_3d(ctx0, BARK_GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2, 1, 3); - struct ggml_tensor* K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model->memory_k, (*n_past + N) * n_embd, il * n_ctx * ggml_element_size(model->memory_k) * n_embd), + struct bark_ggml_tensor* K = + bark_ggml_permute(ctx0, + bark_ggml_reshape_3d(ctx0, + bark_ggml_view_1d(ctx0, model->memory_k, (*n_past + N) * n_embd, il * n_ctx * bark_ggml_element_size(model->memory_k) * n_embd), n_embd / n_head, n_head, *n_past + N), 0, 2, 1, 3); - struct ggml_tensor* KQ = ggml_mul_mat(ctx0, K, Q); + struct bark_ggml_tensor* KQ = bark_ggml_mul_mat(ctx0, K, Q); - struct ggml_tensor* KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); + struct bark_ggml_tensor* KQ_scaled = bark_ggml_scale_inplace(ctx0, KQ, KQ_scale); - struct ggml_tensor* KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, *n_past); + struct bark_ggml_tensor* KQ_masked = bark_ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, *n_past); - struct ggml_tensor* KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); + struct bark_ggml_tensor* KQ_soft_max = bark_ggml_soft_max_inplace(ctx0, KQ_masked); - struct ggml_tensor* V_trans = - ggml_cpy(ctx0, - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model->memory_v, (*n_past + N) * n_embd, il * n_ctx * ggml_element_size(model->memory_v) * n_embd), + struct bark_ggml_tensor* V_trans = + bark_ggml_cpy(ctx0, + bark_ggml_permute(ctx0, + bark_ggml_reshape_3d(ctx0, + bark_ggml_view_1d(ctx0, model->memory_v, (*n_past + N) * n_embd, il * n_ctx * bark_ggml_element_size(model->memory_v) * n_embd), n_embd / n_head, n_head, *n_past + N), 1, 2, 0, 3), - ggml_new_tensor_3d(ctx0, model->memory_v->type, *n_past + N, n_embd / n_head, n_head)); + bark_ggml_new_tensor_3d(ctx0, model->memory_v->type, *n_past + N, n_embd / n_head, n_head)); - struct ggml_tensor* KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); + struct bark_ggml_tensor* KQV = bark_ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - struct ggml_tensor* KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + struct bark_ggml_tensor* KQV_merged = bark_ggml_permute(ctx0, KQV, 0, 2, 1, 3); - cur = ggml_cpy(ctx0, + cur = bark_ggml_cpy(ctx0, KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_F32, n_embd, N)); } // projection { - cur = ggml_mul_mat(ctx0, model->layers[il].c_attn_proj_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_attn_proj_w, cur); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].c_attn_proj_b); + cur = bark_ggml_add(ctx0, cur, model->layers[il].c_attn_proj_b); } } // add the input - cur = ggml_add(ctx0, cur, inpL); + cur = bark_ggml_add(ctx0, cur, inpL); - struct ggml_tensor* inpFF = cur; + struct bark_ggml_tensor* inpFF = cur; // feed-forward network { // norm { - cur = ggml_norm(ctx0, inpFF, EPS_NORM); + cur = bark_ggml_norm(ctx0, inpFF, EPS_NORM); // cur = ln_2_g*cur + ln_2_b - cur = ggml_mul(ctx0, cur, model->layers[il].ln_2_g); + cur = bark_ggml_mul(ctx0, cur, model->layers[il].ln_2_g); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].ln_2_b); + cur = bark_ggml_add(ctx0, cur, model->layers[il].ln_2_b); } } // cur = fc_w*cur + fc_b - cur = ggml_mul_mat(ctx0, model->layers[il].c_mlp_fc_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_mlp_fc_w, cur); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].c_mlp_fc_b); + cur = bark_ggml_add(ctx0, 
cur, model->layers[il].c_mlp_fc_b); } - cur = ggml_gelu(ctx0, cur); + cur = bark_ggml_gelu(ctx0, cur); // projection - cur = ggml_mul_mat(ctx0, model->layers[il].c_mlp_proj_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_mlp_proj_w, cur); if (bias) { - cur = ggml_add(ctx0, cur, model->layers[il].c_mlp_proj_b); + cur = bark_ggml_add(ctx0, cur, model->layers[il].c_mlp_proj_b); } } // input for next layer - inpL = ggml_add(ctx0, cur, inpFF); + inpL = bark_ggml_add(ctx0, cur, inpFF); } // norm { - inpL = ggml_norm(ctx0, inpL, EPS_NORM); + inpL = bark_ggml_norm(ctx0, inpL, EPS_NORM); // inpL = ln_f_g*inpL + ln_f_b - inpL = ggml_mul(ctx0, inpL, model->ln_f_g); + inpL = bark_ggml_mul(ctx0, inpL, model->ln_f_g); if (bias) { - inpL = ggml_add(ctx0, inpL, model->ln_f_b); + inpL = bark_ggml_add(ctx0, inpL, model->ln_f_b); } } - inpL = ggml_mul_mat(ctx0, + inpL = bark_ggml_mul_mat(ctx0, model->lm_heads[0], - ggml_view_1d(ctx0, inpL, inpL->ne[0], (inpL->ne[1] - 1) * inpL->nb[1])); + bark_ggml_view_1d(ctx0, inpL, inpL->ne[0], (inpL->ne[1] - 1) * inpL->nb[1])); - ggml_build_forward_expand(gf, inpL); + bark_ggml_build_forward_expand(gf, inpL); - ggml_free(ctx0); + bark_ggml_free(ctx0); return gf; } -static ggml_cgraph* bark_build_fine_gpt_graph( +static bark_ggml_cgraph* bark_build_fine_gpt_graph( gpt_model* model, - ggml_allocr* allocr, + bark_ggml_allocr* allocr, bark_sequence& tokens, int codebook_idx, int n_fine_codebooks, @@ -1462,173 +1462,173 @@ static ggml_cgraph* bark_build_fine_gpt_graph( assert(N <= n_ctx); assert(codebook_idx > 0); - static size_t buf_size = ggml_tensor_overhead() * GGML_MAX_NODES + ggml_graph_overhead(); + static size_t buf_size = bark_ggml_tensor_overhead() * BARK_GGML_MAX_NODES + bark_ggml_graph_overhead(); static std::vector buf(buf_size); - struct ggml_init_params ggml_params = { + struct bark_ggml_init_params bark_ggml_params = { /*.mem_size =*/buf_size, /*.mem_buffer =*/buf.data(), /*.no_alloc =*/true, }; - struct ggml_context* ctx0 = ggml_init(ggml_params); + struct bark_ggml_context* ctx0 = bark_ggml_init(bark_ggml_params); - struct ggml_cgraph* gf = ggml_new_graph(ctx0); + struct bark_ggml_cgraph* gf = bark_ggml_new_graph(ctx0); - struct ggml_tensor* input = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, N, n_channels); - ggml_allocr_alloc(allocr, input); + struct bark_ggml_tensor* input = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_I32, N, n_channels); + bark_ggml_allocr_alloc(allocr, input); - struct ggml_tensor* tok_emb = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N); - ggml_allocr_alloc(allocr, tok_emb); + struct bark_ggml_tensor* tok_emb = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_F32, n_embd, N); + bark_ggml_allocr_alloc(allocr, tok_emb); - if (!ggml_allocr_is_measure(allocr)) { - ggml_backend_tensor_set(input, tokens.data(), 0, N * n_channels * ggml_element_size(input)); - ggml_set_zero(tok_emb); + if (!bark_ggml_allocr_is_measure(allocr)) { + bark_ggml_backend_tensor_set(input, tokens.data(), 0, N * n_channels * bark_ggml_element_size(input)); + bark_ggml_set_zero(tok_emb); } for (int wte_ix = 0; wte_ix < codebook_idx + 1; wte_ix++) { - struct ggml_tensor* cur = ggml_get_rows(ctx0, + struct bark_ggml_tensor* cur = bark_ggml_get_rows(ctx0, model->wtes[wte_ix], - ggml_view_1d(ctx0, input, N, wte_ix * input->nb[1])); + bark_ggml_view_1d(ctx0, input, N, wte_ix * input->nb[1])); - tok_emb = ggml_add(ctx0, tok_emb, cur); + tok_emb = bark_ggml_add(ctx0, tok_emb, cur); } - ggml_set_name(tok_emb, "tok_emb"); + bark_ggml_set_name(tok_emb, "tok_emb"); - 
struct ggml_tensor* position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - ggml_allocr_alloc(allocr, position); - if (!ggml_allocr_is_measure(allocr)) { + struct bark_ggml_tensor* position = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_I32, N); + bark_ggml_allocr_alloc(allocr, position); + if (!bark_ggml_allocr_is_measure(allocr)) { for (int32_t i = 0; i < N; ++i) { - ggml_backend_tensor_set(position, &i, i * sizeof(int32_t), sizeof(i)); + bark_ggml_backend_tensor_set(position, &i, i * sizeof(int32_t), sizeof(i)); } } - ggml_set_name(position, "position"); + bark_ggml_set_name(position, "position"); - struct ggml_tensor* pos_emb = ggml_get_rows(ctx0, model->wpe, position); - ggml_set_name(pos_emb, "pos_emb"); + struct bark_ggml_tensor* pos_emb = bark_ggml_get_rows(ctx0, model->wpe, position); + bark_ggml_set_name(pos_emb, "pos_emb"); - struct ggml_tensor* KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_allocr_alloc(allocr, KQ_scale); - if (!ggml_allocr_is_measure(allocr)) { + struct bark_ggml_tensor* KQ_scale = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, 1); + bark_ggml_allocr_alloc(allocr, KQ_scale); + if (!bark_ggml_allocr_is_measure(allocr)) { float s = 1.0f / sqrtf(float(n_embd) / n_head); - ggml_backend_tensor_set(KQ_scale, &s, 0, sizeof(s)); + bark_ggml_backend_tensor_set(KQ_scale, &s, 0, sizeof(s)); } // wte + wpe - struct ggml_tensor* inpL = ggml_add(ctx0, tok_emb, pos_emb); + struct bark_ggml_tensor* inpL = bark_ggml_add(ctx0, tok_emb, pos_emb); for (int il = 0; il < n_layer; il++) { - struct ggml_tensor* cur; + struct bark_ggml_tensor* cur; // norm { - cur = ggml_norm(ctx0, inpL, EPS_NORM); + cur = bark_ggml_norm(ctx0, inpL, EPS_NORM); // cur = ln_1_g*cur + ln_1_b - cur = ggml_mul(ctx0, cur, model->layers[il].ln_1_g); - cur = ggml_add(ctx0, cur, model->layers[il].ln_1_b); + cur = bark_ggml_mul(ctx0, cur, model->layers[il].ln_1_g); + cur = bark_ggml_add(ctx0, cur, model->layers[il].ln_1_b); } // self-attention { // cur = attn_w*cur - cur = ggml_mul_mat(ctx0, model->layers[il].c_attn_attn_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_attn_attn_w, cur); - struct ggml_tensor* Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd); - struct ggml_tensor* Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd); - struct ggml_tensor* Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Qcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Kcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd); + struct bark_ggml_tensor* Vcur = bark_ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd); - struct ggml_tensor* Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, + struct bark_ggml_tensor* Q = + bark_ggml_permute(ctx0, + bark_ggml_cpy(ctx0, Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), + bark_ggml_new_tensor_3d(ctx0, BARK_GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2, 1, 3); - struct ggml_tensor* K = - ggml_permute(ctx0, - ggml_cpy(ctx0, + struct bark_ggml_tensor* K = + bark_ggml_permute(ctx0, + bark_ggml_cpy(ctx0, Kcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), + bark_ggml_new_tensor_3d(ctx0, BARK_GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2, 1, 3); - struct ggml_tensor* KQ = ggml_mul_mat(ctx0, K, Q); + struct bark_ggml_tensor* KQ = bark_ggml_mul_mat(ctx0, K, Q); - struct 
ggml_tensor* KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); + struct bark_ggml_tensor* KQ_scaled = bark_ggml_scale_inplace(ctx0, KQ, KQ_scale); - struct ggml_tensor* KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_scaled); + struct bark_ggml_tensor* KQ_soft_max = bark_ggml_soft_max_inplace(ctx0, KQ_scaled); - struct ggml_tensor* V_trans = - ggml_cont(ctx0, - ggml_permute(ctx0, - ggml_cpy(ctx0, + struct bark_ggml_tensor* V_trans = + bark_ggml_cont(ctx0, + bark_ggml_permute(ctx0, + bark_ggml_cpy(ctx0, Vcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), + bark_ggml_new_tensor_3d(ctx0, BARK_GGML_TYPE_F32, n_embd / n_head, n_head, N)), 1, 2, 0, 3)); - struct ggml_tensor* KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); + struct bark_ggml_tensor* KQV = bark_ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - struct ggml_tensor* KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + struct bark_ggml_tensor* KQV_merged = bark_ggml_permute(ctx0, KQV, 0, 2, 1, 3); // [n_embd, N] - cur = ggml_cpy(ctx0, + cur = bark_ggml_cpy(ctx0, KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_F32, n_embd, N)); // cur = proj_w*cur - cur = ggml_mul_mat(ctx0, model->layers[il].c_attn_proj_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_attn_proj_w, cur); } // residual connection - cur = ggml_add(ctx0, cur, inpL); + cur = bark_ggml_add(ctx0, cur, inpL); - struct ggml_tensor* inpFF = cur; + struct bark_ggml_tensor* inpFF = cur; // feed-forward { // norm { - cur = ggml_norm(ctx0, inpFF, EPS_NORM); + cur = bark_ggml_norm(ctx0, inpFF, EPS_NORM); - cur = ggml_mul(ctx0, cur, model->layers[il].ln_2_g); - cur = ggml_add(ctx0, cur, model->layers[il].ln_2_b); + cur = bark_ggml_mul(ctx0, cur, model->layers[il].ln_2_g); + cur = bark_ggml_add(ctx0, cur, model->layers[il].ln_2_b); } // cur = fc_w*cur - cur = ggml_mul_mat(ctx0, model->layers[il].c_mlp_fc_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_mlp_fc_w, cur); // GELU activation - cur = ggml_gelu(ctx0, cur); + cur = bark_ggml_gelu(ctx0, cur); // cur = proj_w*cur - cur = ggml_mul_mat(ctx0, model->layers[il].c_mlp_proj_w, cur); + cur = bark_ggml_mul_mat(ctx0, model->layers[il].c_mlp_proj_w, cur); } - inpL = ggml_add(ctx0, cur, inpFF); + inpL = bark_ggml_add(ctx0, cur, inpFF); } // norm { - inpL = ggml_norm(ctx0, inpL, EPS_NORM); + inpL = bark_ggml_norm(ctx0, inpL, EPS_NORM); - inpL = ggml_mul(ctx0, inpL, model->ln_f_g); - inpL = ggml_add(ctx0, inpL, model->ln_f_b); + inpL = bark_ggml_mul(ctx0, inpL, model->ln_f_g); + inpL = bark_ggml_add(ctx0, inpL, model->ln_f_b); } // inpL = WTE * inpL - struct ggml_tensor* lm_head = model->lm_heads[codebook_idx - n_codes_given]; - inpL = ggml_mul_mat(ctx0, lm_head, inpL); + struct bark_ggml_tensor* lm_head = model->lm_heads[codebook_idx - n_codes_given]; + inpL = bark_ggml_mul_mat(ctx0, lm_head, inpL); - ggml_build_forward_expand(gf, inpL); + bark_ggml_build_forward_expand(gf, inpL); - ggml_free(ctx0); + bark_ggml_free(ctx0); return gf; } static bool bark_eval_encoder_internal( gpt_model& model, - ggml_allocr* allocr, + bark_ggml_allocr* allocr, bark_sequence& input, std::vector& logits, int* n_past, @@ -1637,29 +1637,29 @@ static bool bark_eval_encoder_internal( auto& hparams = model.hparams; const int n_vocab = hparams.n_out_vocab; - const int64_t t_predict_us_start = ggml_time_us(); + const int64_t t_predict_us_start = bark_ggml_time_us(); // reset the allocator to free all the memory allocated during the previous inference - 
ggml_allocr_reset(allocr); + bark_ggml_allocr_reset(allocr); - struct ggml_cgraph* gf = bark_build_gpt_graph( + struct bark_ggml_cgraph* gf = bark_build_gpt_graph( &model, allocr, input, n_past, merge_ctx, n_threads); // allocate tensors - ggml_allocr_alloc_graph(allocr, gf); + bark_ggml_allocr_alloc_graph(allocr, gf); // run the computation - if (ggml_backend_is_cpu(model.backend)) { - ggml_backend_cpu_set_n_threads(model.backend, n_threads); + if (bark_ggml_backend_is_cpu(model.backend)) { + bark_ggml_backend_cpu_set_n_threads(model.backend, n_threads); } -#ifdef GGML_USE_METAL - if (ggml_backend_is_metal(model.backend)) { - ggml_backend_metal_set_n_cb(model.backend, n_threads); +#ifdef BARK_GGML_USE_METAL + if (bark_ggml_backend_is_metal(model.backend)) { + bark_ggml_backend_metal_set_n_cb(model.backend, n_threads); } #endif - ggml_backend_graph_compute(model.backend, gf); + bark_ggml_backend_graph_compute(model.backend, gf); - struct ggml_tensor* inpL = gf->nodes[gf->n_nodes - 1]; + struct bark_ggml_tensor* inpL = gf->nodes[gf->n_nodes - 1]; int N = input.size(); if (merge_ctx && *n_past == 0) { @@ -1667,14 +1667,14 @@ static bool bark_eval_encoder_internal( } logits.resize(n_vocab); - ggml_backend_tensor_get(inpL, logits.data(), 0, sizeof(float) * n_vocab); + bark_ggml_backend_tensor_get(inpL, logits.data(), 0, sizeof(float) * n_vocab); // updating n_past with N (-256 if merge_ctx) if (n_past) { *n_past += N; } - model.t_predict_us += ggml_time_us() - t_predict_us_start; + model.t_predict_us += bark_ggml_time_us() - t_predict_us_start; return true; } @@ -1713,7 +1713,7 @@ static bool bark_eval_text_encoder(struct bark_context* bctx, int n_threads) { } if (!bark_eval_encoder_internal(model, allocr, input, logits, &n_past, true, n_threads)) { - fprintf(stderr, "%s: Could not generate token\n", __func__); + LOGE("%s: Could not generate token\n", __func__); return false; } @@ -1740,7 +1740,7 @@ static bool bark_eval_text_encoder(struct bark_context* bctx, int n_threads) { } bool bark_forward_text_encoder(struct bark_context* bctx, int n_threads) { - const int64_t t_main_start_us = ggml_time_us(); + const int64_t t_main_start_us = bark_ggml_time_us(); auto& model = bctx->text_model.semantic_model; auto& allocr = bctx->allocr; @@ -1750,40 +1750,40 @@ bool bark_forward_text_encoder(struct bark_context* bctx, int n_threads) { // allocate the compute buffer { // alignment required by the backend - size_t align = ggml_backend_get_alignment(model.backend); - bctx->allocr = ggml_allocr_new_measure(align); + size_t align = bark_ggml_backend_get_alignment(model.backend); + bctx->allocr = bark_ggml_allocr_new_measure(align); // create the worst-case graph for memory usage estimation int n_past = 0; std::vector decoy_tokens(256 + 256 + 1, 0); - struct ggml_cgraph* gf = bark_build_gpt_graph( + struct bark_ggml_cgraph* gf = bark_build_gpt_graph( &model, allocr, decoy_tokens, &n_past, true /* merge_ctx */, n_threads); // compute the required memory - size_t mem_size = ggml_allocr_alloc_graph(bctx->allocr, gf); + size_t mem_size = bark_ggml_allocr_alloc_graph(bctx->allocr, gf); // recreate the allocator with the required memory - ggml_allocr_free(bctx->allocr); - bctx->buf_compute = ggml_backend_alloc_buffer(model.backend, mem_size); - bctx->allocr = ggml_allocr_new_from_buffer(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); + bctx->buf_compute = bark_ggml_backend_alloc_buffer(model.backend, mem_size); + bctx->allocr = bark_ggml_allocr_new_from_buffer(bctx->buf_compute); if (verbosity == 
bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); + LOGE("%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); } } if (!bark_eval_text_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward text encoder\n", __func__); + LOGE("%s: failed to forward text encoder\n", __func__); return false; } - model.t_main_us = ggml_time_us() - t_main_start_us; + model.t_main_us = bark_ggml_time_us() - t_main_start_us; bctx->stats.t_semantic_us = model.t_main_us; bark_print_statistics(&model); - ggml_backend_buffer_free(bctx->buf_compute); - ggml_allocr_free(bctx->allocr); + bark_ggml_backend_buffer_free(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); return true; } @@ -1867,7 +1867,7 @@ static bool bark_eval_coarse_encoder(struct bark_context* bctx, int n_threads) { } if (!bark_eval_encoder_internal(model, allocr, input_in, logits, &n_past, false, n_threads)) { - fprintf(stderr, "%s: Could not generate token\n", __func__); + LOGE("%s: Could not generate token\n", __func__); return false; } @@ -1912,7 +1912,7 @@ static bool bark_eval_coarse_encoder(struct bark_context* bctx, int n_threads) { } bool bark_forward_coarse_encoder(struct bark_context* bctx, int n_threads) { - const int64_t t_main_start_us = ggml_time_us(); + const int64_t t_main_start_us = bark_ggml_time_us(); auto& model = bctx->text_model.coarse_model; auto& allocr = bctx->allocr; @@ -1922,40 +1922,40 @@ bool bark_forward_coarse_encoder(struct bark_context* bctx, int n_threads) { // allocate the compute buffer { // alignment required by the backend - size_t align = ggml_backend_get_alignment(model.backend); - bctx->allocr = ggml_allocr_new_measure(align); + size_t align = bark_ggml_backend_get_alignment(model.backend); + bctx->allocr = bark_ggml_allocr_new_measure(align); // create the worst-case graph for memory usage estimation int n_past = 0; std::vector decoy_tokens(hparams.block_size, 0); - struct ggml_cgraph* gf = bark_build_gpt_graph( + struct bark_ggml_cgraph* gf = bark_build_gpt_graph( &model, allocr, decoy_tokens, &n_past, false /* merge_ctx */, n_threads); // compute the required memory - size_t mem_size = ggml_allocr_alloc_graph(bctx->allocr, gf); + size_t mem_size = bark_ggml_allocr_alloc_graph(bctx->allocr, gf); // recreate the allocator with the required memory - ggml_allocr_free(bctx->allocr); - bctx->buf_compute = ggml_backend_alloc_buffer(model.backend, mem_size); - bctx->allocr = ggml_allocr_new_from_buffer(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); + bctx->buf_compute = bark_ggml_backend_alloc_buffer(model.backend, mem_size); + bctx->allocr = bark_ggml_allocr_new_from_buffer(bctx->buf_compute); if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); + LOGE("%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); } } if (!bark_eval_coarse_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward coarse encoder\n", __func__); + LOGE("%s: failed to forward coarse encoder\n", __func__); return false; } - model.t_main_us = ggml_time_us() - t_main_start_us; + model.t_main_us = bark_ggml_time_us() - t_main_start_us; bctx->stats.t_coarse_us = model.t_main_us; bark_print_statistics(&model); - ggml_backend_buffer_free(bctx->buf_compute); - ggml_allocr_free(bctx->allocr); + 
bark_ggml_backend_buffer_free(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); return true; } @@ -1976,33 +1976,33 @@ static bool bark_eval_fine_encoder_internal( const int n_fine_codebooks = params.n_fine_codebooks; - const int64_t t_predict_us_start = ggml_time_us(); + const int64_t t_predict_us_start = bark_ggml_time_us(); // reset the allocator to free all the memory allocated during the previous inference - ggml_allocr_reset(allocr); + bark_ggml_allocr_reset(allocr); - struct ggml_cgraph* gf = bark_build_fine_gpt_graph( + struct bark_ggml_cgraph* gf = bark_build_fine_gpt_graph( &model, allocr, input, nn, n_fine_codebooks, n_threads); // allocate tensors - ggml_allocr_alloc_graph(allocr, gf); + bark_ggml_allocr_alloc_graph(allocr, gf); // run the computation - if (ggml_backend_is_cpu(model.backend)) { - ggml_backend_cpu_set_n_threads(model.backend, n_threads); + if (bark_ggml_backend_is_cpu(model.backend)) { + bark_ggml_backend_cpu_set_n_threads(model.backend, n_threads); } -#ifdef GGML_USE_METAL - if (ggml_backend_is_metal(model.backend)) { - ggml_backend_metal_set_n_cb(model.backend, n_threads); +#ifdef BARK_GGML_USE_METAL + if (bark_ggml_backend_is_metal(model.backend)) { + bark_ggml_backend_metal_set_n_cb(model.backend, n_threads); } #endif - ggml_backend_graph_compute(model.backend, gf); + bark_ggml_backend_graph_compute(model.backend, gf); - struct ggml_tensor* inpL = gf->nodes[gf->n_nodes - 1]; + struct bark_ggml_tensor* inpL = gf->nodes[gf->n_nodes - 1]; - ggml_backend_tensor_get(inpL, logits.data(), 0, sizeof(float) * n_vocab * block_size); + bark_ggml_backend_tensor_get(inpL, logits.data(), 0, sizeof(float) * n_vocab * block_size); - model.t_predict_us += ggml_time_us() - t_predict_us_start; + model.t_predict_us += bark_ggml_time_us() - t_predict_us_start; return true; } @@ -2070,7 +2070,7 @@ static bool bark_eval_fine_encoder(struct bark_context* bctx, int n_threads) { } if (!bark_eval_fine_encoder_internal(bctx, in_buffer, logits, nn, n_threads)) { - fprintf(stderr, "%s: Could not generate token\n", __func__); + LOGE("%s: Could not generate token\n", __func__); return false; } for (int i = 0; i < 1024; i++) { @@ -2108,7 +2108,7 @@ static bool bark_eval_fine_encoder(struct bark_context* bctx, int n_threads) { } bool bark_forward_fine_encoder(struct bark_context* bctx, int n_threads) { - const int64_t t_main_start_us = ggml_time_us(); + const int64_t t_main_start_us = bark_ggml_time_us(); auto& model = bctx->text_model.fine_model; auto& allocr = bctx->allocr; @@ -2121,56 +2121,56 @@ bool bark_forward_fine_encoder(struct bark_context* bctx, int n_threads) { // allocate the compute buffer { // alignment required by the backend - size_t align = ggml_backend_get_alignment(model.backend); - bctx->allocr = ggml_allocr_new_measure(align); + size_t align = bark_ggml_backend_get_alignment(model.backend); + bctx->allocr = bark_ggml_allocr_new_measure(align); // create the worst-case graph for memory usage estimation std::vector decoy_tokens(hparams.block_size * n_fine_codebooks, 0); - struct ggml_cgraph* gf = bark_build_fine_gpt_graph( + struct bark_ggml_cgraph* gf = bark_build_fine_gpt_graph( &model, allocr, decoy_tokens, 2 /* codebook_idx */, n_fine_codebooks, n_threads); // compute the required memory - size_t mem_size = ggml_allocr_alloc_graph(bctx->allocr, gf); + size_t mem_size = bark_ggml_allocr_alloc_graph(bctx->allocr, gf); // recreate the allocator with the required memory - ggml_allocr_free(bctx->allocr); - bctx->buf_compute = 
ggml_backend_alloc_buffer(model.backend, mem_size); - bctx->allocr = ggml_allocr_new_from_buffer(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); + bctx->buf_compute = bark_ggml_backend_alloc_buffer(model.backend, mem_size); + bctx->allocr = bark_ggml_allocr_new_from_buffer(bctx->buf_compute); if (verbosity == bark_verbosity_level::MEDIUM || verbosity == bark_verbosity_level::HIGH) { - fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); + LOGE("%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); } } if (!bark_eval_fine_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward coarse encoder\n", __func__); + LOGE("%s: failed to forward coarse encoder\n", __func__); return false; } - model.t_main_us = ggml_time_us() - t_main_start_us; + model.t_main_us = bark_ggml_time_us() - t_main_start_us; bctx->stats.t_fine_us = model.t_main_us; bark_print_statistics(&model); - ggml_backend_buffer_free(bctx->buf_compute); - ggml_allocr_free(bctx->allocr); + bark_ggml_backend_buffer_free(bctx->buf_compute); + bark_ggml_allocr_free(bctx->allocr); return true; } static bool bark_forward_eval(struct bark_context* bctx, int n_threads) { if (!bark_forward_text_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward text encoder\n", __func__); + LOGE("%s: failed to forward text encoder\n", __func__); return false; } if (!bark_forward_coarse_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward coarse encoder\n", __func__); + LOGE("%s: failed to forward coarse encoder\n", __func__); return false; } if (!bark_forward_fine_encoder(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward fine encoder\n", __func__); + LOGE("%s: failed to forward fine encoder\n", __func__); return false; } @@ -2179,19 +2179,19 @@ static bool bark_forward_eval(struct bark_context* bctx, int n_threads) { bool bark_generate_audio(struct bark_context* bctx, const char* text, int n_threads) { if (!bctx) { - fprintf(stderr, "%s: invalid bark context\n", __func__); + LOGE("%s: invalid bark context\n", __func__); return false; } bark_reset_statistics(bctx); - int64_t t_start_eval_us = ggml_time_us(); + int64_t t_start_eval_us = bark_ggml_time_us(); std::string text_str(text); bark_tokenize_input(bctx, text_str); if (!bark_forward_eval(bctx, n_threads)) { - fprintf(stderr, "%s: failed to forward eval\n", __func__); + LOGE("%s: failed to forward eval\n", __func__); return false; } @@ -2214,14 +2214,14 @@ bool bark_generate_audio(struct bark_context* bctx, const char* text, int n_thre } if (!encodec_decompress_audio(bctx->encodec_ctx, encodec_tokens.data(), encodec_tokens.size(), n_threads)) { - printf("%s: Could not generate waveform from tokens with Encodec\n", __func__); + LOGI("%s: Could not generate waveform from tokens with Encodec\n", __func__); return false; } bctx->generated_audio = encodec_get_audio(bctx->encodec_ctx); bctx->n_generated_samples = encodec_get_audio_size(bctx->encodec_ctx); - bctx->stats.t_eval_us = ggml_time_us() - t_start_eval_us; + bctx->stats.t_eval_us = bark_ggml_time_us() - t_start_eval_us; return true; } @@ -2232,11 +2232,11 @@ static void bark_free_model(struct gpt_model* model) { } if (model->ctx) { - ggml_free(model->ctx); + bark_ggml_free(model->ctx); } - ggml_backend_buffer_free(model->buffer_w); - ggml_backend_free(model->backend); + bark_ggml_backend_buffer_free(model->buffer_w); + bark_ggml_backend_free(model->backend); } void bark_free(struct bark_context* bctx) { @@ -2285,7 +2285,7 
@@ struct bark_context_params bark_context_default_params() { return result; } -bool bark_model_weights_quantize(std::ifstream& fin, std::ofstream& fout, ggml_ftype ftype) { +bool bark_model_weights_quantize(std::ifstream& fin, std::ofstream& fout, bark_ggml_ftype ftype) { gpt_model model; gpt_hparams hparams; @@ -2304,22 +2304,22 @@ bool bark_model_weights_quantize(std::ifstream& fin, std::ofstream& fout, ggml_f read_safe(fin, hparams.n_wtes); read_safe(fin, hparams.ftype); - const int32_t qntvr_src = hparams.ftype / GGML_QNT_VERSION_FACTOR; - int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype; - - printf("%s: n_in_vocab = %d\n", __func__, hparams.n_in_vocab); - printf("%s: n_out_vocab = %d\n", __func__, hparams.n_out_vocab); - printf("%s: block_size = %d\n", __func__, hparams.block_size); - printf("%s: bias = %d\n", __func__, hparams.bias); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: n_lm_heads = %d\n", __func__, hparams.n_lm_heads); - printf("%s: n_wtes = %d\n", __func__, hparams.n_wtes); - printf("%s: ftype (src) = %d\n", __func__, hparams.ftype); - printf("%s: qntvr (src) = %d\n", __func__, qntvr_src); - printf("%s: ftype (dst) = %d\n", __func__, ftype_dst); - printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION); + const int32_t qntvr_src = hparams.ftype / BARK_GGML_QNT_VERSION_FACTOR; + int32_t ftype_dst = BARK_GGML_QNT_VERSION * BARK_GGML_QNT_VERSION_FACTOR + ftype; + + LOGI("%s: n_in_vocab = %d\n", __func__, hparams.n_in_vocab); + LOGI("%s: n_out_vocab = %d\n", __func__, hparams.n_out_vocab); + LOGI("%s: block_size = %d\n", __func__, hparams.block_size); + LOGI("%s: bias = %d\n", __func__, hparams.bias); + LOGI("%s: n_embd = %d\n", __func__, hparams.n_embd); + LOGI("%s: n_head = %d\n", __func__, hparams.n_head); + LOGI("%s: n_layer = %d\n", __func__, hparams.n_layer); + LOGI("%s: n_lm_heads = %d\n", __func__, hparams.n_lm_heads); + LOGI("%s: n_wtes = %d\n", __func__, hparams.n_wtes); + LOGI("%s: ftype (src) = %d\n", __func__, hparams.ftype); + LOGI("%s: qntvr (src) = %d\n", __func__, qntvr_src); + LOGI("%s: ftype (dst) = %d\n", __func__, ftype_dst); + LOGI("%s: qntvr (dst) = %d\n", __func__, BARK_GGML_QNT_VERSION); write_safe(fout, hparams.n_layer); write_safe(fout, hparams.n_head); @@ -2343,29 +2343,29 @@ bool bark_model_weights_quantize(std::ifstream& fin, std::ofstream& fout, ggml_f "model/h.*/mlp/c_proj/w", }; - if (!ggml_quantize_weights(fin, fout, ftype, to_quant, {})) { - fprintf(stderr, "%s: failed to quantize model\n", __func__); + if (!bark_ggml_quantize_weights(fin, fout, ftype, to_quant, {})) { + LOGE("%s: failed to quantize model\n", __func__); return false; } return true; } -bool bark_model_quantize(const char* fname_inp, const char* fname_out, enum ggml_ftype ftype) { - printf("%s: loading model from '%s'\n", __func__, fname_inp); +bool bark_model_quantize(const char* fname_inp, const char* fname_out, enum bark_ggml_ftype ftype) { + LOGI("%s: loading model from '%s'\n", __func__, fname_inp); std::string fname_inp_str(fname_inp); std::string fname_out_str(fname_out); auto fin = std::ifstream(fname_inp_str, std::ios::binary); if (!fin) { - fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp); + LOGE("%s: failed to open '%s' for reading\n", __func__, fname_inp); return false; } auto fout = std::ofstream(fname_out_str, std::ios::binary); if (!fout) { - fprintf(stderr, "%s: 
failed to open '%s' for writing\n", __func__, fname_out);
+        LOGE("%s: failed to open '%s' for writing\n", __func__, fname_out);
         return false;
     }
@@ -2373,8 +2373,8 @@ bool bark_model_quantize(const char* fname_inp, const char* fname_out, enum ggml
     {
         uint32_t magic;
         fin.read((char*)&magic, sizeof(magic));
-        if (magic != GGML_FILE_MAGIC) {
-            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp);
+        if (magic != BARK_GGML_FILE_MAGIC) {
+            LOGE("%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp);
             return false;
         }
@@ -2401,19 +2401,19 @@ bool bark_model_quantize(const char* fname_inp, const char* fname_out, enum ggml
     // text model
     if (!bark_model_weights_quantize(fin, fout, ftype)) {
-        fprintf(stderr, "%s: failed to quantize text model\n", __func__);
+        LOGE("%s: failed to quantize text model\n", __func__);
         return false;
     }
     // coarse model
     if (!bark_model_weights_quantize(fin, fout, ftype)) {
-        fprintf(stderr, "%s: failed to quantize coarse model\n", __func__);
+        LOGE("%s: failed to quantize coarse model\n", __func__);
         return false;
     }
     // fine model
     if (!bark_model_weights_quantize(fin, fout, ftype)) {
-        fprintf(stderr, "%s: failed to quantize fine model\n", __func__);
+        LOGE("%s: failed to quantize fine model\n", __func__);
         return false;
     }
diff --git a/cpp/bark.h b/cpp/bark.h
index e6426f8..d8be51a 100644
--- a/cpp/bark.h
+++ b/cpp/bark.h
@@ -16,6 +16,7 @@
 │ PERFORMANCE OF THIS SOFTWARE. │
 ╚─────────────────────────────────────────────────────────────────────────────*/
 #pragma once
+#include "log.h"
 #include "encodec.h"
 #include "ggml-backend.h"
@@ -229,7 +230,7 @@ extern "C" {
     BARK_API bool bark_model_quantize(
         const char *fname_inp,
         const char *fname_out,
-        enum ggml_ftype ftype);
+        enum bark_ggml_ftype ftype);
     /**
      * @brief Frees the memory allocated for a bark context.
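Note on logging: the fprintf/printf → LOGE/LOGI changes throughout this patch, together with the `#include "log.h"` added to cpp/bark.h above, assume a small logging header that is not shown in this diff. A minimal sketch of what such a shim could look like, assuming Android NDK logcat output with a plain stderr fallback on other platforms (the file name, tag, and macro spellings here are illustrative guesses, not taken from the repository):

// log.h - hypothetical logging shim, not part of this patch
#pragma once

#ifdef __ANDROID__
#include <android/log.h>
// Route printf-style messages to logcat under a single tag.
#define BARK_LOG_TAG "BarkRN"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, BARK_LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, BARK_LOG_TAG, __VA_ARGS__)
#else
#include <cstdio>
// Non-Android builds keep writing to stderr, so call sites behave as before.
#define LOGI(...) fprintf(stderr, __VA_ARGS__)
#define LOGE(...) fprintf(stderr, __VA_ARGS__)
#endif

Because the macros keep the printf-style variadic signature, every fprintf(stderr, fmt, ...) call replaced in this patch can forward its format string and arguments unchanged.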
diff --git a/cpp/encodec.cpp b/cpp/encodec.cpp index d332c39..287edbd 100644 --- a/cpp/encodec.cpp +++ b/cpp/encodec.cpp @@ -2,11 +2,11 @@ #include "ggml-backend.h" #include "ggml.h" -#ifdef GGML_USE_CUBLAS +#ifdef BARK_GGML_USE_CUBLAS #include "ggml-cuda.h" #endif -#ifdef GGML_USE_METAL +#ifdef BARK_GGML_USE_METAL #include "ggml-metal.h" #endif @@ -70,50 +70,50 @@ struct encodec_hparams { // res + downsample block at some ratio struct encodec_encoder_block { // conv1 - struct ggml_tensor *conv_1_w; - struct ggml_tensor *conv_1_b; + struct bark_ggml_tensor *conv_1_w; + struct bark_ggml_tensor *conv_1_b; // conv2 - struct ggml_tensor *conv_2_w; - struct ggml_tensor *conv_2_b; + struct bark_ggml_tensor *conv_2_w; + struct bark_ggml_tensor *conv_2_b; // shortcut - struct ggml_tensor *conv_sc_w; - struct ggml_tensor *conv_sc_b; + struct bark_ggml_tensor *conv_sc_w; + struct bark_ggml_tensor *conv_sc_b; // downsampling layers - struct ggml_tensor *ds_conv_w; - struct ggml_tensor *ds_conv_b; + struct bark_ggml_tensor *ds_conv_w; + struct bark_ggml_tensor *ds_conv_b; }; struct encodec_lstm { - struct ggml_tensor *l0_ih_w; - struct ggml_tensor *l0_hh_w; + struct bark_ggml_tensor *l0_ih_w; + struct bark_ggml_tensor *l0_hh_w; - struct ggml_tensor *l0_ih_b; - struct ggml_tensor *l0_hh_b; + struct bark_ggml_tensor *l0_ih_b; + struct bark_ggml_tensor *l0_hh_b; - struct ggml_tensor *l1_ih_w; - struct ggml_tensor *l1_hh_w; + struct bark_ggml_tensor *l1_ih_w; + struct bark_ggml_tensor *l1_hh_w; - struct ggml_tensor *l1_ih_b; - struct ggml_tensor *l1_hh_b; + struct bark_ggml_tensor *l1_ih_b; + struct bark_ggml_tensor *l1_hh_b; }; struct encodec_encoder { - struct ggml_tensor *init_conv_w; - struct ggml_tensor *init_conv_b; + struct bark_ggml_tensor *init_conv_w; + struct bark_ggml_tensor *init_conv_b; encodec_lstm lstm; - struct ggml_tensor *final_conv_w; - struct ggml_tensor *final_conv_b; + struct bark_ggml_tensor *final_conv_w; + struct bark_ggml_tensor *final_conv_b; std::vector blocks; }; struct encodec_quant_block { - struct ggml_tensor *embed; + struct bark_ggml_tensor *embed; }; struct encodec_quantizer { @@ -122,30 +122,30 @@ struct encodec_quantizer { struct encodec_decoder_block { // upsampling layers - struct ggml_tensor *us_conv_w; - struct ggml_tensor *us_conv_b; + struct bark_ggml_tensor *us_conv_w; + struct bark_ggml_tensor *us_conv_b; // conv1 - struct ggml_tensor *conv_1_w; - struct ggml_tensor *conv_1_b; + struct bark_ggml_tensor *conv_1_w; + struct bark_ggml_tensor *conv_1_b; // conv2 - struct ggml_tensor *conv_2_w; - struct ggml_tensor *conv_2_b; + struct bark_ggml_tensor *conv_2_w; + struct bark_ggml_tensor *conv_2_b; // shortcut - struct ggml_tensor *conv_sc_w; - struct ggml_tensor *conv_sc_b; + struct bark_ggml_tensor *conv_sc_w; + struct bark_ggml_tensor *conv_sc_b; }; struct encodec_decoder { - struct ggml_tensor *init_conv_w; - struct ggml_tensor *init_conv_b; + struct bark_ggml_tensor *init_conv_w; + struct bark_ggml_tensor *init_conv_b; encodec_lstm lstm; - struct ggml_tensor *final_conv_w; - struct ggml_tensor *final_conv_b; + struct bark_ggml_tensor *final_conv_w; + struct bark_ggml_tensor *final_conv_b; std::vector blocks; }; @@ -158,29 +158,29 @@ struct encodec_model { encodec_decoder decoder; // context - struct ggml_context *ctx; + struct bark_ggml_context *ctx; int n_loaded; - ggml_backend_t backend = NULL; + bark_ggml_backend_t backend = NULL; - ggml_backend_buffer_t buffer_w; + bark_ggml_backend_buffer_t buffer_w; - std::map tensors; + std::map tensors; }; struct 
encodec_context { encodec_model model; // buffer for model evaluation - ggml_backend_buffer_t buf_compute; + bark_ggml_backend_buffer_t buf_compute; // custom allocrator - struct ggml_allocr *allocr = NULL; + struct bark_ggml_allocr *allocr = NULL; // intermediate steps - struct ggml_tensor *encoded = NULL; // Encoded audio - struct ggml_tensor *codes = NULL; // Quantized representation of audio in codebook - struct ggml_tensor *decoded = NULL; // Reconstructed audio from codes + struct bark_ggml_tensor *encoded = NULL; // Encoded audio + struct bark_ggml_tensor *codes = NULL; // Quantized representation of audio in codebook + struct bark_ggml_tensor *decoded = NULL; // Reconstructed audio from codes std::vector out_codes; std::vector out_audio; @@ -203,7 +203,7 @@ static void read_safe(std::ifstream &infile, T &dest) { infile.read((char *)&dest, sizeof(T)); } -static void ggml_log_callback_default(ggml_log_level level, const char *text, void *user_data) { +static void bark_ggml_log_callback_default(bark_ggml_log_level level, const char *text, void *user_data) { (void)level; (void)user_data; fputs(text, stderr); @@ -211,20 +211,20 @@ static void ggml_log_callback_default(ggml_log_level level, const char *text, vo } static void encodec_sigmoid_impl( - struct ggml_tensor *dst, - const struct ggml_tensor *src, + struct bark_ggml_tensor *dst, + const struct bark_ggml_tensor *src, int ith, int nth, void *userdata) { - GGML_ASSERT(userdata == NULL); - GGML_ASSERT(ggml_are_same_shape(dst, src)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_is_contiguous(src)); + BARK_GGML_ASSERT(userdata == NULL); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(dst, src)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src)); - const float *src_data = ggml_get_data_f32(src); - float *dst_data = ggml_get_data_f32(dst); + const float *src_data = bark_ggml_get_data_f32(src); + float *dst_data = bark_ggml_get_data_f32(dst); - const int ne = (int)ggml_nelements(dst); + const int ne = (int)bark_ggml_nelements(dst); const int dr = (ne + nth - 1) / nth; const int ie0 = dr * ith; const int ie1 = std::min(ie0 + dr, ne); @@ -234,14 +234,14 @@ static void encodec_sigmoid_impl( } } -static struct ggml_tensor *encodec_sigmoid( - struct ggml_context *ctx, - struct ggml_tensor *x) { - return ggml_map_custom1(ctx, x, encodec_sigmoid_impl, GGML_N_TASKS_MAX, NULL); +static struct bark_ggml_tensor *encodec_sigmoid( + struct bark_ggml_context *ctx, + struct bark_ggml_tensor *x) { + return bark_ggml_map_custom1(ctx, x, encodec_sigmoid_impl, BARK_GGML_N_TASKS_MAX, NULL); } static int get_extra_padding_for_conv_1d( - struct ggml_tensor *inp, + struct bark_ggml_tensor *inp, float kernel_size, float stride, float padding_total) { @@ -251,9 +251,9 @@ static int get_extra_padding_for_conv_1d( return ideal_length - length; } -static struct ggml_tensor *pad_1d( - struct ggml_context *ctx0, - struct ggml_tensor *inp, +static struct bark_ggml_tensor *pad_1d( + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *inp, int padding_left, int padding_right) { int length = inp->ne[0]; @@ -266,15 +266,15 @@ static struct ggml_tensor *pad_1d( extra_pad = max_pad - length + 1; // constant padding - struct ggml_tensor *out = ggml_new_tensor_2d(ctx0, inp->type, length + extra_pad, dim); - ggml_set_zero(out); - out = ggml_set_2d(ctx0, out, inp, out->nb[1], 0); + struct bark_ggml_tensor *out = bark_ggml_new_tensor_2d(ctx0, inp->type, length + extra_pad, dim); + bark_ggml_set_zero(out); + out = 
bark_ggml_set_2d(ctx0, out, inp, out->nb[1], 0); } - struct ggml_tensor *padded = ggml_pad_reflec_1d(ctx0, inp, padding_left, padding_right); + struct bark_ggml_tensor *padded = bark_ggml_pad_reflec_1d(ctx0, inp, padding_left, padding_right); const int end = padded->ne[0] - extra_pad; - struct ggml_tensor *dest = ggml_view_2d(ctx0, padded, end, dim, padded->nb[1], 0); + struct bark_ggml_tensor *dest = bark_ggml_view_2d(ctx0, padded, end, dim, padded->nb[1], 0); return dest; } @@ -296,9 +296,9 @@ static int32_t get_num_quantizers_for_bandwidth(int bins, float frame_rate, floa return n_q; } -static struct ggml_tensor *unpad_1d( - struct ggml_context *ctx0, - struct ggml_tensor *inp, +static struct bark_ggml_tensor *unpad_1d( + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *inp, int padding_left, int padding_right) { int length = inp->ne[0]; @@ -311,45 +311,45 @@ static struct ggml_tensor *unpad_1d( int end = length - padding_right; int offset = padding_left * inp->nb[1]; - struct ggml_tensor *dst = ggml_view_2d(ctx0, inp, end, dim, inp->nb[1], offset); + struct bark_ggml_tensor *dst = bark_ggml_view_2d(ctx0, inp, end, dim, inp->nb[1], offset); return dst; } -static struct ggml_tensor *strided_conv_1d( - ggml_context *ctx0, - ggml_tensor *inp, - ggml_tensor *conv_w, - ggml_tensor *conv_b, +static struct bark_ggml_tensor *strided_conv_1d( + bark_ggml_context *ctx0, + bark_ggml_tensor *inp, + bark_ggml_tensor *conv_w, + bark_ggml_tensor *conv_b, int stride) { int kernel_size = conv_w->ne[0]; int padding_total = kernel_size - stride; int extra_padding = get_extra_padding_for_conv_1d(inp, kernel_size, stride, padding_total); - struct ggml_tensor *padded_inp = pad_1d(ctx0, inp, padding_total, extra_padding); - struct ggml_tensor *dst = ggml_conv_1d(ctx0, conv_w, padded_inp, stride, 0, 1); + struct bark_ggml_tensor *padded_inp = pad_1d(ctx0, inp, padding_total, extra_padding); + struct bark_ggml_tensor *dst = bark_ggml_conv_1d(ctx0, conv_w, padded_inp, stride, 0, 1); // add bias - dst = ggml_transpose(ctx0, dst); - dst = ggml_add(ctx0, ggml_repeat(ctx0, conv_b, dst), dst); - dst = ggml_cont(ctx0, ggml_transpose(ctx0, dst)); + dst = bark_ggml_transpose(ctx0, dst); + dst = bark_ggml_add(ctx0, bark_ggml_repeat(ctx0, conv_b, dst), dst); + dst = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, dst)); return dst; } -static struct ggml_tensor *strided_conv_transpose_1d( - struct ggml_context *ctx0, - struct ggml_tensor *inp, - struct ggml_tensor *conv_w, - struct ggml_tensor *conv_b, +static struct bark_ggml_tensor *strided_conv_transpose_1d( + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *inp, + struct bark_ggml_tensor *conv_w, + struct bark_ggml_tensor *conv_b, int stride) { - struct ggml_tensor *dst = ggml_conv_transpose_1d( + struct bark_ggml_tensor *dst = bark_ggml_conv_transpose_1d( ctx0, conv_w, inp, stride, 0 /* p0 */, 1 /* d0 */); // add bias - dst = ggml_transpose(ctx0, dst); - dst = ggml_add(ctx0, ggml_repeat(ctx0, conv_b, dst), dst); - dst = ggml_cont(ctx0, ggml_transpose(ctx0, dst)); + dst = bark_ggml_transpose(ctx0, dst); + dst = bark_ggml_add(ctx0, bark_ggml_repeat(ctx0, conv_b, dst), dst); + dst = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, dst)); int kernel_size = conv_w->ne[0]; int padding_total = kernel_size - stride; @@ -357,64 +357,64 @@ static struct ggml_tensor *strided_conv_transpose_1d( int padding_right = ceilf(padding_total); int padding_left = padding_total - padding_right; - struct ggml_tensor *unpadded = unpad_1d(ctx0, dst, padding_left, 
padding_right); - unpadded = ggml_cont(ctx0, unpadded); + struct bark_ggml_tensor *unpadded = unpad_1d(ctx0, dst, padding_left, padding_right); + unpadded = bark_ggml_cont(ctx0, unpadded); return unpadded; } -static struct ggml_tensor *forward_pass_lstm_unilayer( - struct ggml_context *ctx0, - struct ggml_allocr *allocr, - struct ggml_tensor *inp, - struct ggml_tensor *weight_ih, - struct ggml_tensor *weight_hh, - struct ggml_tensor *bias_ih, - struct ggml_tensor *bias_hh) { +static struct bark_ggml_tensor *forward_pass_lstm_unilayer( + struct bark_ggml_context *ctx0, + struct bark_ggml_allocr *allocr, + struct bark_ggml_tensor *inp, + struct bark_ggml_tensor *weight_ih, + struct bark_ggml_tensor *weight_hh, + struct bark_ggml_tensor *bias_ih, + struct bark_ggml_tensor *bias_hh) { const int input_dim = inp->ne[1]; const int hidden_dim = weight_ih->ne[1] / 4; const int seq_length = inp->ne[0]; - struct ggml_tensor *hs = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, hidden_dim, seq_length); - ggml_allocr_alloc(allocr, hs); + struct bark_ggml_tensor *hs = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_F32, hidden_dim, seq_length); + bark_ggml_allocr_alloc(allocr, hs); - struct ggml_tensor *c_t = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, hidden_dim); - ggml_allocr_alloc(allocr, c_t); + struct bark_ggml_tensor *c_t = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, hidden_dim); + bark_ggml_allocr_alloc(allocr, c_t); - struct ggml_tensor *h_t = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, hidden_dim); - ggml_allocr_alloc(allocr, h_t); + struct bark_ggml_tensor *h_t = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, hidden_dim); + bark_ggml_allocr_alloc(allocr, h_t); - if (!ggml_allocr_is_measure(allocr)) { - h_t = ggml_set_zero(h_t); - c_t = ggml_set_zero(c_t); + if (!bark_ggml_allocr_is_measure(allocr)) { + h_t = bark_ggml_set_zero(h_t); + c_t = bark_ggml_set_zero(c_t); } - struct ggml_tensor *current = ggml_cont(ctx0, ggml_transpose(ctx0, inp)); + struct bark_ggml_tensor *current = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, inp)); for (int t = 0; t < seq_length; t++) { - struct ggml_tensor *x_t = ggml_view_1d(ctx0, current, input_dim, t * current->nb[1]); + struct bark_ggml_tensor *x_t = bark_ggml_view_1d(ctx0, current, input_dim, t * current->nb[1]); - struct ggml_tensor *inp_gates = ggml_mul_mat(ctx0, weight_ih, x_t); - inp_gates = ggml_add(ctx0, inp_gates, bias_ih); + struct bark_ggml_tensor *inp_gates = bark_ggml_mul_mat(ctx0, weight_ih, x_t); + inp_gates = bark_ggml_add(ctx0, inp_gates, bias_ih); - struct ggml_tensor *hid_gates = ggml_mul_mat(ctx0, weight_hh, h_t); - hid_gates = ggml_add(ctx0, hid_gates, bias_hh); + struct bark_ggml_tensor *hid_gates = bark_ggml_mul_mat(ctx0, weight_hh, h_t); + hid_gates = bark_ggml_add(ctx0, hid_gates, bias_hh); - struct ggml_tensor *out_gates = ggml_add(ctx0, inp_gates, hid_gates); + struct bark_ggml_tensor *out_gates = bark_ggml_add(ctx0, inp_gates, hid_gates); - struct ggml_tensor *i_t = encodec_sigmoid(ctx0, ggml_view_1d(ctx0, out_gates, hidden_dim, 0 * sizeof(float) * hidden_dim)); - struct ggml_tensor *f_t = encodec_sigmoid(ctx0, ggml_view_1d(ctx0, out_gates, hidden_dim, 1 * sizeof(float) * hidden_dim)); - struct ggml_tensor *g_t = ggml_tanh(ctx0, ggml_view_1d(ctx0, out_gates, hidden_dim, 2 * sizeof(float) * hidden_dim)); - struct ggml_tensor *o_t = encodec_sigmoid(ctx0, ggml_view_1d(ctx0, out_gates, hidden_dim, 3 * sizeof(float) * hidden_dim)); + struct bark_ggml_tensor *i_t = encodec_sigmoid(ctx0, bark_ggml_view_1d(ctx0, out_gates, hidden_dim, 0 * 
sizeof(float) * hidden_dim)); + struct bark_ggml_tensor *f_t = encodec_sigmoid(ctx0, bark_ggml_view_1d(ctx0, out_gates, hidden_dim, 1 * sizeof(float) * hidden_dim)); + struct bark_ggml_tensor *g_t = bark_ggml_tanh(ctx0, bark_ggml_view_1d(ctx0, out_gates, hidden_dim, 2 * sizeof(float) * hidden_dim)); + struct bark_ggml_tensor *o_t = encodec_sigmoid(ctx0, bark_ggml_view_1d(ctx0, out_gates, hidden_dim, 3 * sizeof(float) * hidden_dim)); - c_t = ggml_add(ctx0, ggml_mul(ctx0, f_t, c_t), ggml_mul(ctx0, i_t, g_t)); + c_t = bark_ggml_add(ctx0, bark_ggml_mul(ctx0, f_t, c_t), bark_ggml_mul(ctx0, i_t, g_t)); - h_t = ggml_mul(ctx0, o_t, ggml_tanh(ctx0, c_t)); + h_t = bark_ggml_mul(ctx0, o_t, bark_ggml_tanh(ctx0, c_t)); - hs = ggml_set_1d(ctx0, hs, h_t, t * hs->nb[1]); + hs = bark_ggml_set_1d(ctx0, hs, h_t, t * hs->nb[1]); } - hs = ggml_cont(ctx0, ggml_transpose(ctx0, hs)); + hs = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, hs)); return hs; } @@ -425,7 +425,7 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int uint32_t magic; read_safe(infile, magic); if (magic != ENCODEC_FILE_MAGIC) { - fprintf(stderr, "%s: invalid model file (bad magic)\n", __func__); + LOGE("%s: invalid model file (bad magic)\n", __func__); return false; } } @@ -445,28 +445,28 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int read_safe(infile, hparams.sr); read_safe(infile, hparams.ftype); - const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR; - - printf("%s: in_channels = %d\n", __func__, hparams.in_channels); - printf("%s: hidden_dim = %d\n", __func__, hparams.hidden_dim); - printf("%s: n_filters = %d\n", __func__, hparams.n_filters); - printf("%s: kernel_size = %d\n", __func__, hparams.kernel_size); - printf("%s: res_kernel = %d\n", __func__, hparams.residual_kernel_size); - // printf("%s: ratios = %d\n", __func__, hparams.ratios); - printf("%s: n_bins = %d\n", __func__, hparams.n_bins); - printf("%s: bandwidth = %d\n", __func__, hparams.bandwidth); - printf("%s: sample_rate = %d\n", __func__, hparams.sr); - printf("%s: ftype = %d\n", __func__, hparams.ftype); - printf("%s: qntvr = %d\n", __func__, qntvr); - - hparams.ftype %= GGML_QNT_VERSION_FACTOR; + const int32_t qntvr = hparams.ftype / BARK_GGML_QNT_VERSION_FACTOR; + + LOGI("%s: in_channels = %d\n", __func__, hparams.in_channels); + LOGI("%s: hidden_dim = %d\n", __func__, hparams.hidden_dim); + LOGI("%s: n_filters = %d\n", __func__, hparams.n_filters); + LOGI("%s: kernel_size = %d\n", __func__, hparams.kernel_size); + LOGI("%s: res_kernel = %d\n", __func__, hparams.residual_kernel_size); + // LOGI("%s: ratios = %d\n", __func__, hparams.ratios); + LOGI("%s: n_bins = %d\n", __func__, hparams.n_bins); + LOGI("%s: bandwidth = %d\n", __func__, hparams.bandwidth); + LOGI("%s: sample_rate = %d\n", __func__, hparams.sr); + LOGI("%s: ftype = %d\n", __func__, hparams.ftype); + LOGI("%s: qntvr = %d\n", __func__, qntvr); + + hparams.ftype %= BARK_GGML_QNT_VERSION_FACTOR; } // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation - ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype)); - if (wtype == GGML_TYPE_COUNT) { - fprintf(stderr, "%s: invalid model file (bad ftype value %d)\n", + bark_ggml_type wtype = bark_ggml_ftype_to_bark_ggml_type((bark_ggml_ftype)(model.hparams.ftype)); + if (wtype == BARK_GGML_TYPE_COUNT) { + LOGE("%s: invalid model file (bad ftype value %d)\n", __func__, 
model.hparams.ftype); return 1; } @@ -494,37 +494,37 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int int mult = 1; // scaling factor for hidden size // initial conv1d layer - buffer_size += in_channels * n_filters * kernel_size * ggml_type_size(wtype); // weight - buffer_size += n_filters * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += in_channels * n_filters * kernel_size * bark_ggml_type_size(wtype); // weight + buffer_size += n_filters * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias // resnet blocks for (int i = 0; i < 4; i++) { // conv1 - buffer_size += res_kernel_sz * (mult * n_filters) * (mult * n_filters / 2) * ggml_type_size(wtype); // weight - buffer_size += (mult * n_filters / 2) * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += res_kernel_sz * (mult * n_filters) * (mult * n_filters / 2) * bark_ggml_type_size(wtype); // weight + buffer_size += (mult * n_filters / 2) * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias // conv2 - buffer_size += (mult * n_filters / 2) * (mult * n_filters) * ggml_type_size(wtype); // weight - buffer_size += (mult * n_filters) * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += (mult * n_filters / 2) * (mult * n_filters) * bark_ggml_type_size(wtype); // weight + buffer_size += (mult * n_filters) * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias // shortcut - buffer_size += (mult * n_filters) * (mult * n_filters) * ggml_type_size(wtype); // weight - buffer_size += (mult * n_filters) * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += (mult * n_filters) * (mult * n_filters) * bark_ggml_type_size(wtype); // weight + buffer_size += (mult * n_filters) * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias // downsampling layers - buffer_size += (2 * ratios[3 - i]) * (mult * n_filters) * (mult * n_filters * 2) * ggml_type_size(wtype); // weight - buffer_size += (2 * mult * n_filters) * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += (2 * ratios[3 - i]) * (mult * n_filters) * (mult * n_filters * 2) * bark_ggml_type_size(wtype); // weight + buffer_size += (2 * mult * n_filters) * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias mult *= 2; } // lstm - buffer_size += 2 * n_lstm_layers * (mult * n_filters) * (4 * mult * n_filters) * ggml_type_size(wtype); // weight_ih and weight_hh - buffer_size += 2 * n_lstm_layers * (4 * mult * n_filters) * ggml_type_size(GGML_TYPE_F32); // bias_ih and bias_hh + buffer_size += 2 * n_lstm_layers * (mult * n_filters) * (4 * mult * n_filters) * bark_ggml_type_size(wtype); // weight_ih and weight_hh + buffer_size += 2 * n_lstm_layers * (4 * mult * n_filters) * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias_ih and bias_hh // final conv - buffer_size += kernel_size * (mult * n_filters) * hidden_dim * ggml_type_size(wtype); // weight - buffer_size += hidden_dim * ggml_type_size(GGML_TYPE_F32); // bias + buffer_size += kernel_size * (mult * n_filters) * hidden_dim * bark_ggml_type_size(wtype); // weight + buffer_size += hidden_dim * bark_ggml_type_size(BARK_GGML_TYPE_F32); // bias } // decoder mirrors the encoder (same number of parameters), just double context size @@ -532,66 +532,66 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int // quantizer int n_q = 32; // 32 is an upper bound on the number of codebooks. 
- buffer_size += n_q * hidden_dim * n_bins * ggml_type_size(GGML_TYPE_F32); // embed + buffer_size += n_q * hidden_dim * n_bins * bark_ggml_type_size(BARK_GGML_TYPE_F32); // embed buffer_size += 10ull * MB; // object overhead n_tensors = ((4 * 2) * 4 + 2 + 4 * n_lstm_layers + 2) * 2; // encoder and decoder n_tensors += n_q * 1; // quantizer - printf("%s: ggml tensor size = %d bytes\n", __func__, (int)sizeof(ggml_tensor)); - printf("%s: backend buffer size = %6.2f MB\n", __func__, buffer_size / (1024.0 * 1024.0)); + LOGI("%s: ggml tensor size = %d bytes\n", __func__, (int)sizeof(bark_ggml_tensor)); + LOGI("%s: backend buffer size = %6.2f MB\n", __func__, buffer_size / (1024.0 * 1024.0)); } // create the ggml context { - struct ggml_init_params params = { - /* .mem_size = */ ggml_tensor_overhead() * n_tensors, + struct bark_ggml_init_params params = { + /* .mem_size = */ bark_ggml_tensor_overhead() * n_tensors, /* .mem_buffer = */ NULL, /* .no_alloc = */ true, }; - model.ctx = ggml_init(params); + model.ctx = bark_ggml_init(params); if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); + LOGE("%s: bark_ggml_init() failed\n", __func__); return false; } } -#ifdef GGML_USE_CUBLAS +#ifdef BARK_GGML_USE_CUBLAS if (n_gpu_layers > 0) { - fprintf(stderr, "%s: using CUDA backend\n", __func__); - model.backend = ggml_backend_cuda_init(); + LOGE("%s: using CUDA backend\n", __func__); + model.backend = bark_ggml_backend_cuda_init(); if (!model.backend) { - fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); + LOGE("%s: bark_ggml_backend_cuda_init() failed\n", __func__); } } #endif -#ifdef GGML_USE_METAL +#ifdef BARK_GGML_USE_METAL if (n_gpu_layers > 0) { - fprintf(stderr, "%s: using Metal backend\n", __func__); - ggml_metal_log_set_callback(ggml_log_callback_default, nullptr); - model.backend = ggml_backend_metal_init(); + LOGE("%s: using Metal backend\n", __func__); + bark_ggml_metal_log_set_callback(bark_ggml_log_callback_default, nullptr); + model.backend = bark_ggml_backend_metal_init(); if (!model.backend) { - fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); + LOGE("%s: bark_ggml_backend_metal_init() failed\n", __func__); } } #endif if (!model.backend) { // fallback to CPU backend - fprintf(stderr, "%s: using CPU backend\n", __func__); - model.backend = ggml_backend_cpu_init(); + LOGE("%s: using CPU backend\n", __func__); + model.backend = bark_ggml_backend_cpu_init(); } if (!model.backend) { - fprintf(stderr, "%s: ggml_backend_cpu_init() failed\n", __func__); + LOGE("%s: bark_ggml_backend_cpu_init() failed\n", __func__); return false; } // allocate weights buffer - model.buffer_w = ggml_backend_alloc_buffer(model.backend, buffer_size); + model.buffer_w = bark_ggml_backend_alloc_buffer(model.backend, buffer_size); // prepare memory for the weights { @@ -612,37 +612,37 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int int mult = 1; // scaling factor for hidden size - model.encoder.init_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, in_channels, mult * n_filters); - model.encoder.init_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters); + model.encoder.init_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, kernel_size, in_channels, mult * n_filters); + model.encoder.init_conv_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters); model.tensors["encoder.model.0.conv.conv.weight"] = model.encoder.init_conv_w; model.tensors["encoder.model.0.conv.conv.bias"] = 
model.encoder.init_conv_b; for (int i = 0; i < 4; i++) { // conv1 - model.encoder.blocks[i].conv_1_w = ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters, mult * n_filters / 2); - model.encoder.blocks[i].conv_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2); + model.encoder.blocks[i].conv_1_w = bark_ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters, mult * n_filters / 2); + model.encoder.blocks[i].conv_1_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters / 2); model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.1.conv.conv.weight"] = model.encoder.blocks[i].conv_1_w; model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.1.conv.conv.bias"] = model.encoder.blocks[i].conv_1_b; // conv2 - model.encoder.blocks[i].conv_2_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters); - model.encoder.blocks[i].conv_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters); + model.encoder.blocks[i].conv_2_w = bark_ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters); + model.encoder.blocks[i].conv_2_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters); model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.3.conv.conv.weight"] = model.encoder.blocks[i].conv_2_w; model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".block.3.conv.conv.bias"] = model.encoder.blocks[i].conv_2_b; // shortcut conv - model.encoder.blocks[i].conv_sc_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters, mult * n_filters); - model.encoder.blocks[i].conv_sc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters); + model.encoder.blocks[i].conv_sc_w = bark_ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters, mult * n_filters); + model.encoder.blocks[i].conv_sc_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters); model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".shortcut.conv.conv.weight"] = model.encoder.blocks[i].conv_sc_w; model.tensors["encoder.model." + std::to_string(3 * i + 1) + ".shortcut.conv.conv.bias"] = model.encoder.blocks[i].conv_sc_b; // downsampling - model.encoder.blocks[i].ds_conv_w = ggml_new_tensor_3d(ctx, wtype, 2 * ratios[3 - i], mult * n_filters, mult * n_filters * 2); - model.encoder.blocks[i].ds_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters * 2); + model.encoder.blocks[i].ds_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, 2 * ratios[3 - i], mult * n_filters, mult * n_filters * 2); + model.encoder.blocks[i].ds_conv_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters * 2); model.tensors["encoder.model." + std::to_string(3 * (i + 1)) + ".conv.conv.weight"] = model.encoder.blocks[i].ds_conv_w; model.tensors["encoder.model." 
+ std::to_string(3 * (i + 1)) + ".conv.conv.bias"] = model.encoder.blocks[i].ds_conv_b; @@ -651,33 +651,33 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int } // LSTM - model.encoder.lstm.l0_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); - model.encoder.lstm.l1_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.encoder.lstm.l0_ih_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.encoder.lstm.l1_ih_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); model.tensors["encoder.model.13.lstm.weight_ih_l0"] = model.encoder.lstm.l0_ih_w; model.tensors["encoder.model.13.lstm.weight_ih_l1"] = model.encoder.lstm.l1_ih_w; - model.encoder.lstm.l0_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); - model.encoder.lstm.l1_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.encoder.lstm.l0_hh_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.encoder.lstm.l1_hh_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); model.tensors["encoder.model.13.lstm.weight_hh_l0"] = model.encoder.lstm.l0_hh_w; model.tensors["encoder.model.13.lstm.weight_hh_l1"] = model.encoder.lstm.l1_hh_w; - model.encoder.lstm.l0_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); - model.encoder.lstm.l1_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); + model.encoder.lstm.l0_ih_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); + model.encoder.lstm.l1_ih_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); model.tensors["encoder.model.13.lstm.bias_ih_l0"] = model.encoder.lstm.l0_ih_b; model.tensors["encoder.model.13.lstm.bias_ih_l1"] = model.encoder.lstm.l1_ih_b; - model.encoder.lstm.l0_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); - model.encoder.lstm.l1_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); + model.encoder.lstm.l0_hh_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); + model.encoder.lstm.l1_hh_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); model.tensors["encoder.model.13.lstm.bias_hh_l0"] = model.encoder.lstm.l0_hh_b; model.tensors["encoder.model.13.lstm.bias_hh_l1"] = model.encoder.lstm.l1_hh_b; // final conv - model.encoder.final_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, mult * n_filters, hidden_dim); - model.encoder.final_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_dim); + model.encoder.final_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, kernel_size, mult * n_filters, hidden_dim); + model.encoder.final_conv_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, hidden_dim); model.tensors["encoder.model.15.conv.conv.weight"] = model.encoder.final_conv_w; model.tensors["encoder.model.15.conv.conv.bias"] = model.encoder.final_conv_b; @@ -689,62 +689,62 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int int mult = 16; // 2**len(ratios) - model.decoder.init_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, hidden_dim, mult * n_filters); - model.decoder.init_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters); + model.decoder.init_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, kernel_size, hidden_dim, mult * n_filters); + model.decoder.init_conv_b = bark_ggml_new_tensor_1d(ctx, 
BARK_GGML_TYPE_F32, mult * n_filters); model.tensors["decoder.model.0.conv.conv.weight"] = model.decoder.init_conv_w; model.tensors["decoder.model.0.conv.conv.bias"] = model.decoder.init_conv_b; // LSTM - model.decoder.lstm.l0_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); - model.decoder.lstm.l1_ih_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.decoder.lstm.l0_ih_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.decoder.lstm.l1_ih_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); model.tensors["decoder.model.1.lstm.weight_ih_l0"] = model.decoder.lstm.l0_ih_w; model.tensors["decoder.model.1.lstm.weight_ih_l1"] = model.decoder.lstm.l1_ih_w; - model.decoder.lstm.l0_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); - model.decoder.lstm.l1_hh_w = ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.decoder.lstm.l0_hh_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); + model.decoder.lstm.l1_hh_w = bark_ggml_new_tensor_2d(ctx, wtype, mult * n_filters, 4 * mult * n_filters); model.tensors["decoder.model.1.lstm.weight_hh_l0"] = model.decoder.lstm.l0_hh_w; model.tensors["decoder.model.1.lstm.weight_hh_l1"] = model.decoder.lstm.l1_hh_w; - model.decoder.lstm.l0_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); - model.decoder.lstm.l1_ih_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); + model.decoder.lstm.l0_ih_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); + model.decoder.lstm.l1_ih_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); model.tensors["decoder.model.1.lstm.bias_ih_l0"] = model.decoder.lstm.l0_ih_b; model.tensors["decoder.model.1.lstm.bias_ih_l1"] = model.decoder.lstm.l1_ih_b; - model.decoder.lstm.l0_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); - model.decoder.lstm.l1_hh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4 * mult * n_filters); + model.decoder.lstm.l0_hh_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); + model.decoder.lstm.l1_hh_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 4 * mult * n_filters); model.tensors["decoder.model.1.lstm.bias_hh_l0"] = model.decoder.lstm.l0_hh_b; model.tensors["decoder.model.1.lstm.bias_hh_l1"] = model.decoder.lstm.l1_hh_b; for (int i = 0; i < 4; i++) { // upsampling - model.decoder.blocks[i].us_conv_w = ggml_new_tensor_3d(ctx, wtype, ratios[i] * 2, mult * n_filters / 2, mult * n_filters); - model.decoder.blocks[i].us_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2); + model.decoder.blocks[i].us_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, ratios[i] * 2, mult * n_filters / 2, mult * n_filters); + model.decoder.blocks[i].us_conv_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters / 2); model.tensors["decoder.model." + std::to_string(3 * (i + 1)) + ".convtr.convtr.weight"] = model.decoder.blocks[i].us_conv_w; model.tensors["decoder.model." 
+ std::to_string(3 * (i + 1)) + ".convtr.convtr.bias"] = model.decoder.blocks[i].us_conv_b; // conv1 - model.decoder.blocks[i].conv_1_w = ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters / 2, mult * n_filters / 4); - model.decoder.blocks[i].conv_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 4); + model.decoder.blocks[i].conv_1_w = bark_ggml_new_tensor_3d(ctx, wtype, res_kernel_sz, mult * n_filters / 2, mult * n_filters / 4); + model.decoder.blocks[i].conv_1_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters / 4); model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.1.conv.conv.weight"] = model.decoder.blocks[i].conv_1_w; model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.1.conv.conv.bias"] = model.decoder.blocks[i].conv_1_b; // conv2 - model.decoder.blocks[i].conv_2_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 4, mult * n_filters / 2); - model.decoder.blocks[i].conv_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2); + model.decoder.blocks[i].conv_2_w = bark_ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 4, mult * n_filters / 2); + model.decoder.blocks[i].conv_2_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters / 2); model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.3.conv.conv.weight"] = model.decoder.blocks[i].conv_2_w; model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".block.3.conv.conv.bias"] = model.decoder.blocks[i].conv_2_b; // shortcut - model.decoder.blocks[i].conv_sc_w = ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters / 2); - model.decoder.blocks[i].conv_sc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, mult * n_filters / 2); + model.decoder.blocks[i].conv_sc_w = bark_ggml_new_tensor_3d(ctx, wtype, 1, mult * n_filters / 2, mult * n_filters / 2); + model.decoder.blocks[i].conv_sc_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, mult * n_filters / 2); model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".shortcut.conv.conv.weight"] = model.decoder.blocks[i].conv_sc_w; model.tensors["decoder.model." + std::to_string(3 * (i + 1) + 1) + ".shortcut.conv.conv.bias"] = model.decoder.blocks[i].conv_sc_b; @@ -752,8 +752,8 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int mult /= 2; } - model.decoder.final_conv_w = ggml_new_tensor_3d(ctx, wtype, kernel_size, n_filters, in_channels); - model.decoder.final_conv_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, in_channels); + model.decoder.final_conv_w = bark_ggml_new_tensor_3d(ctx, wtype, kernel_size, n_filters, in_channels); + model.decoder.final_conv_b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, in_channels); model.tensors["decoder.model.15.conv.conv.weight"] = model.decoder.final_conv_w; model.tensors["decoder.model.15.conv.conv.bias"] = model.decoder.final_conv_b; @@ -764,7 +764,7 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int model.quantizer.blocks.resize(n_q); for (int i = 0; i < n_q; i++) { - model.quantizer.blocks[i].embed = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, hidden_dim, n_bins); + model.quantizer.blocks[i].embed = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, hidden_dim, n_bins); model.tensors["quantizer.vq.layers." 
+ std::to_string(i) + "._codebook.embed"] = model.quantizer.blocks[i].embed; } @@ -773,7 +773,7 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int // load weights { - ggml_allocr *alloc = ggml_allocr_new_from_buffer(model.buffer_w); + bark_ggml_allocr *alloc = bark_ggml_allocr_new_from_buffer(model.buffer_w); size_t total_size = 0; model.n_loaded = 0; @@ -806,55 +806,55 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int name.assign(&buf[0], buf.size()); if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); + LOGE("%s: unknown tensor '%s' in model file\n", __func__, name.data()); return false; } auto tensor = model.tensors[name.data()]; - ggml_set_name(tensor, name.c_str()); - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); + bark_ggml_set_name(tensor, name.c_str()); + if (bark_ggml_nelements(tensor) != nelements) { + LOGE("%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); return false; } if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1] || tensor->ne[2] != ne[2]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%lld, %lld, %lld], expected [%d, %d, %d]\n", + LOGE("%s: tensor '%s' has wrong shape in model file: got [%lld, %lld, %lld], expected [%d, %d, %d]\n", __func__, name.data(), tensor->ne[0], tensor->ne[1], tensor->ne[2], ne[0], ne[1], ne[2]); return false; } - const size_t bpe = ggml_type_size(ggml_type(ftype)); + const size_t bpe = bark_ggml_type_size(bark_ggml_type(ftype)); - if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements * bpe); + if ((nelements * bpe) / bark_ggml_blck_size(tensor->type) != bark_ggml_nbytes(tensor)) { + LOGE("%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", + __func__, name.data(), bark_ggml_nbytes(tensor), nelements * bpe); return false; } - ggml_allocr_alloc(alloc, tensor); + bark_ggml_allocr_alloc(alloc, tensor); - if (ggml_backend_is_cpu(model.backend) -#ifdef GGML_USE_METAL - || ggml_backend_is_metal(model.backend) + if (bark_ggml_backend_is_cpu(model.backend) +#ifdef BARK_GGML_USE_METAL + || bark_ggml_backend_is_metal(model.backend) #endif ) { // for the CPU and Metal backends, we can read directly into the device memory - infile.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + infile.read(reinterpret_cast(tensor->data), bark_ggml_nbytes(tensor)); } else { // read into a temporary buffer first, then copy to device memory - read_buf.resize(ggml_nbytes(tensor)); - infile.read(read_buf.data(), ggml_nbytes(tensor)); - ggml_backend_tensor_set(tensor, read_buf.data(), 0, ggml_nbytes(tensor)); + read_buf.resize(bark_ggml_nbytes(tensor)); + infile.read(read_buf.data(), bark_ggml_nbytes(tensor)); + bark_ggml_backend_tensor_set(tensor, read_buf.data(), 0, bark_ggml_nbytes(tensor)); } - // printf("%48s - [%5d, %5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ne[2], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); + // LOGI("%48s - [%5d, %5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ne[2], ftype == 0 ? 
"float" : "f16", bark_ggml_nbytes(tensor)/1024.0/1024.0); - total_size += ggml_nbytes(tensor); + total_size += bark_ggml_nbytes(tensor); model.n_loaded++; } - ggml_allocr_free(alloc); - printf("%s: model size = %8.2f MB\n", __func__, total_size / 1024.0 / 1024.0); + bark_ggml_allocr_free(alloc); + LOGI("%s: model size = %8.2f MB\n", __func__, total_size / 1024.0 / 1024.0); } infile.close(); @@ -862,12 +862,12 @@ bool encodec_load_model_weights(std::ifstream &infile, encodec_model &model, int return true; } -struct ggml_tensor *encodec_forward_encoder( +struct bark_ggml_tensor *encodec_forward_encoder( struct encodec_context *ectx, - struct ggml_context *ctx0, - struct ggml_tensor *inp) { + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *inp) { if (!inp) { - fprintf(stderr, "%s: null input tensor\n", __func__); + LOGE("%s: null input tensor\n", __func__); return NULL; } @@ -880,35 +880,35 @@ struct ggml_tensor *encodec_forward_encoder( const int res_kernel_sz = hparams.residual_kernel_size; const int stride = hparams.stride; - struct ggml_tensor *inpL = strided_conv_1d( + struct bark_ggml_tensor *inpL = strided_conv_1d( ctx0, inp, model.encoder.init_conv_w, model.encoder.init_conv_b, stride); for (int layer_ix = 0; layer_ix < 4; layer_ix++) { encodec_encoder_block block = model.encoder.blocks[layer_ix]; - struct ggml_tensor *current = inpL; + struct bark_ggml_tensor *current = inpL; // shortcut - struct ggml_tensor *shortcut = strided_conv_1d( + struct bark_ggml_tensor *shortcut = strided_conv_1d( ctx0, inpL, block.conv_sc_w, block.conv_sc_b, stride); // conv1 - current = ggml_elu(ctx0, current); + current = bark_ggml_elu(ctx0, current); current = strided_conv_1d( ctx0, current, block.conv_1_w, block.conv_1_b, stride); // conv2 - current = ggml_elu(ctx0, current); + current = bark_ggml_elu(ctx0, current); current = strided_conv_1d( ctx0, current, block.conv_2_w, block.conv_2_b, stride); // residual connection - inpL = ggml_add(ctx0, current, shortcut); + inpL = bark_ggml_add(ctx0, current, shortcut); // downsampling layers - inpL = ggml_elu(ctx0, inpL); + inpL = bark_ggml_elu(ctx0, inpL); inpL = strided_conv_1d( ctx0, inpL, block.ds_conv_w, block.ds_conv_b, ratios[3 - layer_ix]); @@ -916,38 +916,38 @@ struct ggml_tensor *encodec_forward_encoder( // lstm { - struct ggml_tensor *cur = inpL; + struct bark_ggml_tensor *cur = inpL; const encodec_lstm lstm = model.encoder.lstm; // first lstm layer - struct ggml_tensor *hs1 = forward_pass_lstm_unilayer( + struct bark_ggml_tensor *hs1 = forward_pass_lstm_unilayer( ctx0, allocr, cur, lstm.l0_ih_w, lstm.l0_hh_w, lstm.l0_ih_b, lstm.l0_hh_b); // second lstm layer - struct ggml_tensor *out = forward_pass_lstm_unilayer( + struct bark_ggml_tensor *out = forward_pass_lstm_unilayer( ctx0, allocr, hs1, lstm.l1_ih_w, lstm.l1_hh_w, lstm.l1_ih_b, lstm.l1_hh_b); - inpL = ggml_add(ctx0, inpL, out); + inpL = bark_ggml_add(ctx0, inpL, out); } // final conv - inpL = ggml_elu(ctx0, inpL); + inpL = bark_ggml_elu(ctx0, inpL); - struct ggml_tensor *encoded_inp = strided_conv_1d( + struct bark_ggml_tensor *encoded_inp = strided_conv_1d( ctx0, inpL, model.encoder.final_conv_w, model.encoder.final_conv_b, stride); return encoded_inp; } -struct ggml_tensor *encodec_forward_quantizer_encode( +struct bark_ggml_tensor *encodec_forward_quantizer_encode( struct encodec_context *ectx, - struct ggml_context *ctx0, - struct ggml_tensor *encoded_inp) { + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *encoded_inp) { if (!encoded_inp) { - fprintf(stderr, "%s: 
null input tensor\n", __func__); + LOGE("%s: null input tensor\n", __func__); return NULL; } @@ -965,63 +965,63 @@ struct ggml_tensor *encodec_forward_quantizer_encode( const int seq_length = encoded_inp->ne[0]; - struct ggml_tensor *codes = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, seq_length, n_q); - ggml_allocr_alloc(allocr, codes); + struct bark_ggml_tensor *codes = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_I32, seq_length, n_q); + bark_ggml_allocr_alloc(allocr, codes); - struct ggml_tensor *dist_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_allocr_alloc(allocr, dist_scale); + struct bark_ggml_tensor *dist_scale = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, 1); + bark_ggml_allocr_alloc(allocr, dist_scale); - if (!ggml_allocr_is_measure(allocr)) { + if (!bark_ggml_allocr_is_measure(allocr)) { float s = -2.0f; - ggml_backend_tensor_set(dist_scale, &s, 0, sizeof(s)); + bark_ggml_backend_tensor_set(dist_scale, &s, 0, sizeof(s)); } - struct ggml_tensor *inpL = ggml_cont(ctx0, ggml_transpose(ctx0, encoded_inp)); - struct ggml_tensor *residual = inpL; - struct ggml_tensor *indices; + struct bark_ggml_tensor *inpL = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, encoded_inp)); + struct bark_ggml_tensor *residual = inpL; + struct bark_ggml_tensor *indices; for (int i = 0; i < n_q; i++) { encodec_quant_block block = model.quantizer.blocks[i]; // compute distance // [seq_length, n_bins] - struct ggml_tensor *dp = ggml_scale( - ctx0, ggml_mul_mat(ctx0, block.embed, residual), dist_scale); + struct bark_ggml_tensor *dp = bark_ggml_scale( + ctx0, bark_ggml_mul_mat(ctx0, block.embed, residual), dist_scale); // [n_bins] - struct ggml_tensor *sqr_embed = ggml_sqr(ctx0, block.embed); - struct ggml_tensor *sqr_embed_nrm = ggml_sum_rows(ctx0, sqr_embed); + struct bark_ggml_tensor *sqr_embed = bark_ggml_sqr(ctx0, block.embed); + struct bark_ggml_tensor *sqr_embed_nrm = bark_ggml_sum_rows(ctx0, sqr_embed); // [seq_length] - struct ggml_tensor *sqr_inp = ggml_sqr(ctx0, residual); - struct ggml_tensor *sqr_inp_nrm = ggml_sum_rows(ctx0, sqr_inp); + struct bark_ggml_tensor *sqr_inp = bark_ggml_sqr(ctx0, residual); + struct bark_ggml_tensor *sqr_inp_nrm = bark_ggml_sum_rows(ctx0, sqr_inp); // [seq_length, n_bins] - struct ggml_tensor *dist = ggml_add(ctx0, ggml_repeat(ctx0, sqr_inp_nrm, dp), dp); - dist = ggml_add(ctx0, ggml_repeat(ctx0, ggml_transpose(ctx0, sqr_embed_nrm), dist), dist); - dist = ggml_neg(ctx0, dist); + struct bark_ggml_tensor *dist = bark_ggml_add(ctx0, bark_ggml_repeat(ctx0, sqr_inp_nrm, dp), dp); + dist = bark_ggml_add(ctx0, bark_ggml_repeat(ctx0, bark_ggml_transpose(ctx0, sqr_embed_nrm), dist), dist); + dist = bark_ggml_neg(ctx0, dist); // take the argmax over the column dimension // [seq_length] - indices = ggml_argmax(ctx0, dist); + indices = bark_ggml_argmax(ctx0, dist); // look up in embedding table - struct ggml_tensor *quantized = ggml_get_rows(ctx0, block.embed, indices); + struct bark_ggml_tensor *quantized = bark_ggml_get_rows(ctx0, block.embed, indices); - residual = ggml_sub(ctx0, residual, quantized); + residual = bark_ggml_sub(ctx0, residual, quantized); - codes = ggml_set_1d(ctx0, codes, indices, i * codes->nb[1]); + codes = bark_ggml_set_1d(ctx0, codes, indices, i * codes->nb[1]); } return codes; } -struct ggml_tensor *encodec_forward_quantizer_decode( +struct bark_ggml_tensor *encodec_forward_quantizer_decode( struct encodec_context *ectx, - struct ggml_context *ctx0, - struct ggml_tensor *codes) { + struct bark_ggml_context *ctx0, + struct 
bark_ggml_tensor *codes) { if (!codes) { - fprintf(stderr, "%s: null input tensor\n", __func__); + LOGE("%s: null input tensor\n", __func__); return NULL; } @@ -1042,33 +1042,33 @@ struct ggml_tensor *encodec_forward_quantizer_decode( assert(n_q == codes->ne[1]); - struct ggml_tensor *quantized_out = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, hidden_dim, seq_length); - ggml_allocr_alloc(allocr, quantized_out); + struct bark_ggml_tensor *quantized_out = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_F32, hidden_dim, seq_length); + bark_ggml_allocr_alloc(allocr, quantized_out); - if (!ggml_allocr_is_measure(allocr)) { - quantized_out = ggml_set_zero(quantized_out); + if (!bark_ggml_allocr_is_measure(allocr)) { + quantized_out = bark_ggml_set_zero(quantized_out); } for (int i = 0; i < n_q; i++) { encodec_quant_block block = model.quantizer.blocks[i]; - struct ggml_tensor *indices = ggml_view_1d(ctx0, codes, seq_length, i * codes->nb[1]); - struct ggml_tensor *quantized = ggml_get_rows(ctx0, block.embed, indices); + struct bark_ggml_tensor *indices = bark_ggml_view_1d(ctx0, codes, seq_length, i * codes->nb[1]); + struct bark_ggml_tensor *quantized = bark_ggml_get_rows(ctx0, block.embed, indices); - quantized_out = ggml_add(ctx0, quantized_out, quantized); + quantized_out = bark_ggml_add(ctx0, quantized_out, quantized); } - quantized_out = ggml_cont(ctx0, ggml_transpose(ctx0, quantized_out)); + quantized_out = bark_ggml_cont(ctx0, bark_ggml_transpose(ctx0, quantized_out)); return quantized_out; } -struct ggml_tensor *encodec_forward_decoder( +struct bark_ggml_tensor *encodec_forward_decoder( struct encodec_context *ectx, - struct ggml_context *ctx0, - struct ggml_tensor *quantized_out) { + struct bark_ggml_context *ctx0, + struct bark_ggml_tensor *quantized_out) { if (!quantized_out) { - fprintf(stderr, "%s: null input tensor\n", __func__); + LOGE("%s: null input tensor\n", __func__); return NULL; } @@ -1081,71 +1081,71 @@ struct ggml_tensor *encodec_forward_decoder( const int res_kernel_sz = hparams.residual_kernel_size; const int stride = hparams.stride; - struct ggml_tensor *inpL = strided_conv_1d( + struct bark_ggml_tensor *inpL = strided_conv_1d( ctx0, quantized_out, model.decoder.init_conv_w, model.decoder.init_conv_b, stride); // lstm { - struct ggml_tensor *cur = inpL; + struct bark_ggml_tensor *cur = inpL; const encodec_lstm lstm = model.decoder.lstm; // first lstm layer - struct ggml_tensor *hs1 = forward_pass_lstm_unilayer( + struct bark_ggml_tensor *hs1 = forward_pass_lstm_unilayer( ctx0, allocr, cur, lstm.l0_ih_w, lstm.l0_hh_w, lstm.l0_ih_b, lstm.l0_hh_b); // second lstm layer - struct ggml_tensor *out = forward_pass_lstm_unilayer( + struct bark_ggml_tensor *out = forward_pass_lstm_unilayer( ctx0, allocr, hs1, lstm.l1_ih_w, lstm.l1_hh_w, lstm.l1_ih_b, lstm.l1_hh_b); - inpL = ggml_add(ctx0, inpL, out); + inpL = bark_ggml_add(ctx0, inpL, out); } for (int layer_ix = 0; layer_ix < 4; layer_ix++) { encodec_decoder_block block = model.decoder.blocks[layer_ix]; // upsampling layers - inpL = ggml_elu(ctx0, inpL); + inpL = bark_ggml_elu(ctx0, inpL); inpL = strided_conv_transpose_1d( ctx0, inpL, block.us_conv_w, block.us_conv_b, ratios[layer_ix]); - struct ggml_tensor *current = inpL; + struct bark_ggml_tensor *current = inpL; // shortcut - struct ggml_tensor *shortcut = strided_conv_1d( + struct bark_ggml_tensor *shortcut = strided_conv_1d( ctx0, inpL, block.conv_sc_w, block.conv_sc_b, stride); // conv1 - current = ggml_elu(ctx0, current); + current = bark_ggml_elu(ctx0, current); current = 
strided_conv_1d( ctx0, current, block.conv_1_w, block.conv_1_b, stride); // conv2 - current = ggml_elu(ctx0, current); + current = bark_ggml_elu(ctx0, current); current = strided_conv_1d( ctx0, current, block.conv_2_w, block.conv_2_b, stride); // residual connection - inpL = ggml_add(ctx0, current, shortcut); + inpL = bark_ggml_add(ctx0, current, shortcut); } // final conv - inpL = ggml_elu(ctx0, inpL); + inpL = bark_ggml_elu(ctx0, inpL); - struct ggml_tensor *decoded_inp = strided_conv_1d( + struct bark_ggml_tensor *decoded_inp = strided_conv_1d( ctx0, inpL, model.decoder.final_conv_w, model.decoder.final_conv_b, stride); return decoded_inp; } -struct ggml_cgraph *encodec_build_graph( +struct bark_ggml_cgraph *encodec_build_graph( struct encodec_context *ectx, const float * inp_audio, const int n_samples, @@ -1159,50 +1159,50 @@ struct ggml_cgraph *encodec_build_graph( const int n_q = hparams.n_q; // since we are using ggml-alloc, this buffer only needs enough space to hold the - // ggml_tensor and ggml_cgraph structs, but not the tensor data - static size_t buf_size = ggml_tensor_overhead() * GGML_MAX_NODES + ggml_graph_overhead(); + // bark_ggml_tensor and bark_ggml_cgraph structs, but not the tensor data + static size_t buf_size = bark_ggml_tensor_overhead() * BARK_GGML_MAX_NODES + bark_ggml_graph_overhead(); static std::vector buf(buf_size); - struct ggml_init_params ggml_params = { + struct bark_ggml_init_params bark_ggml_params = { /*.mem_size =*/buf_size, /*.mem_buffer =*/buf.data(), - /*.no_alloc =*/true, // skip allocating as we use ggml_alloc to allocate exact memory requirements + /*.no_alloc =*/true, // skip allocating as we use bark_ggml_alloc to allocate exact memory requirements }; - struct ggml_context *ctx0 = ggml_init(ggml_params); + struct bark_ggml_context *ctx0 = bark_ggml_init(bark_ggml_params); - struct ggml_cgraph *gf = ggml_new_graph(ctx0); + struct bark_ggml_cgraph *gf = bark_ggml_new_graph(ctx0); - struct ggml_tensor *inp = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_samples); - ggml_allocr_alloc(allocr, inp); + struct bark_ggml_tensor *inp = bark_ggml_new_tensor_1d(ctx0, BARK_GGML_TYPE_F32, n_samples); + bark_ggml_allocr_alloc(allocr, inp); // avoid writing to tensors if we are only measuring the memory usage - if (!ggml_allocr_is_measure(allocr)) { - ggml_backend_tensor_set(inp, inp_audio, 0, n_samples * ggml_element_size(inp)); + if (!bark_ggml_allocr_is_measure(allocr)) { + bark_ggml_backend_tensor_set(inp, inp_audio, 0, n_samples * bark_ggml_element_size(inp)); } - struct ggml_tensor *encoded = encodec_forward_encoder(ectx, ctx0, inp); - struct ggml_tensor *codes = encodec_forward_quantizer_encode(ectx, ctx0, encoded); - struct ggml_tensor *quantized = encodec_forward_quantizer_decode(ectx, ctx0, codes); - struct ggml_tensor *decoded = encodec_forward_decoder(ectx, ctx0, quantized); + struct bark_ggml_tensor *encoded = encodec_forward_encoder(ectx, ctx0, inp); + struct bark_ggml_tensor *codes = encodec_forward_quantizer_encode(ectx, ctx0, encoded); + struct bark_ggml_tensor *quantized = encodec_forward_quantizer_decode(ectx, ctx0, codes); + struct bark_ggml_tensor *decoded = encodec_forward_decoder(ectx, ctx0, quantized); switch (mode) { case encodec_run_mode::full: { - ggml_build_forward_expand(gf, decoded); + bark_ggml_build_forward_expand(gf, decoded); } break; case encodec_run_mode::encode: { - ggml_build_forward_expand(gf, codes); + bark_ggml_build_forward_expand(gf, codes); } break; case encodec_run_mode::decode: { return NULL; } break; default: { - 
fprintf(stderr, "%s: unknown run mode\n", __func__); + LOGE("%s: unknown run mode\n", __func__); return NULL; } break; } - ggml_free(ctx0); + bark_ggml_free(ctx0); ectx->encoded = encoded; ectx->codes = codes; @@ -1211,7 +1211,7 @@ struct ggml_cgraph *encodec_build_graph( return gf; } -struct ggml_cgraph *encodec_build_graph( +struct bark_ggml_cgraph *encodec_build_graph( struct encodec_context *ectx, const int32_t * codes, const int n_codes, @@ -1231,49 +1231,49 @@ struct ggml_cgraph *encodec_build_graph( const int n_q = get_num_quantizers_for_bandwidth(n_bins, frame_rate, bandwidth); if (n_codes % n_q != 0) { - fprintf(stderr, "%s: invalid number of codes\n", __func__); + LOGE("%s: invalid number of codes\n", __func__); return NULL; } const int N = n_codes / n_q; // since we are using ggml-alloc, this buffer only needs enough space to hold the - // ggml_tensor and ggml_cgraph structs, but not the tensor data - static size_t buf_size = ggml_tensor_overhead() * GGML_MAX_NODES + ggml_graph_overhead(); + // bark_ggml_tensor and bark_ggml_cgraph structs, but not the tensor data + static size_t buf_size = bark_ggml_tensor_overhead() * BARK_GGML_MAX_NODES + bark_ggml_graph_overhead(); static std::vector buf(buf_size); - struct ggml_init_params ggml_params = { + struct bark_ggml_init_params bark_ggml_params = { /*.mem_size =*/buf_size, /*.mem_buffer =*/buf.data(), /*.no_alloc =*/true, }; - struct ggml_context *ctx0 = ggml_init(ggml_params); + struct bark_ggml_context *ctx0 = bark_ggml_init(bark_ggml_params); - struct ggml_cgraph *gf = ggml_new_graph(ctx0); + struct bark_ggml_cgraph *gf = bark_ggml_new_graph(ctx0); - struct ggml_tensor *inp_codes = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, N, n_q); - ggml_allocr_alloc(allocr, inp_codes); + struct bark_ggml_tensor *inp_codes = bark_ggml_new_tensor_2d(ctx0, BARK_GGML_TYPE_I32, N, n_q); + bark_ggml_allocr_alloc(allocr, inp_codes); // avoid writing to tensors if we are only measuring the memory usage - if (!ggml_allocr_is_measure(allocr)) { - ggml_backend_tensor_set(inp_codes, codes, 0, N * n_q * ggml_element_size(inp_codes)); + if (!bark_ggml_allocr_is_measure(allocr)) { + bark_ggml_backend_tensor_set(inp_codes, codes, 0, N * n_q * bark_ggml_element_size(inp_codes)); } - struct ggml_tensor *quantized = encodec_forward_quantizer_decode(ectx, ctx0, inp_codes); - struct ggml_tensor *decoded = encodec_forward_decoder(ectx, ctx0, quantized); + struct bark_ggml_tensor *quantized = encodec_forward_quantizer_decode(ectx, ctx0, inp_codes); + struct bark_ggml_tensor *decoded = encodec_forward_decoder(ectx, ctx0, quantized); switch (mode) { case encodec_run_mode::decode: { - ggml_build_forward_expand(gf, decoded); + bark_ggml_build_forward_expand(gf, decoded); } break; default: { - fprintf(stderr, "%s: unknown run mode\n", __func__); + LOGE("%s: unknown run mode\n", __func__); return NULL; } break; } - ggml_free(ctx0); + bark_ggml_free(ctx0); ectx->codes = inp_codes; ectx->decoded = decoded; @@ -1291,23 +1291,23 @@ bool encodec_eval_internal( auto &allocr = ectx->allocr; // reset the allocator to free all the memory allocated during the previous inference - ggml_allocr_reset(allocr); + bark_ggml_allocr_reset(allocr); - struct ggml_cgraph *gf = encodec_build_graph(ectx, raw_audio, n_samples, mode); + struct bark_ggml_cgraph *gf = encodec_build_graph(ectx, raw_audio, n_samples, mode); // allocate tensors - ggml_allocr_alloc_graph(allocr, gf); + bark_ggml_allocr_alloc_graph(allocr, gf); // run the computation - if (ggml_backend_is_cpu(model.backend)) { - 
ggml_backend_cpu_set_n_threads(model.backend, n_threads); + if (bark_ggml_backend_is_cpu(model.backend)) { + bark_ggml_backend_cpu_set_n_threads(model.backend, n_threads); } -#ifdef GGML_USE_METAL - if (ggml_backend_is_metal(model.backend)) { - ggml_backend_metal_set_n_cb(model.backend, n_threads); +#ifdef BARK_GGML_USE_METAL + if (bark_ggml_backend_is_metal(model.backend)) { + bark_ggml_backend_metal_set_n_cb(model.backend, n_threads); } #endif - ggml_backend_graph_compute(model.backend, gf); + bark_ggml_backend_graph_compute(model.backend, gf); return true; } @@ -1322,23 +1322,23 @@ bool encodec_eval_internal( auto &allocr = ectx->allocr; // reset the allocator to free all the memory allocated during the previous inference - ggml_allocr_reset(allocr); + bark_ggml_allocr_reset(allocr); - struct ggml_cgraph *gf = encodec_build_graph(ectx, codes, n_codes, mode); + struct bark_ggml_cgraph *gf = encodec_build_graph(ectx, codes, n_codes, mode); // allocate tensors - ggml_allocr_alloc_graph(allocr, gf); + bark_ggml_allocr_alloc_graph(allocr, gf); // run the computation - if (ggml_backend_is_cpu(model.backend)) { - ggml_backend_cpu_set_n_threads(model.backend, n_threads); + if (bark_ggml_backend_is_cpu(model.backend)) { + bark_ggml_backend_cpu_set_n_threads(model.backend, n_threads); } -#ifdef GGML_USE_METAL - if (ggml_backend_is_metal(model.backend)) { - ggml_backend_metal_set_n_cb(model.backend, n_threads); +#ifdef BARK_GGML_USE_METAL + if (bark_ggml_backend_is_metal(model.backend)) { + bark_ggml_backend_metal_set_n_cb(model.backend, n_threads); } #endif - ggml_backend_graph_compute(model.backend, gf); + bark_ggml_backend_graph_compute(model.backend, gf); return true; } @@ -1349,35 +1349,35 @@ bool encodec_eval( const int n_samples, const int n_threads, const encodec_run_mode mode) { - const int64_t t_start_us = ggml_time_us(); + const int64_t t_start_us = bark_ggml_time_us(); // allocate the compute buffer { // alignment required by the backend - size_t align = ggml_backend_get_alignment(ectx->model.backend); - ectx->allocr = ggml_allocr_new_measure(align); + size_t align = bark_ggml_backend_get_alignment(ectx->model.backend); + ectx->allocr = bark_ggml_allocr_new_measure(align); // create the graph for memory usage estimation - struct ggml_cgraph *gf = encodec_build_graph(ectx, raw_audio, n_samples, mode); + struct bark_ggml_cgraph *gf = encodec_build_graph(ectx, raw_audio, n_samples, mode); // compute the required memory - size_t mem_size = ggml_allocr_alloc_graph(ectx->allocr, gf); + size_t mem_size = bark_ggml_allocr_alloc_graph(ectx->allocr, gf); // recreate the allocator with the required memory - ggml_allocr_free(ectx->allocr); - ectx->buf_compute = ggml_backend_alloc_buffer(ectx->model.backend, mem_size); - ectx->allocr = ggml_allocr_new_from_buffer(ectx->buf_compute); + bark_ggml_allocr_free(ectx->allocr); + ectx->buf_compute = bark_ggml_backend_alloc_buffer(ectx->model.backend, mem_size); + ectx->allocr = bark_ggml_allocr_new_from_buffer(ectx->buf_compute); - fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); + LOGE("%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); } // encodec eval if (!encodec_eval_internal(ectx, raw_audio, n_samples, n_threads, mode)) { - fprintf(stderr, "%s: failed to run encodec eval\n", __func__); + LOGE("%s: failed to run encodec eval\n", __func__); return false; } - ectx->stats.t_compute_us = ggml_time_us() - t_start_us; + ectx->stats.t_compute_us = bark_ggml_time_us() - t_start_us; 
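    // Reviewer note, not part of the upstream patch: the block above is ggml's
    // two-pass "measure, then allocate" pattern, unchanged here apart from the
    // bark_ggml_ prefix. A condensed restatement of the flow, using only calls
    // that already appear in this function:
    //
    //   size_t align    = bark_ggml_backend_get_alignment(ectx->model.backend);
    //   ectx->allocr    = bark_ggml_allocr_new_measure(align);                    // measure allocator
    //   struct bark_ggml_cgraph *g = encodec_build_graph(ectx, raw_audio, n_samples, mode);
    //   size_t mem_size = bark_ggml_allocr_alloc_graph(ectx->allocr, g);          // pass 1: sizes only
    //   bark_ggml_allocr_free(ectx->allocr);
    //   ectx->buf_compute = bark_ggml_backend_alloc_buffer(ectx->model.backend, mem_size);
    //   ectx->allocr      = bark_ggml_allocr_new_from_buffer(ectx->buf_compute);  // pass 2: real buffer
    //
    // encodec_eval_internal() then rebuilds the graph against the real allocator
    // and runs bark_ggml_backend_graph_compute() on it.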
return true; } @@ -1388,35 +1388,35 @@ bool encodec_eval( const int n_codes, const int n_threads, const encodec_run_mode mode) { - const int64_t t_start_ms = ggml_time_us(); + const int64_t t_start_ms = bark_ggml_time_us(); // allocate the compute buffer { // alignment required by the backend - size_t align = ggml_backend_get_alignment(ectx->model.backend); - ectx->allocr = ggml_allocr_new_measure(align); + size_t align = bark_ggml_backend_get_alignment(ectx->model.backend); + ectx->allocr = bark_ggml_allocr_new_measure(align); // create the graph for memory usage estimation - struct ggml_cgraph *gf = encodec_build_graph(ectx, codes, n_codes, mode); + struct bark_ggml_cgraph *gf = encodec_build_graph(ectx, codes, n_codes, mode); // compute the required memory - size_t mem_size = ggml_allocr_alloc_graph(ectx->allocr, gf); + size_t mem_size = bark_ggml_allocr_alloc_graph(ectx->allocr, gf); // recreate the allocator with the required memory - ggml_allocr_free(ectx->allocr); - ectx->buf_compute = ggml_backend_alloc_buffer(ectx->model.backend, mem_size); - ectx->allocr = ggml_allocr_new_from_buffer(ectx->buf_compute); + bark_ggml_allocr_free(ectx->allocr); + ectx->buf_compute = bark_ggml_backend_alloc_buffer(ectx->model.backend, mem_size); + ectx->allocr = bark_ggml_allocr_new_from_buffer(ectx->buf_compute); - fprintf(stderr, "%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); + LOGE("%s: compute buffer size: %.2f MB\n\n", __func__, mem_size / 1024.0 / 1024.0); } // encodec eval if (!encodec_eval_internal(ectx, codes, n_codes, n_threads, mode)) { - fprintf(stderr, "%s: failed to run encodec eval\n", __func__); + LOGE("%s: failed to run encodec eval\n", __func__); return false; } - ectx->stats.t_compute_us = ggml_time_us() - t_start_ms; + ectx->stats.t_compute_us = bark_ggml_time_us() - t_start_ms; return true; } @@ -1427,28 +1427,28 @@ bool encodec_reconstruct_audio( const int n_samples, int n_threads) { if (raw_audio == nullptr) { - fprintf(stderr, "%s: null input audio\n", __func__); + LOGE("%s: null input audio\n", __func__); return false; } if (!encodec_eval(ectx, raw_audio, n_samples, n_threads, encodec_run_mode::full)) { - fprintf(stderr, "%s: failed to run encodec eval\n", __func__); + LOGE("%s: failed to run encodec eval\n", __func__); return false; } if (!ectx->decoded) { - fprintf(stderr, "%s: null decoded tensor\n", __func__); + LOGE("%s: null decoded tensor\n", __func__); return false; } - struct ggml_tensor *decoded = ectx->decoded; + struct bark_ggml_tensor *decoded = ectx->decoded; auto &out_audio = ectx->out_audio; int out_length = decoded->ne[0]; out_audio.resize(out_length); - ggml_backend_tensor_get(decoded, out_audio.data(), 0, out_length * ggml_element_size(decoded)); + bark_ggml_backend_tensor_get(decoded, out_audio.data(), 0, out_length * bark_ggml_element_size(decoded)); return true; } @@ -1459,23 +1459,23 @@ bool encodec_compress_audio( const int n_samples, int n_threads) { if (!encodec_eval(ectx, raw_audio, n_samples, n_threads, encodec_run_mode::encode)) { - fprintf(stderr, "%s: failed to run encodec eval\n", __func__); + LOGE("%s: failed to run encodec eval\n", __func__); return false; } if (!ectx->codes) { - fprintf(stderr, "%s: null codes tensor\n", __func__); + LOGE("%s: null codes tensor\n", __func__); return false; } - struct ggml_tensor *codes = ectx->codes; + struct bark_ggml_tensor *codes = ectx->codes; auto &out_codes = ectx->out_codes; int out_length = codes->ne[0] * codes->ne[1]; out_codes.resize(out_length); - 
ggml_backend_tensor_get(codes, out_codes.data(), 0, out_length * ggml_element_size(codes)); + bark_ggml_backend_tensor_get(codes, out_codes.data(), 0, out_length * bark_ggml_element_size(codes)); return true; } @@ -1486,23 +1486,23 @@ bool encodec_decompress_audio( const int n_codes, int n_threads) { if (!encodec_eval(ectx, codes, n_codes, n_threads, encodec_run_mode::decode)) { - fprintf(stderr, "%s: failed to run encodec eval\n", __func__); + LOGE("%s: failed to run encodec eval\n", __func__); return false; } if (!ectx->decoded) { - fprintf(stderr, "%s: null decoded tensor\n", __func__); + LOGE("%s: null decoded tensor\n", __func__); return false; } - struct ggml_tensor *decoded = ectx->decoded; + struct bark_ggml_tensor *decoded = ectx->decoded; auto &out_audio = ectx->out_audio; int out_length = decoded->ne[0]; out_audio.resize(out_length); - ggml_backend_tensor_get(decoded, out_audio.data(), 0, out_length * ggml_element_size(decoded)); + bark_ggml_backend_tensor_get(decoded, out_audio.data(), 0, out_length * bark_ggml_element_size(decoded)); return true; } @@ -1515,11 +1515,11 @@ bool encodec_decompress_audio( // Note that we used to have an encodec_load_model taking a reference to a file stream // but it was removed to comply the C-header requirements. struct encodec_context *encodec_load_model(const char* model_path, const int offset, int n_gpu_layers) { - int64_t t_start_load_us = ggml_time_us(); + int64_t t_start_load_us = bark_ggml_time_us(); auto infile = std::ifstream(model_path, std::ios::binary); if (!infile) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, model_path); + LOGE("%s: failed to open '%s'\n", __func__, model_path); return nullptr; } @@ -1531,7 +1531,7 @@ struct encodec_context *encodec_load_model(const char* model_path, const int off ectx->model = encodec_model(); if (!encodec_load_model_weights(infile, ectx->model, n_gpu_layers)) { - fprintf(stderr, "%s: failed to load model weights from '%s'\n", __func__, model_path); + LOGE("%s: failed to load model weights from '%s'\n", __func__, model_path); return {}; } @@ -1546,9 +1546,9 @@ struct encodec_context *encodec_load_model(const char* model_path, const int off ectx->model.hparams.hop_length = hop_length; ectx->model.hparams.n_q = get_num_codebooks(bandwidth, hop_length, sr); - fprintf(stderr, "%s: n_q = %d\n", __func__, ectx->model.hparams.n_q); + LOGE("%s: n_q = %d\n", __func__, ectx->model.hparams.n_q); - ectx->stats.t_load_us = ggml_time_us() - t_start_load_us; + ectx->stats.t_load_us = bark_ggml_time_us() - t_start_load_us; return ectx; } @@ -1559,15 +1559,15 @@ void encodec_free(struct encodec_context *ectx) { } if (ectx->model.ctx) { - ggml_free(ectx->model.ctx); + bark_ggml_free(ectx->model.ctx); } if (ectx->buf_compute) { - ggml_backend_buffer_free(ectx->buf_compute); + bark_ggml_backend_buffer_free(ectx->buf_compute); } - ggml_backend_buffer_free(ectx->model.buffer_w); - ggml_backend_free(ectx->model.backend); + bark_ggml_backend_buffer_free(ectx->model.buffer_w); + bark_ggml_backend_free(ectx->model.backend); delete ectx; } @@ -1582,7 +1582,7 @@ void encodec_set_sample_rate(struct encodec_context *ectx, int sample_rate) { const struct encodec_statistics* encodec_get_statistics(struct encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return nullptr; } return &ectx->stats; @@ -1590,7 +1590,7 @@ const struct encodec_statistics* encodec_get_statistics(struct encodec_context * void encodec_reset_statistics(struct 
encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return; } memset(&ectx->stats, 0, sizeof(ectx->stats)); @@ -1598,7 +1598,7 @@ void encodec_reset_statistics(struct encodec_context *ectx) { float * encodec_get_audio(struct encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return nullptr; } return ectx->out_audio.data(); @@ -1606,7 +1606,7 @@ float * encodec_get_audio(struct encodec_context *ectx) { int encodec_get_audio_size(struct encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return 0; } return ectx->out_audio.size(); @@ -1614,7 +1614,7 @@ int encodec_get_audio_size(struct encodec_context *ectx) { int32_t * encodec_get_codes(struct encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return nullptr; } return ectx->out_codes.data(); @@ -1622,7 +1622,7 @@ int32_t * encodec_get_codes(struct encodec_context *ectx) { int encodec_get_codes_size(struct encodec_context *ectx) { if (!ectx) { - fprintf(stderr, "%s: null context\n", __func__); + LOGE("%s: null context\n", __func__); return 0; } return ectx->out_codes.size(); diff --git a/cpp/encodec.h b/cpp/encodec.h index 0a8d8b9..b7593fe 100644 --- a/cpp/encodec.h +++ b/cpp/encodec.h @@ -23,6 +23,7 @@ * */ #pragma once +#include "log.h" #include "ggml-alloc.h" #include "ggml-backend.h" diff --git a/cpp/ggml-alloc.c b/cpp/ggml-alloc.c index 34eba3f..f9f8f29 100644 --- a/cpp/ggml-alloc.c +++ b/cpp/ggml-alloc.c @@ -10,24 +10,24 @@ #define UNUSED(x) (void)(x) #define MAX(a, b) ((a) > (b) ? (a) : (b)) -#define GGML_MAX_CONCUR (2*GGML_MAX_NODES) +#define BARK_GGML_MAX_CONCUR (2*BARK_GGML_MAX_NODES) -//#define GGML_ALLOCATOR_DEBUG +//#define BARK_GGML_ALLOCATOR_DEBUG //#define AT_PRINTF printf #define AT_PRINTF(...) ((void)0) struct hash_node { - struct ggml_tensor * t; + struct bark_ggml_tensor * t; int n_children; int n_views; }; static size_t hash(void * p) { - return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE; + return (size_t)p % BARK_GGML_GRAPH_HASHTABLE_SIZE; } -static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) { +static struct hash_node * hash_get(struct hash_node hash_table[], struct bark_ggml_tensor * t) { size_t h = hash(t); // linear probing @@ -36,10 +36,10 @@ static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_te if (hash_table[i].t == t) { return &hash_table[i]; } - i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE; + i = (i + 1) % BARK_GGML_GRAPH_HASHTABLE_SIZE; if (i == h) { // hash table is full - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } @@ -47,7 +47,7 @@ static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_te return &hash_table[i]; } -// TODO: GGML_PAD ? +// TODO: BARK_GGML_PAD ? 
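// Reviewer note, not part of the upstream patch: aligned_offset() below bumps an
// offset forward so that (buffer + offset) lands on a multiple of `alignment`,
// which must be a power of two. A minimal, self-contained sketch of the same
// arithmetic, kept disabled so it cannot affect the build; round_up_example and
// round_up_example_check are illustrative names, not symbols from this file.
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static size_t round_up_example(const void * buffer, size_t offset, size_t alignment) {
    // mirrors the computation in aligned_offset(): distance from (buffer + offset)
    // to the next aligned address, taken modulo alignment
    size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
    return offset + align;
}

static void round_up_example_check(void) {
    assert(round_up_example(NULL, 0,  32) ==  0);  // already aligned, unchanged
    assert(round_up_example(NULL, 1,  32) == 32);  // bumped to the next 32-byte boundary
    assert(round_up_example(NULL, 33, 32) == 64);
    assert(round_up_example(NULL, 64, 32) == 64);  // exact multiples stay put
}
#endif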
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { assert(alignment && !(alignment & (alignment - 1))); // power of 2 size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment; @@ -61,35 +61,35 @@ struct free_block { #define MAX_FREE_BLOCKS 256 -struct ggml_allocr { - struct ggml_backend_buffer * buffer; +struct bark_ggml_allocr { + struct bark_ggml_backend_buffer * buffer; bool buffer_owned; void * data; size_t alignment; int n_free_blocks; struct free_block free_blocks[MAX_FREE_BLOCKS]; - struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE]; + struct hash_node hash_table[BARK_GGML_GRAPH_HASHTABLE_SIZE]; size_t max_size; bool measure; - int parse_seq[GGML_MAX_CONCUR]; + int parse_seq[BARK_GGML_MAX_CONCUR]; int parse_seq_len; -#ifdef GGML_ALLOCATOR_DEBUG - struct ggml_tensor * allocated_tensors[1024]; +#ifdef BARK_GGML_ALLOCATOR_DEBUG + struct bark_ggml_tensor * allocated_tensors[1024]; #endif }; -#ifdef GGML_ALLOCATOR_DEBUG -static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { +#ifdef BARK_GGML_ALLOCATOR_DEBUG +static void add_allocated_tensor(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i] == NULL) { alloc->allocated_tensors[i] = tensor; return; } } - GGML_ASSERT(!"out of allocated_tensors"); + BARK_GGML_ASSERT(!"out of allocated_tensors"); } -static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { +static void remove_allocated_tensor(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i] == tensor || (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) { @@ -98,24 +98,24 @@ static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tens } } printf("tried to free tensor %s not found\n", tensor->name); - GGML_ASSERT(!"tensor not found"); + BARK_GGML_ASSERT(!"tensor not found"); } #endif // check if a tensor is allocated by this buffer -static bool ggml_allocr_is_own(struct ggml_allocr * alloc, const struct ggml_tensor * tensor) { +static bool bark_ggml_allocr_is_own(struct bark_ggml_allocr * alloc, const struct bark_ggml_tensor * tensor) { return tensor->buffer == alloc->buffer; } -static bool ggml_is_view(struct ggml_tensor * t) { +static bool bark_ggml_is_view(struct bark_ggml_tensor * t) { return t->view_src != NULL; } -void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { - GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources - GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated +void bark_ggml_allocr_alloc(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * tensor) { + BARK_GGML_ASSERT(!bark_ggml_is_view(tensor)); // views generally get data pointer from one of their sources + BARK_GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated - size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor); + size_t size = bark_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor); size = aligned_offset(NULL, size, alloc->alignment); AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size); @@ -145,7 +145,7 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) } else { fprintf(stderr, "%s: not enough space in the 
buffer (needed %zu, largest block available %zu)\n", __func__, size, max_avail); - GGML_ASSERT(!"not enough space in the buffer"); + BARK_GGML_ASSERT(!"not enough space in the buffer"); return; } } @@ -164,16 +164,16 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) tensor->data = addr; AT_PRINTF("%s: allocated data at %p\n", __func__, tensor->data); tensor->buffer = alloc->buffer; - ggml_backend_buffer_init_tensor(alloc->buffer, tensor); + bark_ggml_backend_buffer_init_tensor(alloc->buffer, tensor); -#ifdef GGML_ALLOCATOR_DEBUG +#ifdef BARK_GGML_ALLOCATOR_DEBUG add_allocated_tensor(alloc, tensor); size_t cur_max = (char*)addr - (char*)alloc->data + size; if (cur_max > alloc->max_size) { printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0); for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i]) { - printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0); + printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, bark_ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0); } } printf("\n"); @@ -184,8 +184,8 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) } // this is a very naive implementation, but for our case the number of free blocks should be very small -static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { - if (ggml_allocr_is_own(alloc, tensor) == false) { +static void bark_ggml_allocr_free_tensor(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * tensor) { + if (bark_ggml_allocr_is_own(alloc, tensor) == false) { // the tensor was not allocated in this buffer // this can happen because the graph allocator will try to free weights and other tensors from different buffers // the easiest way to deal with this is just to ignore it @@ -195,13 +195,13 @@ static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tens void * ptr = tensor->data; - size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor); + size_t size = bark_ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor); size = aligned_offset(NULL, size, alloc->alignment); AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks); - ggml_backend_buffer_free_tensor(alloc->buffer, tensor); + bark_ggml_backend_buffer_free_tensor(alloc->buffer, tensor); -#ifdef GGML_ALLOCATOR_DEBUG +#ifdef BARK_GGML_ALLOCATOR_DEBUG remove_allocated_tensor(alloc, tensor); #endif @@ -237,7 +237,7 @@ static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tens } } // otherwise, add a new block - GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); + BARK_GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster) int insert_pos = 0; while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) { @@ -253,97 +253,93 @@ static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tens alloc->n_free_blocks++; } -void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n) { +void bark_ggml_allocr_set_parse_seq(struct bark_ggml_allocr * alloc, const int * list, int n) { for (int i = 0; i < n; i++) { alloc->parse_seq[i] = list[i]; } alloc->parse_seq_len = n; } -void ggml_allocr_reset(struct ggml_allocr * alloc) { 
+void bark_ggml_allocr_reset(struct bark_ggml_allocr * alloc) { alloc->n_free_blocks = 1; size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment); alloc->free_blocks[0].addr = (char *)alloc->data + align_offset; - alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset; + alloc->free_blocks[0].size = bark_ggml_backend_buffer_get_size(alloc->buffer) - align_offset; } -struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) { - struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(NULL, data, size); - - struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); - - *alloc = (struct ggml_allocr){ - /*.buffer = */ buffer, - /*.buffer_owned = */ true, - /*.base = */ ggml_backend_buffer_get_base(buffer), - /*.alignment = */ alignment, - /*.n_free_blocks = */ 0, - /*.free_blocks = */ {{0}}, - /*.hash_table = */ {{0}}, - /*.max_size = */ 0, - /*.measure = */ false, - /*.parse_seq = */ {0}, - /*.parse_seq_len = */ 0, -#ifdef GGML_ALLOCATOR_DEBUG - /*.allocated_tensors = */ {0}, +struct bark_ggml_allocr * bark_ggml_allocr_new(void * data, size_t size, size_t alignment) { + struct bark_ggml_backend_buffer * buffer = bark_ggml_backend_cpu_buffer_from_ptr(NULL, data, size); + + struct bark_ggml_allocr * alloc = (struct bark_ggml_allocr *)malloc(sizeof(struct bark_ggml_allocr)); + + alloc->buffer = buffer; + alloc->buffer_owned = true; + alloc->data = bark_ggml_backend_buffer_get_base(buffer); + alloc->alignment = alignment; + alloc->n_free_blocks = 0; + memset(alloc->free_blocks, 0, sizeof(alloc->free_blocks)); + memset(alloc->hash_table, 0, sizeof(alloc->hash_table)); + alloc->max_size = 0; + alloc->measure = false; + memset(alloc->parse_seq, 0, sizeof(alloc->parse_seq)); + alloc->parse_seq_len = 0; +#ifdef BARK_GGML_ALLOCATOR_DEBUG + memset(alloc->allocated_tensors, 0, sizeof(alloc->allocated_tensors)); #endif - }; - ggml_allocr_reset(alloc); + bark_ggml_allocr_reset(alloc); return alloc; } -struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) { - struct ggml_allocr * alloc = ggml_allocr_new((void *)0x1000, (size_t)-0x1001, alignment); +struct bark_ggml_allocr * bark_ggml_allocr_new_measure(size_t alignment) { + struct bark_ggml_allocr * alloc = bark_ggml_allocr_new((void *)0x1000, (size_t)-0x1001, alignment); alloc->measure = true; return alloc; } -struct ggml_allocr * ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) { - struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); - - *alloc = (struct ggml_allocr){ - /*.buffer = */ buffer, - /*.buffer_owned = */ false, - /*.base = */ ggml_backend_buffer_get_base(buffer), - /*.alignment = */ ggml_backend_buffer_get_alignment(buffer), - /*.n_free_blocks = */ 0, - /*.free_blocks = */ {{0}}, - /*.hash_table = */ {{0}}, - /*.max_size = */ 0, - /*.measure = */ false, - /*.parse_seq = */ {0}, - /*.parse_seq_len = */ 0, -#ifdef GGML_ALLOCATOR_DEBUG - /*.allocated_tensors = */ {0}, +struct bark_ggml_allocr * bark_ggml_allocr_new_from_buffer(struct bark_ggml_backend_buffer * buffer) { + struct bark_ggml_allocr * alloc = (struct bark_ggml_allocr *)malloc(sizeof(struct bark_ggml_allocr)); + + alloc->buffer = buffer; + alloc->buffer_owned = false; + alloc->data = bark_ggml_backend_buffer_get_base(buffer); + alloc->alignment = bark_ggml_backend_buffer_get_alignment(buffer); + alloc->n_free_blocks = 0; + memset(alloc->free_blocks, 0, sizeof(alloc->free_blocks)); + memset(alloc->hash_table, 0, 
sizeof(alloc->hash_table)); + alloc->max_size = 0; + alloc->measure = false; + memset(alloc->parse_seq, 0, sizeof(alloc->parse_seq)); + alloc->parse_seq_len = 0; +#ifdef BARK_GGML_ALLOCATOR_DEBUG + memset(alloc->allocated_tensors, 0, sizeof(alloc->allocated_tensors)); #endif - }; - ggml_allocr_reset(alloc); + bark_ggml_allocr_reset(alloc); return alloc; } -void ggml_allocr_free(struct ggml_allocr * alloc) { +void bark_ggml_allocr_free(struct bark_ggml_allocr * alloc) { if (alloc->buffer_owned) { - ggml_backend_buffer_free(alloc->buffer); + bark_ggml_backend_buffer_free(alloc->buffer); } free(alloc); } -bool ggml_allocr_is_measure(struct ggml_allocr * alloc) { +bool bark_ggml_allocr_is_measure(struct bark_ggml_allocr * alloc) { return alloc->measure; } //////////// compute graph allocator -static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { +static bool bark_ggml_are_same_layout(const struct bark_ggml_tensor * a, const struct bark_ggml_tensor * b) { if (a->type != b->type) { return false; } - for (int i = 0; i < GGML_MAX_DIMS; i++) { + for (int i = 0; i < BARK_GGML_MAX_DIMS; i++) { if (a->ne[i] != b->ne[i]) { return false; } @@ -354,23 +350,23 @@ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml return true; } -static bool ggml_op_can_inplace(enum ggml_op op) { +static bool bark_ggml_op_can_inplace(enum bark_ggml_op op) { switch (op) { - case GGML_OP_SCALE: - case GGML_OP_DIAG_MASK_ZERO: - case GGML_OP_DIAG_MASK_INF: - case GGML_OP_ADD: - case GGML_OP_ADD1: - case GGML_OP_SUB: - case GGML_OP_MUL: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_LOG: - case GGML_OP_UNARY: - case GGML_OP_ROPE: - case GGML_OP_RMS_NORM: - case GGML_OP_SOFT_MAX: + case BARK_GGML_OP_SCALE: + case BARK_GGML_OP_DIAG_MASK_ZERO: + case BARK_GGML_OP_DIAG_MASK_INF: + case BARK_GGML_OP_ADD: + case BARK_GGML_OP_ADD1: + case BARK_GGML_OP_SUB: + case BARK_GGML_OP_MUL: + case BARK_GGML_OP_DIV: + case BARK_GGML_OP_SQR: + case BARK_GGML_OP_SQRT: + case BARK_GGML_OP_LOG: + case BARK_GGML_OP_UNARY: + case BARK_GGML_OP_ROPE: + case BARK_GGML_OP_RMS_NORM: + case BARK_GGML_OP_SOFT_MAX: return true; default: @@ -378,42 +374,42 @@ static bool ggml_op_can_inplace(enum ggml_op op) { } } -static void init_view(struct ggml_allocr * alloc, struct ggml_tensor * view) { +static void init_view(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * view) { assert(view->view_src != NULL && view->view_src->data != NULL); view->backend = view->view_src->backend; view->buffer = view->view_src->buffer; view->data = (char *)view->view_src->data + view->view_offs; // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend - // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras - assert(ggml_allocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend); - ggml_backend_buffer_init_tensor(alloc->buffer, view); + // due to the bark_ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras + assert(bark_ggml_allocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend); + bark_ggml_backend_buffer_init_tensor(alloc->buffer, view); } -static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) { +static void allocate_node(struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * node) { struct hash_node * ht = alloc->hash_table; if (node->data == NULL) { - if (ggml_is_view(node)) { + if 
(bark_ggml_is_view(node)) { init_view(alloc, node); } else { // see if we can reuse a parent's buffer (inplace) - if (ggml_op_can_inplace(node->op)) { - for (int i = 0; i < GGML_MAX_SRC; i++) { - struct ggml_tensor * parent = node->src[i]; + if (bark_ggml_op_can_inplace(node->op)) { + for (int i = 0; i < BARK_GGML_MAX_SRC; i++) { + struct bark_ggml_tensor * parent = node->src[i]; if (parent == NULL) { break; } // if the node's data is external, then we cannot re-use it - if (ggml_allocr_is_own(alloc, parent) == false) { + if (bark_ggml_allocr_is_own(alloc, parent) == false) { AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); continue; } struct hash_node * p_hn = hash_get(ht, parent); - if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) { - if (ggml_is_view(parent)) { - struct ggml_tensor * view_src = parent->view_src; + if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && bark_ggml_are_same_layout(node, parent)) { + if (bark_ggml_is_view(parent)) { + struct bark_ggml_tensor * view_src = parent->view_src; struct hash_node * view_src_hn = hash_get(ht, view_src); if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite @@ -438,28 +434,28 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) } } } - ggml_allocr_alloc(alloc, node); + bark_ggml_allocr_alloc(alloc, node); } } } -size_t ggml_allocr_alloc_graph_n( - struct ggml_allocr * alloc, - struct ggml_cgraph ** graphs, int n_graphs, - struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) { +size_t bark_ggml_allocr_alloc_graph_n( + struct bark_ggml_allocr * alloc, + struct bark_ggml_cgraph ** graphs, int n_graphs, + struct bark_ggml_tensor *** inputs, struct bark_ggml_tensor *** outputs) { // reset hash table struct hash_node * ht = alloc->hash_table; - memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE); + memset(ht, 0, sizeof(struct hash_node) * BARK_GGML_GRAPH_HASHTABLE_SIZE); // count number of children and views for (int g = 0; g < n_graphs; g++) { - struct ggml_cgraph * gf = graphs[g]; + struct bark_ggml_cgraph * gf = graphs[g]; for (int i = 0; i < gf->n_nodes; i++) { - struct ggml_tensor * node = gf->nodes[i]; + struct bark_ggml_tensor * node = gf->nodes[i]; - if (ggml_is_view(node)) { - struct ggml_tensor * view_src = node->view_src; + if (bark_ggml_is_view(node)) { + struct bark_ggml_tensor * view_src = node->view_src; hash_get(ht, view_src)->n_views += 1; if (node->buffer == NULL && node->data != NULL) { // view of a pre-allocated tensor, didn't call init_view() yet @@ -467,13 +463,13 @@ size_t ggml_allocr_alloc_graph_n( } } - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { + struct bark_ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } hash_get(ht, parent)->n_children += 1; - if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) { + if (bark_ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) { init_view(alloc, parent); } } @@ -482,12 +478,12 @@ size_t ggml_allocr_alloc_graph_n( // allocate tensors for (int g = 0; g < n_graphs; g++) { - struct ggml_cgraph * gf = graphs[g]; + struct bark_ggml_cgraph * gf = graphs[g]; AT_PRINTF("####### graph %d/%d\n", g, n_graphs); // 
graph inputs are allocated first to ensure that they are not overwritten by each other if (inputs != NULL && inputs[g] != NULL) { for (int i = 0; inputs[g][i] != NULL; i++) { - struct ggml_tensor * input = inputs[g][i]; + struct bark_ggml_tensor * input = inputs[g][i]; AT_PRINTF("input: %s\n", input->name); allocate_node(alloc, input); } @@ -500,11 +496,11 @@ size_t ggml_allocr_alloc_graph_n( // allocate a node if there is no parse_seq or this is not a barrier if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) { int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind; - struct ggml_tensor * node = gf->nodes[i]; + struct bark_ggml_tensor * node = gf->nodes[i]; // allocate parents (leafs) - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { + struct bark_ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } @@ -514,14 +510,14 @@ size_t ggml_allocr_alloc_graph_n( // allocate node allocate_node(alloc, node); - AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name); - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; + AT_PRINTF("exec: %s (%s) <= ", bark_ggml_op_name(node->op), node->name); + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { + struct bark_ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } AT_PRINTF("%s", parent->name); - if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { + if (j < BARK_GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { AT_PRINTF(", "); } } @@ -536,10 +532,10 @@ size_t ggml_allocr_alloc_graph_n( int update_end = alloc->parse_seq_len ? ind : ind + 1; for (int i = update_start; i < update_end; i++) { int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i; - struct ggml_tensor * node = gf->nodes[node_i]; + struct bark_ggml_tensor * node = gf->nodes[node_i]; - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { + struct bark_ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } @@ -549,18 +545,18 @@ size_t ggml_allocr_alloc_graph_n( //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views); if (p_hn->n_children == 0 && p_hn->n_views == 0) { - if (ggml_is_view(parent)) { - struct ggml_tensor * view_src = parent->view_src; + if (bark_ggml_is_view(parent)) { + struct bark_ggml_tensor * view_src = parent->view_src; struct hash_node * view_src_hn = hash_get(ht, view_src); view_src_hn->n_views -= 1; AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views); if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) { - ggml_allocr_free_tensor(alloc, view_src); + bark_ggml_allocr_free_tensor(alloc, view_src); } } else { if (parent->data != node->data) { - ggml_allocr_free_tensor(alloc, parent); + bark_ggml_allocr_free_tensor(alloc, parent); } } } @@ -575,9 +571,9 @@ size_t ggml_allocr_alloc_graph_n( // free graph outputs here that wouldn't be freed otherwise because they have no children if (outputs != NULL && outputs[g] != NULL) { for (int i = 0; outputs[g][i] != NULL; i++) { - struct ggml_tensor * output = outputs[g][i]; + struct bark_ggml_tensor * output = outputs[g][i]; AT_PRINTF("output: %s\n", output->name); - ggml_allocr_free_tensor(alloc, output); + bark_ggml_allocr_free_tensor(alloc, output); } } } @@ -585,10 +581,10 @@ size_t ggml_allocr_alloc_graph_n( return 
alloc->max_size; } -size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) { - return ggml_allocr_alloc_graph_n(alloc, &graph, 1, NULL, NULL); +size_t bark_ggml_allocr_alloc_graph(struct bark_ggml_allocr * alloc, struct bark_ggml_cgraph * graph) { + return bark_ggml_allocr_alloc_graph_n(alloc, &graph, 1, NULL, NULL); } -size_t ggml_allocr_max_size(struct ggml_allocr * alloc) { +size_t bark_ggml_allocr_max_size(struct bark_ggml_allocr * alloc) { return alloc->max_size; } diff --git a/cpp/ggml-alloc.h b/cpp/ggml-alloc.h index e387588..84a9c64 100644 --- a/cpp/ggml-alloc.h +++ b/cpp/ggml-alloc.h @@ -6,27 +6,27 @@ extern "C" { #endif -struct ggml_backend_buffer; +struct bark_ggml_backend_buffer; -GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment); -GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment); -GGML_API struct ggml_allocr * ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer); +BARK_GGML_API struct bark_ggml_allocr * bark_ggml_allocr_new(void * data, size_t size, size_t alignment); +BARK_GGML_API struct bark_ggml_allocr * bark_ggml_allocr_new_measure(size_t alignment); +BARK_GGML_API struct bark_ggml_allocr * bark_ggml_allocr_new_from_buffer(struct bark_ggml_backend_buffer * buffer); // tell the allocator to parse nodes following the order described in the list // you should call this if your graph are optimized to execute out-of-order -GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n); - -GGML_API void ggml_allocr_free (struct ggml_allocr * alloc); -GGML_API bool ggml_allocr_is_measure (struct ggml_allocr * alloc); -GGML_API void ggml_allocr_reset (struct ggml_allocr * alloc); -GGML_API void ggml_allocr_alloc (struct ggml_allocr * alloc, struct ggml_tensor * tensor); -GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph); -GGML_API size_t ggml_allocr_max_size (struct ggml_allocr * alloc); - -GGML_API size_t ggml_allocr_alloc_graph_n( - struct ggml_allocr * alloc, - struct ggml_cgraph ** graphs, int n_graphs, - struct ggml_tensor *** inputs, struct ggml_tensor *** outputs); +BARK_GGML_API void bark_ggml_allocr_set_parse_seq(struct bark_ggml_allocr * alloc, const int * list, int n); + +BARK_GGML_API void bark_ggml_allocr_free (struct bark_ggml_allocr * alloc); +BARK_GGML_API bool bark_ggml_allocr_is_measure (struct bark_ggml_allocr * alloc); +BARK_GGML_API void bark_ggml_allocr_reset (struct bark_ggml_allocr * alloc); +BARK_GGML_API void bark_ggml_allocr_alloc (struct bark_ggml_allocr * alloc, struct bark_ggml_tensor * tensor); +BARK_GGML_API size_t bark_ggml_allocr_alloc_graph(struct bark_ggml_allocr * alloc, struct bark_ggml_cgraph * graph); +BARK_GGML_API size_t bark_ggml_allocr_max_size (struct bark_ggml_allocr * alloc); + +BARK_GGML_API size_t bark_ggml_allocr_alloc_graph_n( + struct bark_ggml_allocr * alloc, + struct bark_ggml_cgraph ** graphs, int n_graphs, + struct bark_ggml_tensor *** inputs, struct bark_ggml_tensor *** outputs); #ifdef __cplusplus } diff --git a/cpp/ggml-backend.c b/cpp/ggml-backend.c index ca8d83d..e5bab04 100644 --- a/cpp/ggml-backend.c +++ b/cpp/ggml-backend.c @@ -7,22 +7,22 @@ #include #include -#define UNUSED GGML_UNUSED +#define UNUSED BARK_GGML_UNUSED #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) // backend buffer -ggml_backend_buffer_t ggml_backend_buffer_init( - struct ggml_backend * backend, - struct ggml_backend_buffer_i iface, - ggml_backend_buffer_context_t context, +bark_ggml_backend_buffer_t bark_ggml_backend_buffer_init( + struct bark_ggml_backend * backend, + struct bark_ggml_backend_buffer_i iface, + bark_ggml_backend_buffer_context_t context, size_t size) { - ggml_backend_buffer_t buffer = malloc(sizeof(struct ggml_backend_buffer)); + bark_ggml_backend_buffer_t buffer = malloc(sizeof(struct bark_ggml_backend_buffer)); - GGML_ASSERT(iface.get_base != NULL); + BARK_GGML_ASSERT(iface.get_base != NULL); - (*buffer) = (struct ggml_backend_buffer) { + (*buffer) = (struct bark_ggml_backend_buffer) { /* .interface = */ iface, /* .backend = */ backend, /* .context = */ context, @@ -32,39 +32,39 @@ ggml_backend_buffer_t ggml_backend_buffer_init( return buffer; } -void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { +void bark_ggml_backend_buffer_free(bark_ggml_backend_buffer_t buffer) { if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } free(buffer); } -size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { - return ggml_backend_get_alignment(buffer->backend); +size_t bark_ggml_backend_buffer_get_alignment(bark_ggml_backend_buffer_t buffer) { + return bark_ggml_backend_get_alignment(buffer->backend); } -void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { +void * bark_ggml_backend_buffer_get_base(bark_ggml_backend_buffer_t buffer) { return buffer->iface.get_base(buffer); } -size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { +size_t bark_ggml_backend_buffer_get_size(bark_ggml_backend_buffer_t buffer) { return buffer->size; } -size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { +size_t bark_ggml_backend_buffer_get_alloc_size(bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor) { if (buffer->iface.get_alloc_size) { return buffer->iface.get_alloc_size(buffer, tensor); } - return ggml_nbytes(tensor); + return bark_ggml_nbytes(tensor); } -void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { +void bark_ggml_backend_buffer_init_tensor(bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor) { if (buffer->iface.init_tensor) { buffer->iface.init_tensor(buffer, tensor); } } -void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { +void bark_ggml_backend_buffer_free_tensor(bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor) { if (buffer->iface.free_tensor) { buffer->iface.free_tensor(buffer, tensor); } @@ -72,75 +72,75 @@ void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_t // backend -ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor) { +bark_ggml_backend_t bark_ggml_get_backend(const struct bark_ggml_tensor * tensor) { return tensor->buffer->backend; } -const char * ggml_backend_name(ggml_backend_t backend) { +const char * bark_ggml_backend_name(bark_ggml_backend_t backend) { return backend->iface.get_name(backend); } -void ggml_backend_free(ggml_backend_t backend) { +void bark_ggml_backend_free(bark_ggml_backend_t backend) { backend->iface.free(backend); } -ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) { +bark_ggml_backend_buffer_t bark_ggml_backend_alloc_buffer(bark_ggml_backend_t backend, size_t size) { return 
backend->iface.alloc_buffer(backend, size); } -size_t ggml_backend_get_alignment(ggml_backend_t backend) { +size_t bark_ggml_backend_get_alignment(bark_ggml_backend_t backend) { return backend->iface.get_alignment(backend); } -void ggml_backend_tensor_set_async(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); +void bark_ggml_backend_tensor_set_async(struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + bark_ggml_get_backend(tensor)->iface.set_tensor_async(bark_ggml_get_backend(tensor), tensor, data, offset, size); } -void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); +void bark_ggml_backend_tensor_get_async(const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size) { + bark_ggml_get_backend(tensor)->iface.get_tensor_async(bark_ggml_get_backend(tensor), tensor, data, offset, size); } -void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); - ggml_get_backend(tensor)->iface.synchronize(ggml_get_backend(tensor)); +void bark_ggml_backend_tensor_set(struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + bark_ggml_get_backend(tensor)->iface.set_tensor_async(bark_ggml_get_backend(tensor), tensor, data, offset, size); + bark_ggml_get_backend(tensor)->iface.synchronize(bark_ggml_get_backend(tensor)); } -void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); - ggml_get_backend(tensor)->iface.synchronize(ggml_get_backend(tensor)); +void bark_ggml_backend_tensor_get(const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size) { + bark_ggml_get_backend(tensor)->iface.get_tensor_async(bark_ggml_get_backend(tensor), tensor, data, offset, size); + bark_ggml_get_backend(tensor)->iface.synchronize(bark_ggml_get_backend(tensor)); } -void ggml_backend_synchronize(ggml_backend_t backend) { +void bark_ggml_backend_synchronize(bark_ggml_backend_t backend) { backend->iface.synchronize(backend); } -ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { +bark_ggml_backend_graph_plan_t bark_ggml_backend_graph_plan_create(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph) { return backend->iface.graph_plan_create(backend, cgraph); } -void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { +void bark_ggml_backend_graph_plan_free(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan) { backend->iface.graph_plan_free(backend, plan); } -void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { +void bark_ggml_backend_graph_plan_compute(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan) { backend->iface.graph_plan_compute(backend, plan); } -void ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { +void bark_ggml_backend_graph_compute(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph) { 
backend->iface.graph_compute(backend, cgraph); } -bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { +bool bark_ggml_backend_supports_op(bark_ggml_backend_t backend, const struct bark_ggml_tensor * op) { return backend->iface.supports_op(backend, op); } // backend copy -static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { +static bool bark_ggml_are_same_layout(const struct bark_ggml_tensor * a, const struct bark_ggml_tensor * b) { if (a->type != b->type) { return false; } - for (int i = 0; i < GGML_MAX_DIMS; i++) { + for (int i = 0; i < BARK_GGML_MAX_DIMS; i++) { if (a->ne[i] != b->ne[i]) { return false; } @@ -151,12 +151,12 @@ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml return true; } -void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { +void bark_ggml_backend_tensor_copy(struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst) { //printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]); //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]); - GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); + BARK_GGML_ASSERT(bark_ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); - // printf("cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src)); + // printf("cpy tensor %s from %s to %s (%lu bytes)\n", src->name, bark_ggml_backend_name(src->backend), bark_ggml_backend_name(dst->backend), bark_ggml_nbytes(src)); if (src == dst) { return; @@ -164,130 +164,130 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst // TODO: allow backends to support copy to/from same backend - if (ggml_get_backend(dst)->iface.cpy_tensor_from != NULL) { - ggml_get_backend(dst)->iface.cpy_tensor_from(ggml_get_backend(dst)->context, src, dst); - } else if (ggml_get_backend(src)->iface.cpy_tensor_to != NULL) { - ggml_get_backend(src)->iface.cpy_tensor_to(ggml_get_backend(src)->context, src, dst); + if (bark_ggml_get_backend(dst)->iface.cpy_tensor_from != NULL) { + bark_ggml_get_backend(dst)->iface.cpy_tensor_from(bark_ggml_get_backend(dst)->context, src, dst); + } else if (bark_ggml_get_backend(src)->iface.cpy_tensor_to != NULL) { + bark_ggml_get_backend(src)->iface.cpy_tensor_to(bark_ggml_get_backend(src)->context, src, dst); } else { // shouldn't be hit when copying from/to CPU #ifndef NDEBUG - fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to are implemented for backends %s and %s, falling back to get/set\n", ggml_backend_name(src->buffer->backend), ggml_backend_name(dst->buffer->backend)); + fprintf(stderr, "bark_ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to are implemented for backends %s and %s, falling back to get/set\n", bark_ggml_backend_name(src->buffer->backend), bark_ggml_backend_name(dst->buffer->backend)); #endif - size_t nbytes = ggml_nbytes(src); + size_t nbytes = bark_ggml_nbytes(src); void * data = malloc(nbytes); - ggml_backend_tensor_get(src, data, 0, nbytes); - ggml_backend_tensor_set(dst, data, 0, nbytes); + 
bark_ggml_backend_tensor_get(src, data, 0, nbytes); + bark_ggml_backend_tensor_set(dst, data, 0, nbytes); free(data); } } // backend CPU -struct ggml_backend_cpu_context { +struct bark_ggml_backend_cpu_context { int n_threads; void * work_data; size_t work_size; }; -static const char * ggml_backend_cpu_name(ggml_backend_t backend) { +static const char * bark_ggml_backend_cpu_name(bark_ggml_backend_t backend) { return "CPU"; UNUSED(backend); } -static void ggml_backend_cpu_free(ggml_backend_t backend) { - struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; +static void bark_ggml_backend_cpu_free(bark_ggml_backend_t backend) { + struct bark_ggml_backend_cpu_context * cpu_ctx = (struct bark_ggml_backend_cpu_context *)backend->context; free(cpu_ctx->work_data); free(cpu_ctx); free(backend); } -static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { +static void * bark_ggml_backend_cpu_buffer_get_base(bark_ggml_backend_buffer_t buffer) { return (void *)buffer->context; } -static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { +static void bark_ggml_backend_cpu_buffer_free_buffer(bark_ggml_backend_buffer_t buffer) { free(buffer->context); UNUSED(buffer); } -static struct ggml_backend_buffer_i cpu_backend_buffer_i = { - /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, - /* .get_base = */ ggml_backend_cpu_buffer_get_base, - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes +static struct bark_ggml_backend_buffer_i cpu_backend_buffer_i = { + /* .free_buffer = */ bark_ggml_backend_cpu_buffer_free_buffer, + /* .get_base = */ bark_ggml_backend_cpu_buffer_get_base, + /* .get_alloc_size = */ NULL, // defaults to bark_ggml_nbytes /* .init_tensor = */ NULL, // no initialization required /* .free_tensor = */ NULL, // no cleanup required }; // for buffers from ptr, free is not called -static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { +static struct bark_ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed - /* .get_base = */ ggml_backend_cpu_buffer_get_base, - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes + /* .get_base = */ bark_ggml_backend_cpu_buffer_get_base, + /* .get_alloc_size = */ NULL, // defaults to bark_ggml_nbytes /* .init_tensor = */ NULL, /* .free_tensor = */ NULL, }; static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512 -static ggml_backend_buffer_t ggml_backend_cpu_alloc_buffer(ggml_backend_t backend, size_t size) { +static bark_ggml_backend_buffer_t bark_ggml_backend_cpu_alloc_buffer(bark_ggml_backend_t backend, size_t size) { size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned - void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC? + void * data = malloc(size); // TODO: maybe use BARK_GGML_ALIGNED_MALLOC? 
- return ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size); + return bark_ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size); } -static size_t ggml_backend_cpu_get_alignment(ggml_backend_t backend) { +static size_t bark_ggml_backend_cpu_get_alignment(bark_ggml_backend_t backend) { return TENSOR_ALIGNMENT; UNUSED(backend); } -static void ggml_backend_cpu_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); +static void bark_ggml_backend_cpu_set_tensor_async(bark_ggml_backend_t backend, struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + BARK_GGML_ASSERT(offset + size <= bark_ggml_nbytes(tensor) && "tensor write out of bounds"); + BARK_GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); memcpy((char *)tensor->data + offset, data, size); UNUSED(backend); } -static void ggml_backend_cpu_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); +static void bark_ggml_backend_cpu_get_tensor_async(bark_ggml_backend_t backend, const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size) { + BARK_GGML_ASSERT(offset + size <= bark_ggml_nbytes(tensor) && "tensor read out of bounds"); + BARK_GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); memcpy(data, (const char *)tensor->data + offset, size); UNUSED(backend); } -static void ggml_backend_cpu_synchronize(ggml_backend_t backend) { +static void bark_ggml_backend_cpu_synchronize(bark_ggml_backend_t backend) { UNUSED(backend); } -static void ggml_backend_cpu_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); +static void bark_ggml_backend_cpu_cpy_tensor_from(bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst) { + bark_ggml_backend_tensor_get(src, dst->data, 0, bark_ggml_nbytes(src)); UNUSED(backend); } -static void ggml_backend_cpu_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { +static void bark_ggml_backend_cpu_cpy_tensor_to(bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst) { // for a backend such as CUDA that can queue async calls, it is ok to do this asynchronously, but it may not be the case for other backends - ggml_backend_tensor_set_async(dst, src->data, 0, ggml_nbytes(src)); + bark_ggml_backend_tensor_set_async(dst, src->data, 0, bark_ggml_nbytes(src)); UNUSED(backend); } -struct ggml_backend_plan_cpu { - struct ggml_cplan cplan; - struct ggml_cgraph cgraph; +struct bark_ggml_backend_plan_cpu { + struct bark_ggml_cplan cplan; + struct bark_ggml_cgraph cgraph; }; -static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; +static bark_ggml_backend_graph_plan_t bark_ggml_backend_cpu_graph_plan_create(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph) { + struct bark_ggml_backend_cpu_context * cpu_ctx = (struct 
bark_ggml_backend_cpu_context *)backend->context; - struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu)); + struct bark_ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct bark_ggml_backend_plan_cpu)); - cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads); + cpu_plan->cplan = bark_ggml_graph_plan(cgraph, cpu_ctx->n_threads); cpu_plan->cgraph = *cgraph; if (cpu_plan->cplan.work_size > 0) { @@ -297,8 +297,8 @@ static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend return cpu_plan; } -static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { - struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan; +static void bark_ggml_backend_cpu_graph_plan_free(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan) { + struct bark_ggml_backend_plan_cpu * cpu_plan = (struct bark_ggml_backend_plan_cpu *)plan; free(cpu_plan->cplan.work_data); free(cpu_plan); @@ -306,18 +306,18 @@ static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backen UNUSED(backend); } -static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { - struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan; +static void bark_ggml_backend_cpu_graph_plan_compute(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan) { + struct bark_ggml_backend_plan_cpu * cpu_plan = (struct bark_ggml_backend_plan_cpu *)plan; - ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan); + bark_ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan); UNUSED(backend); } -static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; +static void bark_ggml_backend_cpu_graph_compute(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph) { + struct bark_ggml_backend_cpu_context * cpu_ctx = (struct bark_ggml_backend_cpu_context *)backend->context; - struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads); + struct bark_ggml_cplan cplan = bark_ggml_graph_plan(cgraph, cpu_ctx->n_threads); if (cpu_ctx->work_size < cplan.work_size) { // TODO: may be faster to free and use malloc to avoid the copy @@ -327,59 +327,59 @@ static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_c cplan.work_data = cpu_ctx->work_data; - ggml_graph_compute(cgraph, &cplan); + bark_ggml_graph_compute(cgraph, &cplan); } -static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { +static bool bark_ggml_backend_cpu_supports_op(bark_ggml_backend_t backend, const struct bark_ggml_tensor * op) { return true; UNUSED(backend); UNUSED(op); } -static struct ggml_backend_i cpu_backend_i = { - /* .get_name = */ ggml_backend_cpu_name, - /* .free = */ ggml_backend_cpu_free, - /* .alloc_buffer = */ ggml_backend_cpu_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_get_alignment, - /* .set_tensor_async = */ ggml_backend_cpu_set_tensor_async, - /* .get_tensor_async = */ ggml_backend_cpu_get_tensor_async, - /* .synchronize = */ ggml_backend_cpu_synchronize, - /* .cpy_tensor_from = */ ggml_backend_cpu_cpy_tensor_from, - /* .cpy_tensor_to = */ ggml_backend_cpu_cpy_tensor_to, - /* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create, - /* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free, - /* .graph_plan_compute = */ 
ggml_backend_cpu_graph_plan_compute, - /* .graph_compute = */ ggml_backend_cpu_graph_compute, - /* .supports_op = */ ggml_backend_cpu_supports_op, +static struct bark_ggml_backend_i cpu_backend_i = { + /* .get_name = */ bark_ggml_backend_cpu_name, + /* .free = */ bark_ggml_backend_cpu_free, + /* .alloc_buffer = */ bark_ggml_backend_cpu_alloc_buffer, + /* .get_alignment = */ bark_ggml_backend_cpu_get_alignment, + /* .set_tensor_async = */ bark_ggml_backend_cpu_set_tensor_async, + /* .get_tensor_async = */ bark_ggml_backend_cpu_get_tensor_async, + /* .synchronize = */ bark_ggml_backend_cpu_synchronize, + /* .cpy_tensor_from = */ bark_ggml_backend_cpu_cpy_tensor_from, + /* .cpy_tensor_to = */ bark_ggml_backend_cpu_cpy_tensor_to, + /* .graph_plan_create = */ bark_ggml_backend_cpu_graph_plan_create, + /* .graph_plan_free = */ bark_ggml_backend_cpu_graph_plan_free, + /* .graph_plan_compute = */ bark_ggml_backend_cpu_graph_plan_compute, + /* .graph_compute = */ bark_ggml_backend_cpu_graph_compute, + /* .supports_op = */ bark_ggml_backend_cpu_supports_op, }; -ggml_backend_t ggml_backend_cpu_init(void) { - struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context)); +bark_ggml_backend_t bark_ggml_backend_cpu_init(void) { + struct bark_ggml_backend_cpu_context * ctx = malloc(sizeof(struct bark_ggml_backend_cpu_context)); - ctx->n_threads = GGML_DEFAULT_N_THREADS; + ctx->n_threads = BARK_GGML_DEFAULT_N_THREADS; ctx->work_data = NULL; ctx->work_size = 0; - ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend)); + bark_ggml_backend_t cpu_backend = malloc(sizeof(struct bark_ggml_backend)); - *cpu_backend = (struct ggml_backend) { + *cpu_backend = (struct bark_ggml_backend) { /* .interface = */ cpu_backend_i, /* .context = */ ctx }; return cpu_backend; } -bool ggml_backend_is_cpu(ggml_backend_t backend) { - return backend->iface.get_name == ggml_backend_cpu_name; +bool bark_ggml_backend_is_cpu(bark_ggml_backend_t backend) { + return backend->iface.get_name == bark_ggml_backend_cpu_name; } -void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) { - GGML_ASSERT(ggml_backend_is_cpu(backend_cpu)); +void bark_ggml_backend_cpu_set_n_threads(bark_ggml_backend_t backend_cpu, int n_threads) { + BARK_GGML_ASSERT(bark_ggml_backend_is_cpu(backend_cpu)); - struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context; + struct bark_ggml_backend_cpu_context * ctx = (struct bark_ggml_backend_cpu_context *)backend_cpu->context; ctx->n_threads = n_threads; } -ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size) { - return ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size); +bark_ggml_backend_buffer_t bark_ggml_backend_cpu_buffer_from_ptr(bark_ggml_backend_t backend_cpu, void * ptr, size_t size) { + return bark_ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size); } diff --git a/cpp/ggml-backend.h b/cpp/ggml-backend.h index da134b0..8c722d3 100644 --- a/cpp/ggml-backend.h +++ b/cpp/ggml-backend.h @@ -5,138 +5,138 @@ #ifdef __cplusplus extern "C" { #endif - struct ggml_backend; - struct ggml_backend_buffer; + struct bark_ggml_backend; + struct bark_ggml_backend_buffer; // type-erased backend-specific types / wrappers - typedef void * ggml_backend_context_t; - typedef void * ggml_backend_graph_plan_t; - typedef void * ggml_backend_buffer_context_t; + typedef void * bark_ggml_backend_context_t; + typedef void * 
bark_ggml_backend_graph_plan_t; + typedef void * bark_ggml_backend_buffer_context_t; // avoid accessing internals of these types - typedef struct ggml_backend * ggml_backend_t; - typedef struct ggml_backend_buffer * ggml_backend_buffer_t; + typedef struct bark_ggml_backend * bark_ggml_backend_t; + typedef struct bark_ggml_backend_buffer * bark_ggml_backend_buffer_t; // // backend buffer // - struct ggml_backend_buffer_i { - void (*free_buffer) (ggml_backend_buffer_t buffer); - void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer - size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback - void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback - void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback + struct bark_ggml_backend_buffer_i { + void (*free_buffer) (bark_ggml_backend_buffer_t buffer); + void * (*get_base) (bark_ggml_backend_buffer_t buffer); // get base pointer + size_t (*get_alloc_size)(bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor); // pre-allocation callback + void (*init_tensor) (bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor); // post-allocation callback + void (*free_tensor) (bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor); // pre-free callback }; // TODO: hide behind API - struct ggml_backend_buffer { - struct ggml_backend_buffer_i iface; + struct bark_ggml_backend_buffer { + struct bark_ggml_backend_buffer_i iface; - ggml_backend_t backend; - ggml_backend_buffer_context_t context; + bark_ggml_backend_t backend; + bark_ggml_backend_buffer_context_t context; size_t size; }; // backend buffer functions - GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( - struct ggml_backend * backend, - struct ggml_backend_buffer_i iface, - ggml_backend_buffer_context_t context, + BARK_GGML_API bark_ggml_backend_buffer_t bark_ggml_backend_buffer_init( + struct bark_ggml_backend * backend, + struct bark_ggml_backend_buffer_i iface, + bark_ggml_backend_buffer_context_t context, size_t size); - GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); - GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + BARK_GGML_API void bark_ggml_backend_buffer_free (bark_ggml_backend_buffer_t buffer); + BARK_GGML_API size_t bark_ggml_backend_buffer_get_alignment (bark_ggml_backend_buffer_t buffer); + BARK_GGML_API void * bark_ggml_backend_buffer_get_base (bark_ggml_backend_buffer_t buffer); + BARK_GGML_API size_t bark_ggml_backend_buffer_get_size (bark_ggml_backend_buffer_t buffer); + BARK_GGML_API size_t bark_ggml_backend_buffer_get_alloc_size(bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor); + BARK_GGML_API void bark_ggml_backend_buffer_init_tensor (bark_ggml_backend_buffer_t buffer, struct bark_ggml_tensor * tensor); + BARK_GGML_API void bark_ggml_backend_buffer_free_tensor (bark_ggml_backend_buffer_t buffer, struct 
bark_ggml_tensor * tensor); // // backend // - struct ggml_backend_i { - const char * (*get_name)(ggml_backend_t backend); + struct bark_ggml_backend_i { + const char * (*get_name)(bark_ggml_backend_t backend); - void (*free)(ggml_backend_t backend); + void (*free)(bark_ggml_backend_t backend); // buffer allocation - ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size); + bark_ggml_backend_buffer_t (*alloc_buffer)(bark_ggml_backend_t backend, size_t size); // get buffer alignment - size_t (*get_alignment)(ggml_backend_t backend); + size_t (*get_alignment)(bark_ggml_backend_t backend); // tensor data access // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize - void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - void (*synchronize) (ggml_backend_t backend); + void (*set_tensor_async)(bark_ggml_backend_t backend, struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*get_tensor_async)(bark_ggml_backend_t backend, const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size); + void (*synchronize) (bark_ggml_backend_t backend); // (optional) copy tensor between different backends, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_from)(bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst); + void (*cpy_tensor_to) (bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst); // compute graph with a plan - ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph); - void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); + bark_ggml_backend_graph_plan_t (*graph_plan_create) (bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph); + void (*graph_plan_free) (bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan); + void (*graph_plan_compute)(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan); // compute graph without a plan - void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph); + void (*graph_compute)(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph); // check if the backend supports an operation - bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op); + bool (*supports_op)(bark_ggml_backend_t backend, const struct bark_ggml_tensor * op); }; // TODO: hide behind API - struct ggml_backend { - struct ggml_backend_i iface; + struct bark_ggml_backend { + struct bark_ggml_backend_i iface; - ggml_backend_context_t context; + bark_ggml_backend_context_t context; }; // backend helper functions - GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor); + BARK_GGML_API bark_ggml_backend_t bark_ggml_get_backend(const struct bark_ggml_tensor * tensor); - GGML_API const char * ggml_backend_name(ggml_backend_t backend); - GGML_API void ggml_backend_free(ggml_backend_t backend); + BARK_GGML_API const char * 
bark_ggml_backend_name(bark_ggml_backend_t backend); + BARK_GGML_API void bark_ggml_backend_free(bark_ggml_backend_t backend); - GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size); + BARK_GGML_API bark_ggml_backend_buffer_t bark_ggml_backend_alloc_buffer(bark_ggml_backend_t backend, size_t size); - GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend); + BARK_GGML_API size_t bark_ggml_backend_get_alignment(bark_ggml_backend_t backend); - GGML_API void ggml_backend_tensor_set_async( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + BARK_GGML_API void bark_ggml_backend_tensor_set_async( struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size); + BARK_GGML_API void bark_ggml_backend_tensor_get_async(const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size); - GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + BARK_GGML_API void bark_ggml_backend_tensor_set( struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size); + BARK_GGML_API void bark_ggml_backend_tensor_get(const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size); - GGML_API void ggml_backend_synchronize(ggml_backend_t backend); + BARK_GGML_API void bark_ggml_backend_synchronize(bark_ggml_backend_t backend); - GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph); + BARK_GGML_API bark_ggml_backend_graph_plan_t bark_ggml_backend_graph_plan_create (bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph); - GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan); - GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan); - GGML_API void ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph); - GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op); + BARK_GGML_API void bark_ggml_backend_graph_plan_free (bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan); + BARK_GGML_API void bark_ggml_backend_graph_plan_compute(bark_ggml_backend_t backend, bark_ggml_backend_graph_plan_t plan); + BARK_GGML_API void bark_ggml_backend_graph_compute (bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph); + BARK_GGML_API bool bark_ggml_backend_supports_op (bark_ggml_backend_t backend, const struct bark_ggml_tensor * op); // tensor copy between different backends - GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst); + BARK_GGML_API void bark_ggml_backend_tensor_copy(struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst); // // CPU backend // - GGML_API ggml_backend_t ggml_backend_cpu_init(void); + BARK_GGML_API bark_ggml_backend_t bark_ggml_backend_cpu_init(void); - GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend); + BARK_GGML_API bool bark_ggml_backend_is_cpu(bark_ggml_backend_t backend); - GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads); + BARK_GGML_API void bark_ggml_backend_cpu_set_n_threads(bark_ggml_backend_t backend_cpu, int 
n_threads); - GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size); + BARK_GGML_API bark_ggml_backend_buffer_t bark_ggml_backend_cpu_buffer_from_ptr(bark_ggml_backend_t backend_cpu, void * ptr, size_t size); #ifdef __cplusplus } diff --git a/cpp/ggml-metal.h b/cpp/ggml-metal.h index 096b844..af649fd 100644 --- a/cpp/ggml-metal.h +++ b/cpp/ggml-metal.h @@ -1,20 +1,20 @@ -// An interface allowing to compute ggml_cgraph with Metal +// An interface allowing to compute bark_ggml_cgraph with Metal // // This is a fully functional interface that extends ggml with GPU support for Apple devices. // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.) // // How it works? // -// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this -// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you -// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.) +// As long as your program can create and evaluate a bark_ggml_cgraph on the CPU, you can use this +// interface to evaluate the same graph on the GPU. Instead of using bark_ggml_graph_compute(), you +// use bark_ggml_metal_graph_compute() (or bark_ggml_vulkan_graph_compute(), etc.) // // You only need to make sure that all memory buffers that you used during the graph creation -// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is +// are mapped to the device memory with the bark_ggml_metal_add_buffer() function. This mapping is // used during the graph evaluation to determine the arguments of the compute kernels. // // Synchronization between device and host memory (for example for input and output tensors) -// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions. +// is done with the bark_ggml_metal_set_tensor() and bark_ggml_metal_get_tensor() functions. 
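
(Editor's aside, not part of the patch: a minimal usage sketch of the Metal path described in the comment above. Only the bark_ggml_metal_* calls are the ones declared in cpp/ggml-metal.h in this diff; the graph-building names — bark_ggml_init, bark_ggml_init_params, bark_ggml_new_tensor_2d, BARK_GGML_TYPE_F32, bark_ggml_mul_mat, bark_ggml_build_forward, bark_ggml_get_mem_buffer, bark_ggml_get_mem_size, bark_ggml_free — are assumed to follow the renamed upstream ggml API and are not taken from this hunk.)

// Illustrative sketch only: evaluate a small graph on Metal instead of the CPU.
// Graph-building calls are assumptions based on the upstream ggml API after the rename.
#include "ggml.h"
#include "ggml-metal.h"

static void run_on_metal(void) {
    struct bark_ggml_init_params params = {
        /* .mem_size   = */ 16 * 1024 * 1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ false,
    };
    struct bark_ggml_context * ctx = bark_ggml_init(params);

    // build a trivial graph: c = mul_mat(a, b); fill a->data / b->data before computing
    struct bark_ggml_tensor * a = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, 64, 64);
    struct bark_ggml_tensor * b = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, 64, 64);
    struct bark_ggml_tensor * c = bark_ggml_mul_mat(ctx, a, b);
    struct bark_ggml_cgraph   gf = bark_ggml_build_forward(c);

    // map the host buffer that backs the graph to device memory, then compute with
    // bark_ggml_metal_graph_compute() instead of bark_ggml_graph_compute()
    struct bark_ggml_metal_context * mctx = bark_ggml_metal_init(1);
    bark_ggml_metal_add_buffer(mctx, "data",
                               bark_ggml_get_mem_buffer(ctx),
                               bark_ggml_get_mem_size(ctx),
                               bark_ggml_get_mem_size(ctx));

    bark_ggml_metal_set_tensor(mctx, a);   // push inputs to the device
    bark_ggml_metal_set_tensor(mctx, b);
    bark_ggml_metal_graph_compute(mctx, &gf);
    bark_ggml_metal_get_tensor(mctx, c);   // pull the result back into host memory

    bark_ggml_metal_free(mctx);
    bark_ggml_free(ctx);
}

(The same pattern is what the comment above describes: build the graph as usual, map every backing buffer once with bark_ggml_metal_add_buffer(), and use set/get_tensor for host/device synchronization of inputs and outputs.)
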
// #pragma once @@ -26,11 +26,11 @@ #include // max memory buffers that can be mapped to the device -#define GGML_METAL_MAX_BUFFERS 16 -#define GGML_METAL_MAX_COMMAND_BUFFERS 32 +#define BARK_GGML_METAL_MAX_BUFFERS 16 +#define BARK_GGML_METAL_MAX_COMMAND_BUFFERS 32 -struct ggml_tensor; -struct ggml_cgraph; +struct bark_ggml_tensor; +struct bark_ggml_cgraph; #ifdef __cplusplus extern "C" { @@ -41,64 +41,64 @@ extern "C" { // temporary exposed to user-code // -struct ggml_metal_context; +struct bark_ggml_metal_context; -void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data); +void bark_ggml_metal_log_set_callback(bark_ggml_log_callback log_callback, void * user_data); // number of command buffers to use -struct ggml_metal_context * ggml_metal_init(int n_cb); -void ggml_metal_free(struct ggml_metal_context * ctx); +struct bark_ggml_metal_context * bark_ggml_metal_init(int n_cb); +void bark_ggml_metal_free(struct bark_ggml_metal_context * ctx); -void * ggml_metal_host_malloc(size_t n); -void ggml_metal_host_free (void * data); +void * bark_ggml_metal_host_malloc(size_t n); +void bark_ggml_metal_host_free (void * data); // set the number of command buffers to use -void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb); +void bark_ggml_metal_set_n_cb(struct bark_ggml_metal_context * ctx, int n_cb); // creates a mapping between a host memory buffer and a device memory buffer -// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute +// - make sure to map all buffers used in the graph before calling bark_ggml_metal_graph_compute // - the mapping is used during computation to determine the arguments of the compute kernels // - you don't need to keep the host memory buffer allocated as it is never accessed by Metal // - max_size specifies the maximum size of a tensor and is used to create shared views such // that it is guaranteed that the tensor will fit in at least one of the views // -bool ggml_metal_add_buffer( - struct ggml_metal_context * ctx, +bool bark_ggml_metal_add_buffer( + struct bark_ggml_metal_context * ctx, const char * name, void * data, size_t size, size_t max_size); // set data from host memory into the device -void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t); +void bark_ggml_metal_set_tensor(struct bark_ggml_metal_context * ctx, struct bark_ggml_tensor * t); // get data from the device into host memory -void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t); +void bark_ggml_metal_get_tensor(struct bark_ggml_metal_context * ctx, struct bark_ggml_tensor * t); // try to find operations that can be run concurrently in the graph // you should run it again if the topology of your graph changes -void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf, bool check_mem); +void bark_ggml_metal_graph_find_concurrency(struct bark_ggml_metal_context * ctx, struct bark_ggml_cgraph * gf, bool check_mem); // if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized -int ggml_metal_if_optimized(struct ggml_metal_context * ctx); +int bark_ggml_metal_if_optimized(struct bark_ggml_metal_context * ctx); -// output the concur_list for ggml_alloc -int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx); +// output the concur_list for bark_ggml_alloc +int * bark_ggml_metal_get_concur_list(struct bark_ggml_metal_context * ctx); -// same as ggml_graph_compute but uses Metal 
+// same as bark_ggml_graph_compute but uses Metal // creates gf->n_threads command buffers in parallel -void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf); +void bark_ggml_metal_graph_compute(struct bark_ggml_metal_context * ctx, struct bark_ggml_cgraph * gf); // // backend API // user-code should use only these functions // -GGML_API ggml_backend_t ggml_backend_metal_init(void); +BARK_GGML_API bark_ggml_backend_t bark_ggml_backend_metal_init(void); -GGML_API bool ggml_backend_is_metal(ggml_backend_t backend); +BARK_GGML_API bool bark_ggml_backend_is_metal(bark_ggml_backend_t backend); -GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb); +BARK_GGML_API void bark_ggml_backend_metal_set_n_cb(bark_ggml_backend_t backend, int n_cb); #ifdef __cplusplus } diff --git a/cpp/ggml-metal.m b/cpp/ggml-metal.m index 29cb3c9..ea1cd0c 100644 --- a/cpp/ggml-metal.m +++ b/cpp/ggml-metal.m @@ -11,21 +11,21 @@ #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) -#ifdef GGML_METAL_NDEBUG -#define GGML_METAL_LOG_INFO(...) -#define GGML_METAL_LOG_WARN(...) -#define GGML_METAL_LOG_ERROR(...) +#ifdef BARK_GGML_METAL_NDEBUG +#define BARK_GGML_METAL_LOG_INFO(...) +#define BARK_GGML_METAL_LOG_WARN(...) +#define BARK_GGML_METAL_LOG_ERROR(...) #else -#define GGML_METAL_LOG_INFO(...) ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__) -#define GGML_METAL_LOG_WARN(...) ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__) -#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) +#define BARK_GGML_METAL_LOG_INFO(...) bark_ggml_metal_log(BARK_GGML_LOG_LEVEL_INFO, __VA_ARGS__) +#define BARK_GGML_METAL_LOG_WARN(...) bark_ggml_metal_log(BARK_GGML_LOG_LEVEL_WARN, __VA_ARGS__) +#define BARK_GGML_METAL_LOG_ERROR(...) 
bark_ggml_metal_log(BARK_GGML_LOG_LEVEL_ERROR, __VA_ARGS__) #endif #define UNUSED(x) (void)(x) -#define GGML_MAX_CONCUR (2*GGML_MAX_NODES) +#define BARK_GGML_MAX_CONCUR (2*BARK_GGML_MAX_NODES) -struct ggml_metal_buffer { +struct bark_ggml_metal_buffer { const char * name; void * data; @@ -34,85 +34,85 @@ id metal; }; -struct ggml_metal_context { +struct bark_ggml_metal_context { int n_cb; id device; id queue; id library; - id command_buffers [GGML_METAL_MAX_COMMAND_BUFFERS]; - id command_encoders[GGML_METAL_MAX_COMMAND_BUFFERS]; + id command_buffers [BARK_GGML_METAL_MAX_COMMAND_BUFFERS]; + id command_encoders[BARK_GGML_METAL_MAX_COMMAND_BUFFERS]; dispatch_queue_t d_queue; int n_buffers; - struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS]; + struct bark_ggml_metal_buffer buffers[BARK_GGML_METAL_MAX_BUFFERS]; - int concur_list[GGML_MAX_CONCUR]; + int concur_list[BARK_GGML_MAX_CONCUR]; int concur_list_len; // custom kernels -#define GGML_METAL_DECL_KERNEL(name) \ +#define BARK_GGML_METAL_DECL_KERNEL(name) \ id function_##name; \ id pipeline_##name - GGML_METAL_DECL_KERNEL(add); - GGML_METAL_DECL_KERNEL(add_row); // TODO: avoid this extra kernel, instead extend the "add" kernel to support broadcast - GGML_METAL_DECL_KERNEL(mul); - GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast - GGML_METAL_DECL_KERNEL(scale); - GGML_METAL_DECL_KERNEL(silu); - GGML_METAL_DECL_KERNEL(relu); - GGML_METAL_DECL_KERNEL(gelu); - GGML_METAL_DECL_KERNEL(soft_max); - GGML_METAL_DECL_KERNEL(soft_max_4); - GGML_METAL_DECL_KERNEL(diag_mask_inf); - GGML_METAL_DECL_KERNEL(diag_mask_inf_8); - GGML_METAL_DECL_KERNEL(get_rows_f32); - GGML_METAL_DECL_KERNEL(get_rows_f16); - GGML_METAL_DECL_KERNEL(get_rows_q4_0); - GGML_METAL_DECL_KERNEL(get_rows_q4_1); - GGML_METAL_DECL_KERNEL(get_rows_q8_0); - GGML_METAL_DECL_KERNEL(get_rows_q2_K); - GGML_METAL_DECL_KERNEL(get_rows_q3_K); - GGML_METAL_DECL_KERNEL(get_rows_q4_K); - GGML_METAL_DECL_KERNEL(get_rows_q5_K); - GGML_METAL_DECL_KERNEL(get_rows_q6_K); - GGML_METAL_DECL_KERNEL(rms_norm); - GGML_METAL_DECL_KERNEL(norm); - GGML_METAL_DECL_KERNEL(mul_mv_f32_f32); - GGML_METAL_DECL_KERNEL(mul_mv_f16_f32); - GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_1row); - GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_l4); - GGML_METAL_DECL_KERNEL(mul_mv_q4_0_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q4_1_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q8_0_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q2_K_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q3_K_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q4_K_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q5_K_f32); - GGML_METAL_DECL_KERNEL(mul_mv_q6_K_f32); - GGML_METAL_DECL_KERNEL(mul_mm_f32_f32); - GGML_METAL_DECL_KERNEL(mul_mm_f16_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q8_0_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32); - GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32); - GGML_METAL_DECL_KERNEL(rope_f32); - GGML_METAL_DECL_KERNEL(rope_f16); - GGML_METAL_DECL_KERNEL(alibi_f32); - GGML_METAL_DECL_KERNEL(cpy_f32_f16); - GGML_METAL_DECL_KERNEL(cpy_f32_f32); - GGML_METAL_DECL_KERNEL(cpy_f16_f16); - GGML_METAL_DECL_KERNEL(concat); - GGML_METAL_DECL_KERNEL(sqr); - -#undef GGML_METAL_DECL_KERNEL + BARK_GGML_METAL_DECL_KERNEL(add); + BARK_GGML_METAL_DECL_KERNEL(add_row); // TODO: avoid this extra kernel, instead extend the "add" kernel to 
support broadcast + BARK_GGML_METAL_DECL_KERNEL(mul); + BARK_GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast + BARK_GGML_METAL_DECL_KERNEL(scale); + BARK_GGML_METAL_DECL_KERNEL(silu); + BARK_GGML_METAL_DECL_KERNEL(relu); + BARK_GGML_METAL_DECL_KERNEL(gelu); + BARK_GGML_METAL_DECL_KERNEL(soft_max); + BARK_GGML_METAL_DECL_KERNEL(soft_max_4); + BARK_GGML_METAL_DECL_KERNEL(diag_mask_inf); + BARK_GGML_METAL_DECL_KERNEL(diag_mask_inf_8); + BARK_GGML_METAL_DECL_KERNEL(get_rows_f32); + BARK_GGML_METAL_DECL_KERNEL(get_rows_f16); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q4_0); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q4_1); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q8_0); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q2_K); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q3_K); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q4_K); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q5_K); + BARK_GGML_METAL_DECL_KERNEL(get_rows_q6_K); + BARK_GGML_METAL_DECL_KERNEL(rms_norm); + BARK_GGML_METAL_DECL_KERNEL(norm); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_f32_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_f16_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_1row); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_l4); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q4_0_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q4_1_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q8_0_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q2_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q3_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q4_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q5_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mv_q6_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_f32_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_f16_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q8_0_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32); + BARK_GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32); + BARK_GGML_METAL_DECL_KERNEL(rope_f32); + BARK_GGML_METAL_DECL_KERNEL(rope_f16); + BARK_GGML_METAL_DECL_KERNEL(alibi_f32); + BARK_GGML_METAL_DECL_KERNEL(cpy_f32_f16); + BARK_GGML_METAL_DECL_KERNEL(cpy_f32_f32); + BARK_GGML_METAL_DECL_KERNEL(cpy_f16_f16); + BARK_GGML_METAL_DECL_KERNEL(concat); + BARK_GGML_METAL_DECL_KERNEL(sqr); + +#undef BARK_GGML_METAL_DECL_KERNEL }; // MSL code @@ -121,32 +121,32 @@ static NSString * const msl_library_source = @"see metal.metal"; // Here to assist with NSBundle Path Hack -@interface GGMLMetalClass : NSObject +@interface BARKGGMLMetalClass : NSObject @end -@implementation GGMLMetalClass +@implementation BARKGGMLMetalClass @end -ggml_log_callback ggml_metal_log_callback = NULL; -void * ggml_metal_log_user_data = NULL; +bark_ggml_log_callback bark_ggml_metal_log_callback = NULL; +void * bark_ggml_metal_log_user_data = NULL; -void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) { - ggml_metal_log_callback = log_callback; - ggml_metal_log_user_data = user_data; +void bark_ggml_metal_log_set_callback(bark_ggml_log_callback log_callback, void * user_data) { + bark_ggml_metal_log_callback = log_callback; + bark_ggml_metal_log_user_data = user_data; } -static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ - if (ggml_metal_log_callback != NULL) { +static void bark_ggml_metal_log(enum bark_ggml_log_level level, const char* format, ...){ + if 
(bark_ggml_metal_log_callback != NULL) { va_list args; va_start(args, format); char buffer[128]; int len = vsnprintf(buffer, 128, format, args); if (len < 128) { - ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data); + bark_ggml_metal_log_callback(level, buffer, bark_ggml_metal_log_user_data); } else { char* buffer2 = malloc(len+1); vsnprintf(buffer2, len+1, format, args); buffer2[len] = 0; - ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data); + bark_ggml_metal_log_callback(level, buffer2, bark_ggml_metal_log_user_data); free(buffer2); } va_end(args); @@ -155,8 +155,8 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ -struct ggml_metal_context * ggml_metal_init(int n_cb) { - GGML_METAL_LOG_INFO("%s: allocating\n", __func__); +struct bark_ggml_metal_context * bark_ggml_metal_init(int n_cb) { + BARK_GGML_METAL_LOG_INFO("%s: allocating\n", __func__); id device; NSString * s; @@ -166,19 +166,19 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ NSArray * devices = MTLCopyAllDevices(); for (device in devices) { s = [device name]; - GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [s UTF8String]); + BARK_GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [s UTF8String]); } #endif // Pick and show default Metal device device = MTLCreateSystemDefaultDevice(); s = [device name]; - GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [s UTF8String]); + BARK_GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [s UTF8String]); // Configure context - struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context)); + struct bark_ggml_metal_context * ctx = malloc(sizeof(struct bark_ggml_metal_context)); ctx->device = device; - ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS); + ctx->n_cb = MIN(n_cb, BARK_GGML_METAL_MAX_BUFFERS); ctx->queue = [ctx->device newCommandQueue]; ctx->n_buffers = 0; ctx->concur_list_len = 0; @@ -191,27 +191,27 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ #ifdef SWIFT_PACKAGE bundle = SWIFTPM_MODULE_BUNDLE; #else - bundle = [NSBundle bundleForClass:[GGMLMetalClass class]]; + bundle = [NSBundle bundleForClass:[BARKGGMLMetalClass class]]; #endif NSError * error = nil; NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"]; if (libPath != nil) { NSURL * libURL = [NSURL fileURLWithPath:libPath]; - GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]); + BARK_GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]); ctx->library = [ctx->device newLibraryWithURL:libURL error:&error]; } else { - GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); + BARK_GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); NSString * sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; - GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]); + BARK_GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]); NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error]; if (error) { - GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + BARK_GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return NULL; } MTLCompileOptions* options = nil; -#ifdef GGML_QKK_64 +#ifdef BARK_GGML_QKK_64 options = [MTLCompileOptions new]; 
options.preprocessorMacros = @{ @"QK_K" : @(64) }; #endif @@ -219,7 +219,7 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ } if (error) { - GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + BARK_GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); return NULL; } } @@ -227,167 +227,167 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ // load kernels { NSError * error = nil; -#define GGML_METAL_ADD_KERNEL(name) \ +#define BARK_GGML_METAL_ADD_KERNEL(name) \ ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ - GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ + BARK_GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \ (int) ctx->pipeline_##name.threadExecutionWidth); \ if (error) { \ - GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ + BARK_GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ return NULL; \ } - GGML_METAL_ADD_KERNEL(add); - GGML_METAL_ADD_KERNEL(add_row); - GGML_METAL_ADD_KERNEL(mul); - GGML_METAL_ADD_KERNEL(mul_row); - GGML_METAL_ADD_KERNEL(scale); - GGML_METAL_ADD_KERNEL(silu); - GGML_METAL_ADD_KERNEL(relu); - GGML_METAL_ADD_KERNEL(gelu); - GGML_METAL_ADD_KERNEL(soft_max); - GGML_METAL_ADD_KERNEL(soft_max_4); - GGML_METAL_ADD_KERNEL(diag_mask_inf); - GGML_METAL_ADD_KERNEL(diag_mask_inf_8); - GGML_METAL_ADD_KERNEL(get_rows_f32); - GGML_METAL_ADD_KERNEL(get_rows_f16); - GGML_METAL_ADD_KERNEL(get_rows_q4_0); - GGML_METAL_ADD_KERNEL(get_rows_q4_1); - GGML_METAL_ADD_KERNEL(get_rows_q8_0); - GGML_METAL_ADD_KERNEL(get_rows_q2_K); - GGML_METAL_ADD_KERNEL(get_rows_q3_K); - GGML_METAL_ADD_KERNEL(get_rows_q4_K); - GGML_METAL_ADD_KERNEL(get_rows_q5_K); - GGML_METAL_ADD_KERNEL(get_rows_q6_K); - GGML_METAL_ADD_KERNEL(rms_norm); - GGML_METAL_ADD_KERNEL(norm); - GGML_METAL_ADD_KERNEL(mul_mv_f32_f32); - GGML_METAL_ADD_KERNEL(mul_mv_f16_f32); - GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_1row); - GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_l4); - GGML_METAL_ADD_KERNEL(mul_mv_q4_0_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q4_1_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q8_0_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q2_K_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q3_K_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q4_K_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q5_K_f32); - GGML_METAL_ADD_KERNEL(mul_mv_q6_K_f32); + BARK_GGML_METAL_ADD_KERNEL(add); + BARK_GGML_METAL_ADD_KERNEL(add_row); + BARK_GGML_METAL_ADD_KERNEL(mul); + BARK_GGML_METAL_ADD_KERNEL(mul_row); + BARK_GGML_METAL_ADD_KERNEL(scale); + BARK_GGML_METAL_ADD_KERNEL(silu); + BARK_GGML_METAL_ADD_KERNEL(relu); + BARK_GGML_METAL_ADD_KERNEL(gelu); + BARK_GGML_METAL_ADD_KERNEL(soft_max); + BARK_GGML_METAL_ADD_KERNEL(soft_max_4); + BARK_GGML_METAL_ADD_KERNEL(diag_mask_inf); + BARK_GGML_METAL_ADD_KERNEL(diag_mask_inf_8); + BARK_GGML_METAL_ADD_KERNEL(get_rows_f32); + BARK_GGML_METAL_ADD_KERNEL(get_rows_f16); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q4_0); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q4_1); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q8_0); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q2_K); + 
BARK_GGML_METAL_ADD_KERNEL(get_rows_q3_K); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q4_K); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q5_K); + BARK_GGML_METAL_ADD_KERNEL(get_rows_q6_K); + BARK_GGML_METAL_ADD_KERNEL(rms_norm); + BARK_GGML_METAL_ADD_KERNEL(norm); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_f32_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_f16_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_1row); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_l4); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q4_0_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q4_1_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q8_0_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q2_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q3_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q4_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q5_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mv_q6_K_f32); if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) { - GGML_METAL_ADD_KERNEL(mul_mm_f32_f32); - GGML_METAL_ADD_KERNEL(mul_mm_f16_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32); - GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_f32_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_f16_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32); + BARK_GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32); } - GGML_METAL_ADD_KERNEL(rope_f32); - GGML_METAL_ADD_KERNEL(rope_f16); - GGML_METAL_ADD_KERNEL(alibi_f32); - GGML_METAL_ADD_KERNEL(cpy_f32_f16); - GGML_METAL_ADD_KERNEL(cpy_f32_f32); - GGML_METAL_ADD_KERNEL(cpy_f16_f16); - GGML_METAL_ADD_KERNEL(concat); - GGML_METAL_ADD_KERNEL(sqr); - -#undef GGML_METAL_ADD_KERNEL + BARK_GGML_METAL_ADD_KERNEL(rope_f32); + BARK_GGML_METAL_ADD_KERNEL(rope_f16); + BARK_GGML_METAL_ADD_KERNEL(alibi_f32); + BARK_GGML_METAL_ADD_KERNEL(cpy_f32_f16); + BARK_GGML_METAL_ADD_KERNEL(cpy_f32_f32); + BARK_GGML_METAL_ADD_KERNEL(cpy_f16_f16); + BARK_GGML_METAL_ADD_KERNEL(concat); + BARK_GGML_METAL_ADD_KERNEL(sqr); + +#undef BARK_GGML_METAL_ADD_KERNEL } #if TARGET_OS_OSX // print MTL GPU family: - GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]); + BARK_GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]); // determine max supported GPU family // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) { if ([ctx->device supportsFamily:i]) { - GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - MTLGPUFamilyApple1 + 1, i); + BARK_GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - MTLGPUFamilyApple1 + 1, i); break; } } - GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false"); - GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? 
"true" : "false"); + BARK_GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (ctx->device.maxTransferRate != 0) { - GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0); } else { - GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__); + BARK_GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__); } #endif return ctx; } -void ggml_metal_free(struct ggml_metal_context * ctx) { - GGML_METAL_LOG_INFO("%s: deallocating\n", __func__); -#define GGML_METAL_DEL_KERNEL(name) \ +void bark_ggml_metal_free(struct bark_ggml_metal_context * ctx) { + BARK_GGML_METAL_LOG_INFO("%s: deallocating\n", __func__); +#define BARK_GGML_METAL_DEL_KERNEL(name) \ [ctx->function_##name release]; \ [ctx->pipeline_##name release]; - GGML_METAL_DEL_KERNEL(add); - GGML_METAL_DEL_KERNEL(add_row); - GGML_METAL_DEL_KERNEL(mul); - GGML_METAL_DEL_KERNEL(mul_row); - GGML_METAL_DEL_KERNEL(scale); - GGML_METAL_DEL_KERNEL(silu); - GGML_METAL_DEL_KERNEL(relu); - GGML_METAL_DEL_KERNEL(gelu); - GGML_METAL_DEL_KERNEL(soft_max); - GGML_METAL_DEL_KERNEL(soft_max_4); - GGML_METAL_DEL_KERNEL(diag_mask_inf); - GGML_METAL_DEL_KERNEL(diag_mask_inf_8); - GGML_METAL_DEL_KERNEL(get_rows_f32); - GGML_METAL_DEL_KERNEL(get_rows_f16); - GGML_METAL_DEL_KERNEL(get_rows_q4_0); - GGML_METAL_DEL_KERNEL(get_rows_q4_1); - GGML_METAL_DEL_KERNEL(get_rows_q8_0); - GGML_METAL_DEL_KERNEL(get_rows_q2_K); - GGML_METAL_DEL_KERNEL(get_rows_q3_K); - GGML_METAL_DEL_KERNEL(get_rows_q4_K); - GGML_METAL_DEL_KERNEL(get_rows_q5_K); - GGML_METAL_DEL_KERNEL(get_rows_q6_K); - GGML_METAL_DEL_KERNEL(rms_norm); - GGML_METAL_DEL_KERNEL(norm); - GGML_METAL_DEL_KERNEL(mul_mv_f32_f32); - GGML_METAL_DEL_KERNEL(mul_mv_f16_f32); - GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_1row); - GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_l4); - GGML_METAL_DEL_KERNEL(mul_mv_q4_0_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q4_1_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q8_0_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q2_K_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q3_K_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q4_K_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q5_K_f32); - GGML_METAL_DEL_KERNEL(mul_mv_q6_K_f32); + BARK_GGML_METAL_DEL_KERNEL(add); + BARK_GGML_METAL_DEL_KERNEL(add_row); + BARK_GGML_METAL_DEL_KERNEL(mul); + BARK_GGML_METAL_DEL_KERNEL(mul_row); + BARK_GGML_METAL_DEL_KERNEL(scale); + BARK_GGML_METAL_DEL_KERNEL(silu); + BARK_GGML_METAL_DEL_KERNEL(relu); + BARK_GGML_METAL_DEL_KERNEL(gelu); + BARK_GGML_METAL_DEL_KERNEL(soft_max); + BARK_GGML_METAL_DEL_KERNEL(soft_max_4); + BARK_GGML_METAL_DEL_KERNEL(diag_mask_inf); + BARK_GGML_METAL_DEL_KERNEL(diag_mask_inf_8); + BARK_GGML_METAL_DEL_KERNEL(get_rows_f32); + BARK_GGML_METAL_DEL_KERNEL(get_rows_f16); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q4_0); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q4_1); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q8_0); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q2_K); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q3_K); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q4_K); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q5_K); + BARK_GGML_METAL_DEL_KERNEL(get_rows_q6_K); + BARK_GGML_METAL_DEL_KERNEL(rms_norm); + BARK_GGML_METAL_DEL_KERNEL(norm); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_f32_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_f16_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_1row); + 
BARK_GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_l4); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q4_0_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q4_1_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q8_0_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q2_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q3_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q4_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q5_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mv_q6_K_f32); if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) { - GGML_METAL_DEL_KERNEL(mul_mm_f32_f32); - GGML_METAL_DEL_KERNEL(mul_mm_f16_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32); - GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_f32_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_f16_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32); + BARK_GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32); } - GGML_METAL_DEL_KERNEL(rope_f32); - GGML_METAL_DEL_KERNEL(rope_f16); - GGML_METAL_DEL_KERNEL(alibi_f32); - GGML_METAL_DEL_KERNEL(cpy_f32_f16); - GGML_METAL_DEL_KERNEL(cpy_f32_f32); - GGML_METAL_DEL_KERNEL(cpy_f16_f16); - GGML_METAL_DEL_KERNEL(concat); - GGML_METAL_DEL_KERNEL(sqr); + BARK_GGML_METAL_DEL_KERNEL(rope_f32); + BARK_GGML_METAL_DEL_KERNEL(rope_f16); + BARK_GGML_METAL_DEL_KERNEL(alibi_f32); + BARK_GGML_METAL_DEL_KERNEL(cpy_f32_f16); + BARK_GGML_METAL_DEL_KERNEL(cpy_f32_f32); + BARK_GGML_METAL_DEL_KERNEL(cpy_f16_f16); + BARK_GGML_METAL_DEL_KERNEL(concat); + BARK_GGML_METAL_DEL_KERNEL(sqr); -#undef GGML_METAL_DEL_KERNEL +#undef BARK_GGML_METAL_DEL_KERNEL for (int i = 0; i < ctx->n_buffers; ++i) { [ctx->buffers[i].metal release]; @@ -402,30 +402,30 @@ void ggml_metal_free(struct ggml_metal_context * ctx) { free(ctx); } -void * ggml_metal_host_malloc(size_t n) { +void * bark_ggml_metal_host_malloc(size_t n) { void * data = NULL; const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n); if (result != 0) { - GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); + BARK_GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__); return NULL; } return data; } -void ggml_metal_host_free(void * data) { +void bark_ggml_metal_host_free(void * data) { free(data); } -void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) { - ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS); +void bark_ggml_metal_set_n_cb(struct bark_ggml_metal_context * ctx, int n_cb) { + ctx->n_cb = MIN(n_cb, BARK_GGML_METAL_MAX_BUFFERS); } -int ggml_metal_if_optimized(struct ggml_metal_context * ctx) { +int bark_ggml_metal_if_optimized(struct bark_ggml_metal_context * ctx) { return ctx->concur_list_len; } -int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) { +int * bark_ggml_metal_get_concur_list(struct bark_ggml_metal_context * ctx) { return ctx->concur_list; } @@ -433,38 +433,38 @@ int ggml_metal_if_optimized(struct ggml_metal_context * ctx) { // the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the // Metal buffer based on the host memory pointer // -static 
id ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) { - //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach); +static id bark_ggml_metal_get_buffer(struct bark_ggml_metal_context * ctx, struct bark_ggml_tensor * t, size_t * offs) { + //BARK_GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach); - const int64_t tsize = ggml_nbytes(t); + const int64_t tsize = bark_ggml_nbytes(t); // find the view that contains the tensor fully for (int i = 0; i < ctx->n_buffers; ++i) { const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data; - //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name); + //BARK_GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name); if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) { *offs = (size_t) ioffs; - //GGML_METAL_LOG_INFO("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs); + //BARK_GGML_METAL_LOG_INFO("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs); return ctx->buffers[i].metal; } } - GGML_METAL_LOG_ERROR("%s: error: buffer is nil\n", __func__); + BARK_GGML_METAL_LOG_ERROR("%s: error: buffer is nil\n", __func__); return nil; } -bool ggml_metal_add_buffer( - struct ggml_metal_context * ctx, +bool bark_ggml_metal_add_buffer( + struct bark_ggml_metal_context * ctx, const char * name, void * data, size_t size, size_t max_size) { - if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) { - GGML_METAL_LOG_ERROR("%s: error: too many buffers\n", __func__); + if (ctx->n_buffers >= BARK_GGML_METAL_MAX_BUFFERS) { + BARK_GGML_METAL_LOG_ERROR("%s: error: too many buffers\n", __func__); return false; } @@ -474,7 +474,7 @@ bool ggml_metal_add_buffer( const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data; if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) { - GGML_METAL_LOG_ERROR("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name); + BARK_GGML_METAL_LOG_ERROR("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name); return false; } } @@ -495,11 +495,11 @@ bool ggml_metal_add_buffer( ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (ctx->buffers[ctx->n_buffers].metal == nil) { - GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0); return false; } - GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0); ++ctx->n_buffers; } else { @@ -519,13 +519,13 @@ bool ggml_metal_add_buffer( ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) 
length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil]; if (ctx->buffers[ctx->n_buffers].metal == nil) { - GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0); return false; } - GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i); + BARK_GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i); if (i + size_step < size) { - GGML_METAL_LOG_INFO("\n"); + BARK_GGML_METAL_LOG_INFO("\n"); } ++ctx->n_buffers; @@ -533,48 +533,48 @@ bool ggml_metal_add_buffer( } #if TARGET_OS_OSX - GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", + BARK_GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)", ctx->device.currentAllocatedSize / 1024.0 / 1024.0, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) { - GGML_METAL_LOG_WARN(", warning: current allocated size is greater than the recommended max working set size\n", __func__); + BARK_GGML_METAL_LOG_WARN(", warning: current allocated size is greater than the recommended max working set size\n", __func__); } else { - GGML_METAL_LOG_INFO("\n"); + BARK_GGML_METAL_LOG_INFO("\n"); } #else - GGML_METAL_LOG_INFO(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0); + BARK_GGML_METAL_LOG_INFO(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0); #endif } return true; } -void ggml_metal_set_tensor( - struct ggml_metal_context * ctx, - struct ggml_tensor * t) { +void bark_ggml_metal_set_tensor( + struct bark_ggml_metal_context * ctx, + struct bark_ggml_tensor * t) { size_t offs; - id id_dst = ggml_metal_get_buffer(ctx, t, &offs); + id id_dst = bark_ggml_metal_get_buffer(ctx, t, &offs); - memcpy((void *) ((uint8_t *) id_dst.contents + offs), t->data, ggml_nbytes(t)); + memcpy((void *) ((uint8_t *) id_dst.contents + offs), t->data, bark_ggml_nbytes(t)); } -void ggml_metal_get_tensor( - struct ggml_metal_context * ctx, - struct ggml_tensor * t) { +void bark_ggml_metal_get_tensor( + struct bark_ggml_metal_context * ctx, + struct bark_ggml_tensor * t) { size_t offs; - id id_src = ggml_metal_get_buffer(ctx, t, &offs); + id id_src = bark_ggml_metal_get_buffer(ctx, t, &offs); - memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), ggml_nbytes(t)); + memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), bark_ggml_nbytes(t)); } -void ggml_metal_graph_find_concurrency( - struct ggml_metal_context * ctx, - struct ggml_cgraph * gf, bool check_mem) { +void bark_ggml_metal_graph_find_concurrency( + struct bark_ggml_metal_context * ctx, + struct bark_ggml_cgraph * gf, bool check_mem) { int search_depth = gf->n_nodes; //we only find concurrency in this range to avoid wasting too much time - int nodes_unused[GGML_MAX_CONCUR]; + int nodes_unused[BARK_GGML_MAX_CONCUR]; - for (int i = 0; i < GGML_MAX_CONCUR; i++) { ctx->concur_list[i] = 0; } + for (int i = 0; i < BARK_GGML_MAX_CONCUR; i++) { ctx->concur_list[i] = 0; } for (int i = 0; i < gf->n_nodes; i++) { nodes_unused[i] = 1; } ctx->concur_list_len = 0; @@ -591,12 +591,12 @@ void ggml_metal_graph_find_concurrency( int exe_flag = 1; // scan all srcs - for (int src_ind = 0; src_ind < GGML_MAX_SRC; 
src_ind++) { - struct ggml_tensor * src_cur = gf->nodes[i]->src[src_ind]; + for (int src_ind = 0; src_ind < BARK_GGML_MAX_SRC; src_ind++) { + struct bark_ggml_tensor * src_cur = gf->nodes[i]->src[src_ind]; if (src_cur) { // if is leaf nodes it's satisfied. - // TODO: ggml_is_leaf() - if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) { + // TODO: bark_ggml_is_leaf() + if (src_cur->op == BARK_GGML_OP_NONE && src_cur->grad == NULL) { continue; } @@ -621,14 +621,14 @@ void ggml_metal_graph_find_concurrency( // check if nodes[i]'s data will be overwritten by a node before nodes[i]. // if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3] int64_t data_start = (int64_t) gf->nodes[i]->data; - int64_t length = (int64_t) ggml_nbytes(gf->nodes[i]); + int64_t length = (int64_t) bark_ggml_nbytes(gf->nodes[i]); for (int j = n_start; j < i; j++) { - if (nodes_unused[j] && gf->nodes[j]->op != GGML_OP_RESHAPE \ - && gf->nodes[j]->op != GGML_OP_VIEW \ - && gf->nodes[j]->op != GGML_OP_TRANSPOSE \ - && gf->nodes[j]->op != GGML_OP_PERMUTE) { + if (nodes_unused[j] && gf->nodes[j]->op != BARK_GGML_OP_RESHAPE \ + && gf->nodes[j]->op != BARK_GGML_OP_VIEW \ + && gf->nodes[j]->op != BARK_GGML_OP_TRANSPOSE \ + && gf->nodes[j]->op != BARK_GGML_OP_PERMUTE) { if (((int64_t)gf->nodes[j]->data) >= data_start + length || \ - ((int64_t)gf->nodes[j]->data) + (int64_t) ggml_nbytes(gf->nodes[j]) <= data_start) { + ((int64_t)gf->nodes[j]->data) + (int64_t) bark_ggml_nbytes(gf->nodes[j]) <= data_start) { continue; } @@ -655,21 +655,21 @@ void ggml_metal_graph_find_concurrency( level_pos += concurrency + 1; } - if (ctx->concur_list_len > GGML_MAX_CONCUR) { - GGML_METAL_LOG_WARN("%s: too many elements for metal ctx->concur_list!\n", __func__); + if (ctx->concur_list_len > BARK_GGML_MAX_CONCUR) { + BARK_GGML_METAL_LOG_WARN("%s: too many elements for metal ctx->concur_list!\n", __func__); } } -void ggml_metal_graph_compute( - struct ggml_metal_context * ctx, - struct ggml_cgraph * gf) { +void bark_ggml_metal_graph_compute( + struct bark_ggml_metal_context * ctx, + struct bark_ggml_cgraph * gf) { @autoreleasepool { // if there is ctx->concur_list, dispatch concurrently // else fallback to serial dispatch MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor; - const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_CONCUR; + const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= BARK_GGML_MAX_CONCUR; const int n_nodes = has_concur ? ctx->concur_list_len : gf->n_nodes; edesc.dispatchType = has_concur ? MTLDispatchTypeConcurrent : MTLDispatchTypeSerial; @@ -710,11 +710,11 @@ void ggml_metal_graph_compute( continue; } - //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op)); + //BARK_GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, bark_ggml_op_name(gf->nodes[i]->op)); - struct ggml_tensor * src0 = gf->nodes[i]->src[0]; - struct ggml_tensor * src1 = gf->nodes[i]->src[1]; - struct ggml_tensor * dst = gf->nodes[i]; + struct bark_ggml_tensor * src0 = gf->nodes[i]->src[0]; + struct bark_ggml_tensor * src1 = gf->nodes[i]->src[1]; + struct bark_ggml_tensor * dst = gf->nodes[i]; const int64_t ne00 = src0 ? src0->ne[0] : 0; const int64_t ne01 = src0 ? src0->ne[1] : 0; @@ -746,38 +746,38 @@ void ggml_metal_graph_compute( const uint64_t nb2 = dst ? dst->nb[2] : 0; const uint64_t nb3 = dst ? dst->nb[3] : 0; - const enum ggml_type src0t = src0 ? 
src0->type : GGML_TYPE_COUNT; - const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; - const enum ggml_type dstt = dst ? dst->type : GGML_TYPE_COUNT; + const enum bark_ggml_type src0t = src0 ? src0->type : BARK_GGML_TYPE_COUNT; + const enum bark_ggml_type src1t = src1 ? src1->type : BARK_GGML_TYPE_COUNT; + const enum bark_ggml_type dstt = dst ? dst->type : BARK_GGML_TYPE_COUNT; - id id_src0 = src0 ? ggml_metal_get_buffer(ctx, src0, &offs_src0) : nil; - id id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil; - id id_dst = dst ? ggml_metal_get_buffer(ctx, dst, &offs_dst) : nil; + id id_src0 = src0 ? bark_ggml_metal_get_buffer(ctx, src0, &offs_src0) : nil; + id id_src1 = src1 ? bark_ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil; + id id_dst = dst ? bark_ggml_metal_get_buffer(ctx, dst, &offs_dst) : nil; - //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op)); + //BARK_GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, bark_ggml_op_name(dst->op)); //if (src0) { - // GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02, - // ggml_is_contiguous(src0), src0->name); + // BARK_GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, bark_ggml_type_name(src0t), ne00, ne01, ne02, + // bark_ggml_is_contiguous(src0), src0->name); //} //if (src1) { - // GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12, - // ggml_is_contiguous(src1), src1->name); + // BARK_GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, bark_ggml_type_name(src1t), ne10, ne11, ne12, + // bark_ggml_is_contiguous(src1), src1->name); //} //if (dst) { - // GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2, + // BARK_GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, bark_ggml_type_name(dstt), ne0, ne1, ne2, // dst->name); //} switch (dst->op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_TRANSPOSE: - case GGML_OP_PERMUTE: + case BARK_GGML_OP_NONE: + case BARK_GGML_OP_RESHAPE: + case BARK_GGML_OP_VIEW: + case BARK_GGML_OP_TRANSPOSE: + case BARK_GGML_OP_PERMUTE: { // noop } break; - case GGML_OP_CONCAT: + case BARK_GGML_OP_CONCAT: { int64_t nb = ne00; @@ -814,18 +814,18 @@ void ggml_metal_graph_compute( const int nth = MIN(1024, ne0); [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_ADD: + case BARK_GGML_OP_ADD: { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src1)); bool bcast_row = false; int64_t nb = ne00; - if (ggml_nelements(src1) == ne10 && ne00 % 4 == 0) { + if (bark_ggml_nelements(src1) == ne10 && ne00 % 4 == 0) { // src1 is a row - GGML_ASSERT(ne11 == 1); + BARK_GGML_ASSERT(ne11 == 1); nb = ne00 / 4; [encoder setComputePipelineState:ctx->pipeline_add_row]; @@ -864,7 +864,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&nb length:sizeof(nb) atIndex:27]; if (bcast_row) { - const int64_t n = ggml_nelements(dst)/4; + const int64_t n = bark_ggml_nelements(dst)/4; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } else { @@ -873,18 +873,18 @@ void ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne01, 
ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } } break; - case GGML_OP_MUL: + case BARK_GGML_OP_MUL: { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src1)); // utilize float4 - GGML_ASSERT(ne00 % 4 == 0); + BARK_GGML_ASSERT(ne00 % 4 == 0); const int64_t nb = ne00/4; - if (ggml_nelements(src1) == ne10) { + if (bark_ggml_nelements(src1) == ne10) { // src1 is a row - GGML_ASSERT(ne11 == 1); + BARK_GGML_ASSERT(ne11 == 1); [encoder setComputePipelineState:ctx->pipeline_mul_row]; } else { [encoder setComputePipelineState:ctx->pipeline_mul]; @@ -894,13 +894,13 @@ void ggml_metal_graph_compute( [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; [encoder setBytes:&nb length:sizeof(nb) atIndex:3]; - const int64_t n = ggml_nelements(dst)/4; + const int64_t n = bark_ggml_nelements(dst)/4; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_OP_SCALE: + case BARK_GGML_OP_SCALE: { - GGML_ASSERT(ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); const float scale = *(const float *) src1->data; @@ -909,60 +909,60 @@ void ggml_metal_graph_compute( [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; [encoder setBytes:&scale length:sizeof(scale) atIndex:2]; - const int64_t n = ggml_nelements(dst)/4; + const int64_t n = bark_ggml_nelements(dst)/4; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_OP_UNARY: - switch (ggml_get_unary_op(gf->nodes[i])) { - case GGML_UNARY_OP_SILU: + case BARK_GGML_OP_UNARY: + switch (bark_ggml_get_unary_op(gf->nodes[i])) { + case BARK_GGML_UNARY_OP_SILU: { [encoder setComputePipelineState:ctx->pipeline_silu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - const int64_t n = ggml_nelements(dst)/4; + const int64_t n = bark_ggml_nelements(dst)/4; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_UNARY_OP_RELU: + case BARK_GGML_UNARY_OP_RELU: { [encoder setComputePipelineState:ctx->pipeline_relu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - const int64_t n = ggml_nelements(dst); + const int64_t n = bark_ggml_nelements(dst); [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_UNARY_OP_GELU: + case BARK_GGML_UNARY_OP_GELU: { [encoder setComputePipelineState:ctx->pipeline_gelu]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; - const int64_t n = ggml_nelements(dst)/4; + const int64_t n = bark_ggml_nelements(dst)/4; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; default: { - GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); - GGML_ASSERT(false); + BARK_GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, bark_ggml_op_name(dst->op)); + BARK_GGML_ASSERT(false); } } break; - case GGML_OP_SQR: + case BARK_GGML_OP_SQR: { - GGML_ASSERT(ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); [encoder setComputePipelineState:ctx->pipeline_sqr]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder 
setBuffer:id_dst offset:offs_dst atIndex:1]; - const int64_t n = ggml_nelements(dst); + const int64_t n = bark_ggml_nelements(dst); [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_OP_SOFT_MAX: + case BARK_GGML_OP_SOFT_MAX: { const int nth = MIN(32, ne00); @@ -979,7 +979,7 @@ void ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_DIAG_MASK_INF: + case BARK_GGML_OP_DIAG_MASK_INF: { const int n_past = ((int32_t *)(dst->op_params))[0]; @@ -1001,10 +1001,10 @@ void ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } } break; - case GGML_OP_MUL_MAT: + case BARK_GGML_OP_MUL_MAT: { - GGML_ASSERT(ne00 == ne10); - GGML_ASSERT(ne03 == ne13); + BARK_GGML_ASSERT(ne00 == ne10); + BARK_GGML_ASSERT(ne03 == ne13); const uint gqa = ne12/ne02; @@ -1018,17 +1018,17 @@ void ggml_metal_graph_compute( // TODO: need to find a better approach if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) { switch (src0t) { - case GGML_TYPE_F16: ne11_mm_min = 2; break; - case GGML_TYPE_Q8_0: ne11_mm_min = 7; break; - case GGML_TYPE_Q2_K: ne11_mm_min = 15; break; - case GGML_TYPE_Q3_K: ne11_mm_min = 7; break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: ne11_mm_min = 15; break; - case GGML_TYPE_Q4_K: ne11_mm_min = 11; break; - case GGML_TYPE_Q5_0: // not tested yet - case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet - case GGML_TYPE_Q5_K: ne11_mm_min = 7; break; - case GGML_TYPE_Q6_K: ne11_mm_min = 7; break; + case BARK_GGML_TYPE_F16: ne11_mm_min = 2; break; + case BARK_GGML_TYPE_Q8_0: ne11_mm_min = 7; break; + case BARK_GGML_TYPE_Q2_K: ne11_mm_min = 15; break; + case BARK_GGML_TYPE_Q3_K: ne11_mm_min = 7; break; + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: ne11_mm_min = 15; break; + case BARK_GGML_TYPE_Q4_K: ne11_mm_min = 11; break; + case BARK_GGML_TYPE_Q5_0: // not tested yet + case BARK_GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet + case BARK_GGML_TYPE_Q5_K: ne11_mm_min = 7; break; + case BARK_GGML_TYPE_Q6_K: ne11_mm_min = 7; break; default: ne11_mm_min = 1; break; } } @@ -1037,24 +1037,24 @@ void ggml_metal_graph_compute( // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel if ([ctx->device supportsFamily:MTLGPUFamilyApple7] && - !ggml_is_transposed(src0) && - !ggml_is_transposed(src1) && - src1t == GGML_TYPE_F32 && + !bark_ggml_is_transposed(src0) && + !bark_ggml_is_transposed(src1) && + src1t == BARK_GGML_TYPE_F32 && ne00 % 32 == 0 && ne11 > ne11_mm_min) { //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12); switch (src0->type) { - case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32]; break; - case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break; - case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break; - case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break; - case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q8_0_f32]; break; - case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break; - case GGML_TYPE_Q3_K: [encoder 
setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break; - case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break; - case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break; - case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break; - default: GGML_ASSERT(false && "MUL MAT-MAT not implemented"); + case BARK_GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32]; break; + case BARK_GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break; + case BARK_GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break; + case BARK_GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break; + case BARK_GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q8_0_f32]; break; + case BARK_GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break; + case BARK_GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break; + case BARK_GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break; + case BARK_GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break; + case BARK_GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break; + default: BARK_GGML_ASSERT(false && "MUL MAT-MAT not implemented"); } [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; @@ -1080,12 +1080,12 @@ void ggml_metal_graph_compute( // use custom matrix x vector kernel switch (src0t) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { [encoder setComputePipelineState:ctx->pipeline_mul_mv_f32_f32]; nrows = 4; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { nth0 = 32; nth1 = 1; @@ -1099,73 +1099,73 @@ void ggml_metal_graph_compute( nrows = 4; } } break; - case GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_0: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 8; nth1 = 8; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_0_f32]; } break; - case GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q4_1: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 8; nth1 = 8; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_1_f32]; } break; - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_0: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 8; nth1 = 8; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q8_0_f32]; } break; - case GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q2_K: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 2; nth1 = 32; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q2_K_f32]; } break; - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q3_K: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 2; nth1 = 32; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q3_K_f32]; } break; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 4; //1; nth1 = 8; //32; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_K_f32]; 
} break; - case GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q5_K: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 2; nth1 = 32; [encoder setComputePipelineState:ctx->pipeline_mul_mv_q5_K_f32]; } break; - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q6_K: { - GGML_ASSERT(ne02 == 1); - GGML_ASSERT(ne12 == 1); + BARK_GGML_ASSERT(ne02 == 1); + BARK_GGML_ASSERT(ne12 == 1); nth0 = 2; nth1 = 32; @@ -1173,8 +1173,8 @@ void ggml_metal_graph_compute( } break; default: { - GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t); - GGML_ASSERT(false && "not implemented"); + BARK_GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t); + BARK_GGML_ASSERT(false && "not implemented"); } }; @@ -1197,24 +1197,24 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16]; [encoder setBytes:&gqa length:sizeof(gqa) atIndex:17]; - if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 || - src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) { + if (src0t == BARK_GGML_TYPE_Q4_0 || src0t == BARK_GGML_TYPE_Q4_1 || src0t == BARK_GGML_TYPE_Q8_0 || + src0t == BARK_GGML_TYPE_Q2_K) { // || src0t == BARK_GGML_TYPE_Q4_K) { [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } - else if (src0t == GGML_TYPE_Q4_K) { + else if (src0t == BARK_GGML_TYPE_Q4_K) { [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } - else if (src0t == GGML_TYPE_Q3_K) { -#ifdef GGML_QKK_64 + else if (src0t == BARK_GGML_TYPE_Q3_K) { +#ifdef BARK_GGML_QKK_64 [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; #else [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; #endif } - else if (src0t == GGML_TYPE_Q5_K) { + else if (src0t == BARK_GGML_TYPE_Q5_K) { [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } - else if (src0t == GGML_TYPE_Q6_K) { + else if (src0t == BARK_GGML_TYPE_Q6_K) { [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; } else { int64_t ny = (ne11 + nrows - 1)/nrows; @@ -1222,20 +1222,20 @@ void ggml_metal_graph_compute( } } } break; - case GGML_OP_GET_ROWS: + case BARK_GGML_OP_GET_ROWS: { switch (src0->type) { - case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_get_rows_f32]; break; - case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break; - case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break; - case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break; - case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q8_0]; break; - case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break; - case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break; - case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break; - case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break; - case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break; - default: GGML_ASSERT(false && "not implemented"); + case BARK_GGML_TYPE_F32: 
[encoder setComputePipelineState:ctx->pipeline_get_rows_f32]; break; + case BARK_GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break; + case BARK_GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break; + case BARK_GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break; + case BARK_GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q8_0]; break; + case BARK_GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break; + case BARK_GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break; + case BARK_GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break; + case BARK_GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break; + case BARK_GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break; + default: BARK_GGML_ASSERT(false && "not implemented"); } [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; @@ -1245,11 +1245,11 @@ void ggml_metal_graph_compute( [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4]; [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:5]; - const int64_t n = ggml_nelements(src1); + const int64_t n = bark_ggml_nelements(src1); [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; - case GGML_OP_RMS_NORM: + case BARK_GGML_OP_RMS_NORM: { float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -1264,11 +1264,11 @@ void ggml_metal_graph_compute( [encoder setBytes:&eps length:sizeof( float) atIndex:4]; [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0]; - const int64_t nrows = ggml_nrows(src0); + const int64_t nrows = bark_ggml_nrows(src0); [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_NORM: + case BARK_GGML_OP_NORM: { float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -1283,13 +1283,13 @@ void ggml_metal_graph_compute( [encoder setBytes:&eps length:sizeof( float) atIndex:4]; [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0]; - const int64_t nrows = ggml_nrows(src0); + const int64_t nrows = bark_ggml_nrows(src0); [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_ALIBI: + case BARK_GGML_OP_ALIBI: { - GGML_ASSERT((src0t == GGML_TYPE_F32)); + BARK_GGML_ASSERT((src0t == BARK_GGML_TYPE_F32)); const int nth = MIN(1024, ne00); @@ -1327,9 +1327,9 @@ void ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_ROPE: + case BARK_GGML_OP_ROPE: { - GGML_ASSERT(ne10 == ne02); + BARK_GGML_ASSERT(ne10 == ne02); const int nth = MIN(1024, ne00); @@ -1343,9 +1343,9 @@ void ggml_metal_graph_compute( memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); switch (src0->type) { - case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_rope_f32]; break; - case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_rope_f16]; break; - default: GGML_ASSERT(false); + case BARK_GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_rope_f32]; break; + case BARK_GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_rope_f16]; break; + default: BARK_GGML_ASSERT(false); }; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; @@ -1375,30 +1375,30 @@ void 
ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; - case GGML_OP_DUP: - case GGML_OP_CPY: - case GGML_OP_CONT: + case BARK_GGML_OP_DUP: + case BARK_GGML_OP_CPY: + case BARK_GGML_OP_CONT: { const int nth = MIN(1024, ne00); switch (src0t) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { switch (dstt) { - case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f16]; break; - case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32]; break; - default: GGML_ASSERT(false && "not implemented"); + case BARK_GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f16]; break; + case BARK_GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32]; break; + default: BARK_GGML_ASSERT(false && "not implemented"); }; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { switch (dstt) { - case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break; - case GGML_TYPE_F32: GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break; - default: GGML_ASSERT(false && "not implemented"); + case BARK_GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break; + case BARK_GGML_TYPE_F32: BARK_GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break; + default: BARK_GGML_ASSERT(false && "not implemented"); }; } break; - default: GGML_ASSERT(false && "not implemented"); + default: BARK_GGML_ASSERT(false && "not implemented"); } [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; @@ -1424,8 +1424,8 @@ void ggml_metal_graph_compute( } break; default: { - GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op)); - GGML_ASSERT(false); + BARK_GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, bark_ggml_op_name(dst->op)); + BARK_GGML_ASSERT(false); } } } @@ -1449,8 +1449,8 @@ void ggml_metal_graph_compute( MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status]; if (status != MTLCommandBufferStatusCompleted) { - GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); - GGML_ASSERT(false); + BARK_GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status); + BARK_GGML_ASSERT(false); } } @@ -1461,122 +1461,122 @@ void ggml_metal_graph_compute( // backend interface -static const char * ggml_backend_metal_name(ggml_backend_t backend) { +static const char * bark_ggml_backend_metal_name(bark_ggml_backend_t backend) { return "Metal"; UNUSED(backend); } -static void ggml_backend_metal_free(ggml_backend_t backend) { - struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context; - ggml_metal_free(ctx); +static void bark_ggml_backend_metal_free(bark_ggml_backend_t backend) { + struct bark_ggml_metal_context * ctx = (struct bark_ggml_metal_context *)backend->context; + bark_ggml_metal_free(ctx); free(backend); } -static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { +static void * bark_ggml_backend_metal_buffer_get_base(bark_ggml_backend_buffer_t buffer) { return (void *)buffer->context; } -static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) { +static void bark_ggml_backend_metal_buffer_free_buffer(bark_ggml_backend_buffer_t buffer) { free(buffer->context); UNUSED(buffer); } -static struct ggml_backend_buffer_i metal_backend_buffer_i = { - /* 
.free_buffer = */ ggml_backend_metal_buffer_free_buffer, - /* .get_base = */ ggml_backend_metal_buffer_get_base, - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes +static struct bark_ggml_backend_buffer_i metal_backend_buffer_i = { + /* .free_buffer = */ bark_ggml_backend_metal_buffer_free_buffer, + /* .get_base = */ bark_ggml_backend_metal_buffer_get_base, + /* .get_alloc_size = */ NULL, // defaults to bark_ggml_nbytes /* .init_tensor = */ NULL, // no initialization required /* .free_tensor = */ NULL, // no cleanup required }; -static ggml_backend_buffer_t ggml_backend_metal_alloc_buffer(ggml_backend_t backend, size_t size) { - struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context; +static bark_ggml_backend_buffer_t bark_ggml_backend_metal_alloc_buffer(bark_ggml_backend_t backend, size_t size) { + struct bark_ggml_metal_context * ctx = (struct bark_ggml_metal_context *)backend->context; - void * data = ggml_metal_host_malloc(size); + void * data = bark_ggml_metal_host_malloc(size); // TODO: set proper name of the buffers - ggml_metal_add_buffer(ctx, "backend", data, size, 0); + bark_ggml_metal_add_buffer(ctx, "backend", data, size, 0); - return ggml_backend_buffer_init(backend, metal_backend_buffer_i, data, size); + return bark_ggml_backend_buffer_init(backend, metal_backend_buffer_i, data, size); } -static size_t ggml_backend_metal_get_alignment(ggml_backend_t backend) { +static size_t bark_ggml_backend_metal_get_alignment(bark_ggml_backend_t backend) { return 32; UNUSED(backend); } -static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); +static void bark_ggml_backend_metal_set_tensor_async(bark_ggml_backend_t backend, struct bark_ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + BARK_GGML_ASSERT(offset + size <= bark_ggml_nbytes(tensor) && "tensor write out of bounds"); + BARK_GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); memcpy((char *)tensor->data + offset, data, size); UNUSED(backend); } -static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); +static void bark_ggml_backend_metal_get_tensor_async(bark_ggml_backend_t backend, const struct bark_ggml_tensor * tensor, void * data, size_t offset, size_t size) { + BARK_GGML_ASSERT(offset + size <= bark_ggml_nbytes(tensor) && "tensor read out of bounds"); + BARK_GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); memcpy(data, (const char *)tensor->data + offset, size); UNUSED(backend); } -static void ggml_backend_metal_synchronize(ggml_backend_t backend) { +static void bark_ggml_backend_metal_synchronize(bark_ggml_backend_t backend) { UNUSED(backend); } -static void ggml_backend_metal_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); +static void bark_ggml_backend_metal_cpy_tensor_from(bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst) { + bark_ggml_backend_tensor_get(src, dst->data, 0, bark_ggml_nbytes(src)); UNUSED(backend); } -static 
void ggml_backend_metal_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_set_async(dst, src->data, 0, ggml_nbytes(src)); +static void bark_ggml_backend_metal_cpy_tensor_to(bark_ggml_backend_t backend, struct bark_ggml_tensor * src, struct bark_ggml_tensor * dst) { + bark_ggml_backend_tensor_set_async(dst, src->data, 0, bark_ggml_nbytes(src)); UNUSED(backend); } -static void ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context; +static void bark_ggml_backend_metal_graph_compute(bark_ggml_backend_t backend, struct bark_ggml_cgraph * cgraph) { + struct bark_ggml_metal_context * metal_ctx = (struct bark_ggml_metal_context *)backend->context; - ggml_metal_graph_compute(metal_ctx, cgraph); + bark_ggml_metal_graph_compute(metal_ctx, cgraph); } -static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { +static bool bark_ggml_backend_metal_supports_op(bark_ggml_backend_t backend, const struct bark_ggml_tensor * op) { return true; UNUSED(backend); UNUSED(op); } -static struct ggml_backend_i metal_backend_i = { - /* .get_name = */ ggml_backend_metal_name, - /* .free = */ ggml_backend_metal_free, - /* .alloc_buffer = */ ggml_backend_metal_alloc_buffer, - /* .get_alignment = */ ggml_backend_metal_get_alignment, - /* .set_tensor_async = */ ggml_backend_metal_set_tensor_async, - /* .get_tensor_async = */ ggml_backend_metal_get_tensor_async, - /* .synchronize = */ ggml_backend_metal_synchronize, - /* .cpy_tensor_from = */ ggml_backend_metal_cpy_tensor_from, - /* .cpy_tensor_to = */ ggml_backend_metal_cpy_tensor_to, +static struct bark_ggml_backend_i metal_backend_i = { + /* .get_name = */ bark_ggml_backend_metal_name, + /* .free = */ bark_ggml_backend_metal_free, + /* .alloc_buffer = */ bark_ggml_backend_metal_alloc_buffer, + /* .get_alignment = */ bark_ggml_backend_metal_get_alignment, + /* .set_tensor_async = */ bark_ggml_backend_metal_set_tensor_async, + /* .get_tensor_async = */ bark_ggml_backend_metal_get_tensor_async, + /* .synchronize = */ bark_ggml_backend_metal_synchronize, + /* .cpy_tensor_from = */ bark_ggml_backend_metal_cpy_tensor_from, + /* .cpy_tensor_to = */ bark_ggml_backend_metal_cpy_tensor_to, /* .graph_plan_create = */ NULL, // the metal implementation does not require creating graph plans atm /* .graph_plan_free = */ NULL, /* .graph_plan_compute = */ NULL, - /* .graph_compute = */ ggml_backend_metal_graph_compute, - /* .supports_op = */ ggml_backend_metal_supports_op, + /* .graph_compute = */ bark_ggml_backend_metal_graph_compute, + /* .supports_op = */ bark_ggml_backend_metal_supports_op, }; -ggml_backend_t ggml_backend_metal_init(void) { - struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context)); +bark_ggml_backend_t bark_ggml_backend_metal_init(void) { + struct bark_ggml_metal_context * ctx = malloc(sizeof(struct bark_ggml_metal_context)); - ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS); + ctx = bark_ggml_metal_init(BARK_GGML_DEFAULT_N_THREADS); - ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend)); + bark_ggml_backend_t metal_backend = malloc(sizeof(struct bark_ggml_backend)); - *metal_backend = (struct ggml_backend) { + *metal_backend = (struct bark_ggml_backend) { /* .interface = */ metal_backend_i, /* .context = */ ctx, }; @@ -1584,12 +1584,12 @@ ggml_backend_t ggml_backend_metal_init(void) { return metal_backend; } 
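For orientation, the Metal backend entry points keep upstream ggml's C interface; only the bark_ggml_ prefix changes. A minimal usage sketch, assuming this tree's cpp/ggml-metal.h declares the prefixed functions renamed in the hunks above (the helper name and header path are illustrative, not taken from bark.cpp):

/* Sketch only: creating and configuring the renamed Metal backend.
 * Assumption: cpp/ggml-metal.h exposes the bark_ggml_* declarations above. */
#include "ggml-metal.h"

static bark_ggml_backend_t make_metal_backend(void) {   /* helper name is illustrative */
    bark_ggml_backend_t backend = bark_ggml_backend_metal_init();
    if (backend != NULL && bark_ggml_backend_is_metal(backend)) {
        /* cap the number of command buffers used when dispatching a graph */
        bark_ggml_backend_metal_set_n_cb(backend, 1);
    }
    return backend;
}

The n_cb value is clamped to BARK_GGML_METAL_MAX_BUFFERS by the renamed bark_ggml_metal_set_n_cb above; 1 is just a conservative placeholder.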
-bool ggml_backend_is_metal(ggml_backend_t backend) { - return backend->iface.get_name == ggml_backend_metal_name; +bool bark_ggml_backend_is_metal(bark_ggml_backend_t backend) { + return backend->iface.get_name == bark_ggml_backend_metal_name; } -void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) { - struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context; +void bark_ggml_backend_metal_set_n_cb(bark_ggml_backend_t backend, int n_cb) { + struct bark_ggml_metal_context * ctx = (struct bark_ggml_metal_context *)backend->context; - ggml_metal_set_n_cb(ctx, n_cb); + bark_ggml_metal_set_n_cb(ctx, n_cb); } diff --git a/cpp/ggml-opencl.cpp b/cpp/ggml-opencl.cpp index 4a331f2..59d4bd9 100644 --- a/cpp/ggml-opencl.cpp +++ b/cpp/ggml-opencl.cpp @@ -802,7 +802,7 @@ __kernel void KERNEL_NAME(__global TYPE* x, const int x_offset, __global TYPE* y do { \ cl_int err_ = (err); \ if (err_ != CL_SUCCESS) { \ - fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n", \ + fprintf(stderr, "bark_ggml_opencl: %s error %d at %s:%d\n", \ #err, err_, __FILE__, __LINE__); \ exit(1); \ } \ @@ -812,7 +812,7 @@ __kernel void KERNEL_NAME(__global TYPE* x, const int x_offset, __global TYPE* y do { \ CLBlastStatusCode err_ = (err); \ if (err_ != CLBlastSuccess) { \ - fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n", \ + fprintf(stderr, "bark_ggml_opencl: %s error %d at %s:%d\n", \ #err, err_, __FILE__, __LINE__); \ exit(1); \ } \ @@ -920,7 +920,7 @@ static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, co program_log = (char*) malloc(log_size + 1); program_log[log_size] = '\0'; clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL); - fprintf(stderr, "ggml_opencl: kernel compile error:\n\n%s\n", program_log); + fprintf(stderr, "bark_ggml_opencl: kernel compile error:\n\n%s\n", program_log); free(program_log); exit(1); } @@ -928,7 +928,7 @@ static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, co return p; } -void ggml_cl_init(void) { +void bark_ggml_cl_init(void) { cl_int err; struct cl_device; @@ -1000,12 +1000,12 @@ void ggml_cl_init(void) { } if (n_devices == 0) { - fprintf(stderr, "ggml_opencl: could find any OpenCL devices.\n"); + fprintf(stderr, "bark_ggml_opencl: could find any OpenCL devices.\n"); exit(1); } - char * user_platform_string = getenv("GGML_OPENCL_PLATFORM"); - char * user_device_string = getenv("GGML_OPENCL_DEVICE"); + char * user_platform_string = getenv("BARK_GGML_OPENCL_PLATFORM"); + char * user_device_string = getenv("BARK_GGML_OPENCL_DEVICE"); int user_platform_number = -1; int user_device_number = -1; @@ -1019,7 +1019,7 @@ void ggml_cl_init(void) { if (user_platform_number != -1 && user_device_number != -1) { cl_platform* platform = &platforms[user_platform_number]; if ((unsigned)user_device_number >= platform->n_devices) { - fprintf(stderr, "ggml_opencl: invalid device number %d\n", user_device_number); + fprintf(stderr, "bark_ggml_opencl: invalid device number %d\n", user_device_number); exit(1); } default_device = &platform->devices[user_device_number]; @@ -1038,7 +1038,7 @@ void ggml_cl_init(void) { } } if (user_platform_number == -1) { - fprintf(stderr, "ggml_opencl: no platform matching '%s' was found.\n", user_platform_string); + fprintf(stderr, "bark_ggml_opencl: no platform matching '%s' was found.\n", user_platform_string); exit(1); } } @@ -1048,7 +1048,7 @@ void ggml_cl_init(void) { n_selected_devices = p->n_devices; default_device = p->default_device; if 
(n_selected_devices == 0) { - fprintf(stderr, "ggml_opencl: selected platform '%s' does not have any devices.\n", p->name); + fprintf(stderr, "bark_ggml_opencl: selected platform '%s' does not have any devices.\n", p->name); exit(1); } } @@ -1062,7 +1062,7 @@ void ggml_cl_init(void) { } } if (user_device_number == -1) { - fprintf(stderr, "ggml_opencl: no device matching '%s' was found.\n", user_device_string); + fprintf(stderr, "bark_ggml_opencl: no device matching '%s' was found.\n", user_device_string); exit(1); } } @@ -1072,17 +1072,17 @@ void ggml_cl_init(void) { default_device = &selected_devices[0]; } - GGML_ASSERT(n_selected_devices > 0); + BARK_GGML_ASSERT(n_selected_devices > 0); if (default_device == NULL) { default_device = &selected_devices[0]; } } - fprintf(stderr, "ggml_opencl: selecting platform: '%s'\n", default_device->platform->name); - fprintf(stderr, "ggml_opencl: selecting device: '%s'\n", default_device->name); + fprintf(stderr, "bark_ggml_opencl: selecting platform: '%s'\n", default_device->platform->name); + fprintf(stderr, "bark_ggml_opencl: selecting device: '%s'\n", default_device->name); if (default_device->type != CL_DEVICE_TYPE_GPU) { - fprintf(stderr, "ggml_opencl: warning, not a GPU: '%s'.\n", default_device->name); + fprintf(stderr, "bark_ggml_opencl: warning, not a GPU: '%s'.\n", default_device->name); } platform = default_device->platform->id; @@ -1095,7 +1095,7 @@ void ggml_cl_init(void) { ext_buffer[ext_str_size] = '\0'; // ensure it is null terminated // Check if ext_buffer contains cl_khr_fp16 fp16_support = strstr(ext_buffer, "cl_khr_fp16") != NULL; - fprintf(stderr, "ggml_opencl: device FP16 support: %s\n", fp16_support ? "true" : "false"); + fprintf(stderr, "bark_ggml_opencl: device FP16 support: %s\n", fp16_support ? 
"true" : "false"); cl_context_properties properties[] = { (intptr_t)CL_CONTEXT_PLATFORM, (intptr_t)platform, 0 @@ -1145,102 +1145,102 @@ void ggml_cl_init(void) { CL_CHECK((mul_f32_cl = clCreateKernel(program, "mul_f32", &err), err)); } -static cl_kernel* ggml_get_to_fp32_cl(ggml_type type) { +static cl_kernel* bark_ggml_get_to_fp32_cl(bark_ggml_type type) { switch (type) { - case GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_0: return &dequantize_row_q4_0_cl; - case GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q4_1: return &dequantize_row_q4_1_cl; - case GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_0: return &dequantize_row_q5_0_cl; - case GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q5_1: return &dequantize_row_q5_1_cl; - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_0: return &dequantize_row_q8_0_cl; - case GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q2_K: return &dequantize_block_q2_k_cl; - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q3_K: return &dequantize_block_q3_k_cl; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: return &dequantize_block_q4_k_cl; - case GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q5_K: return &dequantize_block_q5_k_cl; - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q6_K: return &dequantize_block_q6_k_cl; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: return &convert_row_f16_cl; default: return nullptr; } } -static size_t ggml_cl_global_denom(ggml_type type) { +static size_t bark_ggml_cl_global_denom(bark_ggml_type type) { switch (type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: return 1; - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: return 4; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: return 8; - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: return 4; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: default: return 1; } } -static size_t ggml_cl_local_size(ggml_type type) { +static size_t bark_ggml_cl_local_size(bark_ggml_type type) { switch (type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: return 0; - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: return 64; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: return 32; - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: return 64; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: default: return 0; } } -static cl_kernel* ggml_get_dequantize_mul_mat_vec_cl(ggml_type type) { +static cl_kernel* bark_ggml_get_dequantize_mul_mat_vec_cl(bark_ggml_type type) { switch (type) { - case GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_0: return &dequantize_mul_mat_vec_q4_0_cl; - case GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q4_1: return &dequantize_mul_mat_vec_q4_1_cl; - case GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_0: return &dequantize_mul_mat_vec_q5_0_cl; - case GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q5_1: return &dequantize_mul_mat_vec_q5_1_cl; - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_0: return &dequantize_mul_mat_vec_q8_0_cl; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: return &convert_mul_mat_vec_f16_cl; - case 
GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q2_K: return &dequantize_mul_mat_vec_q2_K_cl; - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q3_K: return &dequantize_mul_mat_vec_q3_K_cl; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: return &dequantize_mul_mat_vec_q4_K_cl; - case GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q5_K: return &dequantize_mul_mat_vec_q5_K_cl; - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q6_K: return &dequantize_mul_mat_vec_q6_K_cl; default: return nullptr; @@ -1272,7 +1272,7 @@ struct cl_buffer { static cl_buffer g_cl_buffer_pool[MAX_CL_BUFFERS]; static std::atomic_flag g_cl_pool_lock = ATOMIC_FLAG_INIT; -static cl_mem ggml_cl_pool_malloc(size_t size, size_t * actual_size) { +static cl_mem bark_ggml_cl_pool_malloc(size_t size, size_t * actual_size) { scoped_spin_lock lock(g_cl_pool_lock); cl_int err; @@ -1314,7 +1314,7 @@ static cl_mem ggml_cl_pool_malloc(size_t size, size_t * actual_size) { return mem; } -static void ggml_cl_pool_free(cl_mem mem, size_t size) { +static void bark_ggml_cl_pool_free(cl_mem mem, size_t size) { scoped_spin_lock lock(g_cl_pool_lock); for (int i = 0; i < MAX_CL_BUFFERS; ++i) { @@ -1329,8 +1329,8 @@ static void ggml_cl_pool_free(cl_mem mem, size_t size) { clReleaseMemObject(mem); } -void ggml_cl_free_data(const struct ggml_tensor* tensor) { - if (tensor->backend != GGML_BACKEND_GPU) { +void bark_ggml_cl_free_data(const struct bark_ggml_tensor* tensor) { + if (tensor->backend != BARK_GGML_BACKEND_GPU) { return; } @@ -1338,7 +1338,7 @@ void ggml_cl_free_data(const struct ggml_tensor* tensor) { clReleaseMemObject(mem); } -static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) { +static cl_int bark_ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct bark_ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) { cl_int err; const uint64_t ne0 = src->ne[0]; const uint64_t ne1 = src->ne[1]; @@ -1346,9 +1346,9 @@ static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t o const uint64_t nb1 = src->nb[1]; const uint64_t nb2 = src->nb[2]; const uint64_t nb3 = src->nb[3]; - const enum ggml_type type = src->type; - const size_t ts = ggml_type_size(type); - const size_t bs = ggml_blck_size(type); + const enum bark_ggml_type type = src->type; + const size_t ts = bark_ggml_type_size(type); + const size_t bs = bark_ggml_blck_size(type); const uint64_t row_size = ts*ne0/bs; const char * x = (const char *) src->data + i2*nb2 + i3*nb3; @@ -1387,8 +1387,8 @@ static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t o return CL_SUCCESS; } -static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(src1->backend == GGML_BACKEND_GPU); +static void bark_ggml_cl_mul_f32(const bark_ggml_tensor * src0, const bark_ggml_tensor * src1, bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src1->backend == BARK_GGML_BACKEND_GPU); const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; @@ -1404,9 +1404,9 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, size_t x_size; size_t d_size; - cl_mem d_X = ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size); // src0 + cl_mem d_X = bark_ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size); // src0 cl_mem d_Y = (cl_mem) src1->extra; // src1 is already on device, broadcasted. 
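Editor's note on the buffer pool touched by this hunk: bark_ggml_cl_pool_malloc / bark_ggml_cl_pool_free keep a small fixed array of previously allocated cl_mem buffers behind an atomic-flag spin lock, handing back a cached buffer when one of sufficient size is available and allocating a fresh one otherwise. The sketch below is a hedged, standalone illustration of that reuse-or-allocate pattern only: the names (PoolEntry, ScopedSpinLock, pool_malloc, pool_free, MAX_BUFFERS) are illustrative assumptions, and plain malloc/free stands in for clCreateBuffer/clReleaseMemObject; it is not the bark_ggml implementation itself.

#include <atomic>
#include <cstddef>
#include <cstdlib>

// One cached allocation per slot; a null ptr marks an empty slot.
struct PoolEntry {
    void  *ptr;
    size_t size;
};

static const int MAX_BUFFERS = 16;          // stand-in for MAX_CL_BUFFERS
static PoolEntry g_pool[MAX_BUFFERS];       // static storage: starts zeroed/empty
static std::atomic_flag g_pool_lock = ATOMIC_FLAG_INIT;

// RAII spin lock in the spirit of the patch's scoped_spin_lock.
struct ScopedSpinLock {
    std::atomic_flag &flag;
    explicit ScopedSpinLock(std::atomic_flag &f) : flag(f) {
        while (flag.test_and_set(std::memory_order_acquire)) { /* spin */ }
    }
    ~ScopedSpinLock() { flag.clear(std::memory_order_release); }
};

// Return a cached buffer of at least `size` bytes, or allocate a new one.
void *pool_malloc(size_t size, size_t *actual_size) {
    ScopedSpinLock lock(g_pool_lock);
    for (int i = 0; i < MAX_BUFFERS; ++i) {
        if (g_pool[i].ptr != nullptr && g_pool[i].size >= size) {
            void *p     = g_pool[i].ptr;
            *actual_size = g_pool[i].size;  // caller must remember the real capacity
            g_pool[i].ptr = nullptr;        // slot becomes free again
            return p;
        }
    }
    *actual_size = size;
    return std::malloc(size);               // the real code calls clCreateBuffer here
}

// Hand a buffer back to the pool, or release it if every slot is occupied.
void pool_free(void *ptr, size_t size) {
    ScopedSpinLock lock(g_pool_lock);
    for (int i = 0; i < MAX_BUFFERS; ++i) {
        if (g_pool[i].ptr == nullptr) {
            g_pool[i].ptr  = ptr;
            g_pool[i].size = size;
            return;
        }
    }
    std::free(ptr);                         // the real code calls clReleaseMemObject here
}

The callers in this file pair the two calls the same way: bark_ggml_cl_mul_f32 pool-allocates scratch buffers for src0 and dst before its loop and returns them with bark_ggml_cl_pool_free once the result has been read back to the host.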
- cl_mem d_D = ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size); // dst + cl_mem d_D = bark_ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size); // dst for (int64_t i03 = 0; i03 < ne03; i03++) { @@ -1416,7 +1416,7 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, cl_event ev; // copy src0 to device - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, i0, src0, i03, i02, &ev)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_X, i0, src0, i03, i02, &ev)); if (nb10 == sizeof(float)) { // Contiguous, avoid overhead from queueing many kernel runs @@ -1471,16 +1471,16 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * ne00*ne01, d, 0, NULL, NULL)); } } - ggml_cl_pool_free(d_X, x_size); - ggml_cl_pool_free(d_D, d_size); + bark_ggml_cl_pool_free(d_X, x_size); + bark_ggml_cl_pool_free(d_D, d_size); } -void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32); - ggml_cl_mul_f32(src0, src1, dst); +void bark_ggml_cl_mul(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F32 && src1->type == BARK_GGML_TYPE_F32 && dst->type == BARK_GGML_TYPE_F32); + bark_ggml_cl_mul_f32(src0, src1, dst); } -static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void bark_ggml_cl_mul_mat_f32(const bark_ggml_tensor * src0, const bark_ggml_tensor * src1, bark_ggml_tensor * dst) { const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; @@ -1507,13 +1507,13 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr size_t y_size; size_t d_size; cl_mem d_X; - if (src0->backend == GGML_BACKEND_GPU) { // NOLINT + if (src0->backend == BARK_GGML_BACKEND_GPU) { // NOLINT d_X = (cl_mem) src0->extra; } else { - d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size); + d_X = bark_ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size); } - cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); - cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); + cl_mem d_Y = bark_ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); + cl_mem d_D = bark_ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); size_t x_offset = 0; int64_t pi02 = -1; @@ -1526,14 +1526,14 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr int64_t i02 = i12 / r2; // copy data to device - if (src0->backend == GGML_BACKEND_GPU) { + if (src0->backend == BARK_GGML_BACKEND_GPU) { x_offset = (i03 * ne02 + i02) * x_ne; } else if (i02 != pi02 || i03 != pi03) { - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL)); pi02 = i02; pi03 = i03; } - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); CL_CHECK(clFinish(queue)); @@ -1550,7 +1550,7 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr &queue, &ev_sgemm); if (status != clblast::StatusCode::kSuccess) { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } // copy dst to host @@ -1559,15 +1559,15 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const 
ggml_tensor * sr } } - if (src0->backend != GGML_BACKEND_GPU) { - ggml_cl_pool_free(d_X, x_size); + if (src0->backend != BARK_GGML_BACKEND_GPU) { + bark_ggml_cl_pool_free(d_X, x_size); } - ggml_cl_pool_free(d_Y, y_size); - ggml_cl_pool_free(d_D, d_size); + bark_ggml_cl_pool_free(d_Y, y_size); + bark_ggml_cl_pool_free(d_D, d_size); } -static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t /* wsize */) { - GGML_ASSERT(fp16_support); +static void bark_ggml_cl_mul_mat_f16(const bark_ggml_tensor * src0, const bark_ggml_tensor * src1, bark_ggml_tensor * dst, void * wdata, size_t /* wsize */) { + BARK_GGML_ASSERT(fp16_support); const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; @@ -1590,8 +1590,8 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr const int64_t r2 = ne12 / ne02; const int64_t r3 = ne13 / ne03; - const ggml_fp16_t alpha = ggml_fp32_to_fp16(1.0f); - const ggml_fp16_t beta = ggml_fp32_to_fp16(0.0f); + const bark_ggml_fp16_t alpha = bark_ggml_fp32_to_fp16(1.0f); + const bark_ggml_fp16_t beta = bark_ggml_fp32_to_fp16(0.0f); const int x_ne = ne01 * ne00; const int y_ne = ne11 * ne10; const int d_ne = ne11 * ne01; @@ -1600,13 +1600,13 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr size_t y_size; size_t d_size; cl_mem d_X; - if (src0->backend == GGML_BACKEND_GPU) { // NOLINT + if (src0->backend == BARK_GGML_BACKEND_GPU) { // NOLINT d_X = (cl_mem) src0->extra; } else { - d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size); + d_X = bark_ggml_cl_pool_malloc(sizeof(bark_ggml_fp16_t) * x_ne, &x_size); } - cl_mem d_Y = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * y_ne, &y_size); - cl_mem d_D = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * d_ne, &d_size); + cl_mem d_Y = bark_ggml_cl_pool_malloc(sizeof(bark_ggml_fp16_t) * y_ne, &y_size); + cl_mem d_D = bark_ggml_cl_pool_malloc(sizeof(bark_ggml_fp16_t) * d_ne, &d_size); bool src1_cont_rows = nb10 == sizeof(float); bool src1_cont_cols = (size_t)nb11 == ne11*sizeof(float); @@ -1622,25 +1622,25 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr int64_t i02 = i12 / r2; // copy src0 to device - if (src0->backend == GGML_BACKEND_GPU) { + if (src0->backend == BARK_GGML_BACKEND_GPU) { x_offset = (i03 * ne02 + i02) * x_ne; } else if (i02 != pi02 || i03 != pi03) { - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL)); pi02 = i02; pi03 = i03; } // convert src1 to fp16 // TODO: use multiple threads - ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata + (ne11 * ne10) * (i13 * ne12 + i12); + bark_ggml_fp16_t * const tmp = (bark_ggml_fp16_t *) wdata + (ne11 * ne10) * (i13 * ne12 + i12); char * src1i = (char *) src1->data + i13*nb13 + i12*nb12; if (src1_cont_rows) { if (src1_cont_cols) { - ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11); + bark_ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11); } else { for (int64_t i11 = 0; i11 < ne11; i11++) { - ggml_fp32_to_fp16_row((float *) (src1i + i11*nb11), tmp + i11*ne10, ne10); + bark_ggml_fp32_to_fp16_row((float *) (src1i + i11*nb11), tmp + i11*ne10, ne10); } } } @@ -1648,13 +1648,13 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr for (int64_t i11 = 0; i11 < ne11; i11++) { for (int64_t i10 = 0; i10 < ne10; i10++) { // very slow due to no inlining - tmp[i11*ne10 + i10] = 
ggml_fp32_to_fp16(*(float *) (src1i + i11*nb11 + i10*nb10)); + tmp[i11*ne10 + i10] = bark_ggml_fp32_to_fp16(*(float *) (src1i + i11*nb11 + i10*nb10)); } } } // copy src1 to device - CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL)); + CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(bark_ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL)); CL_CHECK(clFinish(queue)); @@ -1671,26 +1671,26 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr &queue, &ev_sgemm); if (status != clblast::StatusCode::kSuccess) { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } // copy dst to host, then convert to float - CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL)); + CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(bark_ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL)); float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - ggml_fp16_to_fp32_row(tmp, d, d_ne); + bark_ggml_fp16_to_fp32_row(tmp, d, d_ne); } } - if (src0->backend != GGML_BACKEND_GPU) { - ggml_cl_pool_free(d_X, x_size); + if (src0->backend != BARK_GGML_BACKEND_GPU) { + bark_ggml_cl_pool_free(d_X, x_size); } - ggml_cl_pool_free(d_Y, y_size); - ggml_cl_pool_free(d_D, d_size); + bark_ggml_cl_pool_free(d_Y, y_size); + bark_ggml_cl_pool_free(d_D, d_size); } -static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void bark_ggml_cl_mul_mat_q_f32(const bark_ggml_tensor * src0, const bark_ggml_tensor * src1, bark_ggml_tensor * dst) { const int64_t ne00 = src0->ne[0]; const int64_t ne01 = src0->ne[1]; const int64_t ne02 = src0->ne[2]; @@ -1703,7 +1703,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * const int nb2 = dst->nb[2]; const int nb3 = dst->nb[3]; - const ggml_type type = src0->type; + const bark_ggml_type type = src0->type; const bool mul_mat_vec = ne11 == 1; const int64_t r2 = ne12 / ne02; @@ -1714,8 +1714,8 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * const int x_ne = ne01 * ne00; const int y_ne = ne11 * ne10; const int d_ne = ne11 * ne01; - const int x_bps = x_ne / ggml_blck_size(type); // blocks per 2D slice - const size_t q_sz = ggml_type_size(type) * x_bps; + const int x_bps = x_ne / bark_ggml_blck_size(type); // blocks per 2D slice + const size_t q_sz = bark_ggml_type_size(type) * x_bps; size_t x_size; size_t y_size; @@ -1723,21 +1723,21 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * size_t q_size; cl_mem d_X; if (!mul_mat_vec) { - d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size); + d_X = bark_ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size); } - cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); - cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); + cl_mem d_Y = bark_ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); + cl_mem d_D = bark_ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); cl_mem d_Q; - if (src0->backend == GGML_BACKEND_CPU) { - d_Q = ggml_cl_pool_malloc(q_sz, &q_size); + if (src0->backend == BARK_GGML_BACKEND_CPU) { + d_Q = bark_ggml_cl_pool_malloc(q_sz, &q_size); } - cl_kernel* to_fp32_cl = ggml_get_to_fp32_cl(type); - cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type); - GGML_ASSERT(to_fp32_cl != nullptr); + cl_kernel* to_fp32_cl = bark_ggml_get_to_fp32_cl(type); + cl_kernel* dmmv = bark_ggml_get_dequantize_mul_mat_vec_cl(type); + BARK_GGML_ASSERT(to_fp32_cl 
!= nullptr); - const size_t global_denom = ggml_cl_global_denom(type); - const size_t local = ggml_cl_local_size(type); + const size_t global_denom = bark_ggml_cl_global_denom(type); + const size_t local = bark_ggml_cl_local_size(type); size_t ev_idx = 0; std::vector events; @@ -1752,22 +1752,22 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * int64_t i02 = i12 / r2; // copy src0 to device if necessary - if (src0->backend == GGML_BACKEND_CPU) { + if (src0->backend == BARK_GGML_BACKEND_CPU) { if (i02 != pi02 || i03 != pi03) { events.emplace_back(); - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++)); pi02 = i02; pi03 = i03; } - } else if (src0->backend == GGML_BACKEND_GPU) { + } else if (src0->backend == BARK_GGML_BACKEND_GPU) { d_Q = (cl_mem) src0->extra; } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel // copy src1 to device events.emplace_back(); - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, events.data() + ev_idx++)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, events.data() + ev_idx++)); // compute const size_t global = ne01 * CL_DMMV_BLOCK_SIZE; @@ -1783,13 +1783,13 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * } else { // general dequantization kernel + CLBlast matrix matrix multiplication // convert src0 to fp32 on device const size_t global = x_ne / global_denom; - const size_t offset = src0->backend == GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0; + const size_t offset = src0->backend == BARK_GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0; CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q)); CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X)); CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, offset > 0 ? &offset : NULL, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? 
events.data() : NULL, NULL)); // copy src1 to device - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); events.emplace_back(); @@ -1808,7 +1808,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * &queue, events.data() + ev_idx++); if (status != clblast::StatusCode::kSuccess) { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } @@ -1825,101 +1825,101 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * } if (!mul_mat_vec) { - ggml_cl_pool_free(d_X, x_size); + bark_ggml_cl_pool_free(d_X, x_size); } - ggml_cl_pool_free(d_Y, y_size); - ggml_cl_pool_free(d_D, d_size); - if (src0->backend == GGML_BACKEND_CPU) { - ggml_cl_pool_free(d_Q, q_size); + bark_ggml_cl_pool_free(d_Y, y_size); + bark_ggml_cl_pool_free(d_D, d_size); + if (src0->backend == BARK_GGML_BACKEND_CPU) { + bark_ggml_cl_pool_free(d_Q, q_size); } } -bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { +bool bark_ggml_cl_can_mul_mat(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst) { const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; const int64_t ne1 = dst->ne[1]; // TODO: find the optimal values for these - if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && - src1->type == GGML_TYPE_F32 && - dst->type == GGML_TYPE_F32 && - ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == GGML_BACKEND_GPU)) { + if ((src0->type == BARK_GGML_TYPE_F32 || src0->type == BARK_GGML_TYPE_F16 || bark_ggml_is_quantized(src0->type)) && + src1->type == BARK_GGML_TYPE_F32 && + dst->type == BARK_GGML_TYPE_F32 && + ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == BARK_GGML_BACKEND_GPU)) { return true; } return false; } -static bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) { +static bool bark_ggml_cl_mul_mat_use_f16(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * /* dst */) { // If device doesn't support FP16 if (!fp16_support) { return false; } - size_t src0_sz = ggml_nbytes(src0); - size_t src1_sz = ggml_nbytes(src1); + size_t src0_sz = bark_ggml_nbytes(src0); + size_t src1_sz = bark_ggml_nbytes(src1); // mul_mat_q: src0 is converted to fp32 on device size_t mul_mat_q_transfer = src0_sz + src1_sz; // mul_mat_f16: src1 is converted to fp16 on cpu - size_t mul_mat_f16_transfer = src0_sz + sizeof(ggml_fp16_t) * ggml_nelements(src1); + size_t mul_mat_f16_transfer = src0_sz + sizeof(bark_ggml_fp16_t) * bark_ggml_nelements(src1); // choose the smaller one to transfer to the device // TODO: this is not always the best choice due to the overhead of converting to fp16 return mul_mat_f16_transfer < mul_mat_q_transfer; } -void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize) { - GGML_ASSERT(ggml_cl_can_mul_mat(src0, src1, dst)); +void bark_ggml_cl_mul_mat(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst, void * wdata, size_t wsize) { + BARK_GGML_ASSERT(bark_ggml_cl_can_mul_mat(src0, src1, dst)); - if (src0->type == GGML_TYPE_F32) { - ggml_cl_mul_mat_f32(src0, src1, dst); + if (src0->type == BARK_GGML_TYPE_F32) { + 
bark_ggml_cl_mul_mat_f32(src0, src1, dst); } - else if (src0->type == GGML_TYPE_F16) { - if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) { - ggml_cl_mul_mat_f16(src0, src1, dst, wdata, wsize); + else if (src0->type == BARK_GGML_TYPE_F16) { + if (bark_ggml_cl_mul_mat_use_f16(src0, src1, dst)) { + bark_ggml_cl_mul_mat_f16(src0, src1, dst, wdata, wsize); } else { - ggml_cl_mul_mat_q_f32(src0, src1, dst); + bark_ggml_cl_mul_mat_q_f32(src0, src1, dst); } } - else if (ggml_is_quantized(src0->type)) { - ggml_cl_mul_mat_q_f32(src0, src1, dst); + else if (bark_ggml_is_quantized(src0->type)) { + bark_ggml_cl_mul_mat_q_f32(src0, src1, dst); } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } -size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { - if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) { - return ggml_nelements(src1) * sizeof(ggml_fp16_t); +size_t bark_ggml_cl_mul_mat_get_wsize(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst) { + if (bark_ggml_cl_mul_mat_use_f16(src0, src1, dst)) { + return bark_ggml_nelements(src1) * sizeof(bark_ggml_fp16_t); } return 0; } -void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) { +void bark_ggml_cl_transform_tensor(void * data, bark_ggml_tensor * tensor) { const int64_t ne0 = tensor->ne[0]; const int64_t ne1 = tensor->ne[1]; const int64_t ne2 = tensor->ne[2]; const int64_t ne3 = tensor->ne[3]; - const ggml_type type = tensor->type; - const size_t s_sz = ggml_type_size(type) * (size_t) (ne0 * ne1 / ggml_blck_size(type)); + const bark_ggml_type type = tensor->type; + const size_t s_sz = bark_ggml_type_size(type) * (size_t) (ne0 * ne1 / bark_ggml_blck_size(type)); const size_t q_sz = s_sz * (size_t) (ne2 * ne3); size_t q_size; - cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size); + cl_mem dst = bark_ggml_cl_pool_malloc(q_sz, &q_size); tensor->data = data; // copy tensor to device size_t offset = 0; for (int64_t i3 = 0; i3 < ne3; i3++) { for (int64_t i2 = 0; i2 < ne2; i2++) { - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, offset, tensor, i3, i2, NULL)); + CL_CHECK(bark_ggml_cl_h2d_tensor_2d(queue, dst, offset, tensor, i3, i2, NULL)); offset += s_sz; } } @@ -1927,5 +1927,5 @@ void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) { CL_CHECK(clFinish(queue)); tensor->extra = dst; - GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); + BARK_GGML_ASSERT(tensor->backend == BARK_GGML_BACKEND_GPU); } diff --git a/cpp/ggml-opencl.h b/cpp/ggml-opencl.h index a92b445..544efbb 100644 --- a/cpp/ggml-opencl.h +++ b/cpp/ggml-opencl.h @@ -6,19 +6,19 @@ extern "C" { #endif -void ggml_cl_init(void); +void bark_ggml_cl_init(void); -void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize); +void bark_ggml_cl_mul(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst); +bool bark_ggml_cl_can_mul_mat(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst); +size_t bark_ggml_cl_mul_mat_get_wsize(const 
struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst); +void bark_ggml_cl_mul_mat(const struct bark_ggml_tensor * src0, const struct bark_ggml_tensor * src1, struct bark_ggml_tensor * dst, void * wdata, size_t wsize); -void * ggml_cl_host_malloc(size_t size); -void ggml_cl_host_free(void * ptr); +void * bark_ggml_cl_host_malloc(size_t size); +void bark_ggml_cl_host_free(void * ptr); -void ggml_cl_free_data(const struct ggml_tensor* tensor); +void bark_ggml_cl_free_data(const struct bark_ggml_tensor* tensor); -void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor); +void bark_ggml_cl_transform_tensor(void * data, struct bark_ggml_tensor * tensor); #ifdef __cplusplus } diff --git a/cpp/ggml.c b/cpp/ggml.c index e7aa543..0bd0dbd 100644 --- a/cpp/ggml.c +++ b/cpp/ggml.c @@ -2,7 +2,7 @@ #include "ggml.h" -#ifdef GGML_USE_K_QUANTS +#ifdef BARK_GGML_USE_K_QUANTS #include "k_quants.h" #endif @@ -26,7 +26,7 @@ #include #include -#ifdef GGML_USE_METAL +#ifdef BARK_GGML_USE_METAL #include #endif @@ -109,7 +109,7 @@ typedef void * thread_ret_t; #include #endif -#ifdef GGML_USE_CPU_HBM +#ifdef BARK_GGML_USE_CPU_HBM #include #endif @@ -126,68 +126,68 @@ typedef void * thread_ret_t; #endif #endif -/*#define GGML_PERF*/ -#define GGML_DEBUG 0 -#define GGML_GELU_FP16 -#define GGML_GELU_QUICK_FP16 -#define GGML_SILU_FP16 -// #define GGML_CROSS_ENTROPY_EXP_FP16 -// #define GGML_FLASH_ATTN_EXP_FP16 +/*#define BARK_GGML_PERF*/ +#define BARK_GGML_DEBUG 0 +#define BARK_GGML_GELU_FP16 +#define BARK_GGML_GELU_QUICK_FP16 +#define BARK_GGML_SILU_FP16 +// #define BARK_GGML_CROSS_ENTROPY_EXP_FP16 +// #define BARK_GGML_FLASH_ATTN_EXP_FP16 -#define GGML_SOFT_MAX_UNROLL 4 -#define GGML_VEC_DOT_UNROLL 2 -#define GGML_VEC_MAD_UNROLL 32 +#define BARK_GGML_SOFT_MAX_UNROLL 4 +#define BARK_GGML_VEC_DOT_UNROLL 2 +#define BARK_GGML_VEC_MAD_UNROLL 32 // // logging // -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) +#if (BARK_GGML_DEBUG >= 1) +#define BARK_GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG(...) +#define BARK_GGML_PRINT_DEBUG(...) #endif -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) +#if (BARK_GGML_DEBUG >= 5) +#define BARK_GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG_5(...) +#define BARK_GGML_PRINT_DEBUG_5(...) #endif -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) +#if (BARK_GGML_DEBUG >= 10) +#define BARK_GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) #else -#define GGML_PRINT_DEBUG_10(...) +#define BARK_GGML_PRINT_DEBUG_10(...) #endif -#define GGML_PRINT(...) printf(__VA_ARGS__) +#define BARK_GGML_PRINT(...) 
printf(__VA_ARGS__) // // end of logging block // -#ifdef GGML_USE_ACCELERATE +#ifdef BARK_GGML_USE_ACCELERATE // uncomment to use vDSP for soft max computation // note: not sure if it is actually faster -//#define GGML_SOFT_MAX_ACCELERATE +//#define BARK_GGML_SOFT_MAX_ACCELERATE #endif #if defined(_MSC_VER) || defined(__MINGW32__) -#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN) -#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr) +#define BARK_GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, BARK_GGML_MEM_ALIGN) +#define BARK_GGML_ALIGNED_FREE(ptr) _aligned_free(ptr) #else -inline static void * ggml_aligned_malloc(size_t size) { +inline static void * bark_ggml_aligned_malloc(size_t size) { if (size == 0) { - GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n"); + BARK_GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for bark_ggml_aligned_malloc!\n"); return NULL; } void * aligned_memory = NULL; -#ifdef GGML_USE_CPU_HBM +#ifdef BARK_GGML_USE_CPU_HBM int result = hbw_posix_memalign(&aligned_memory, 16, size); -#elif GGML_USE_METAL +#elif BARK_GGML_USE_METAL int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size); #else - int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size); + int result = posix_memalign(&aligned_memory, BARK_GGML_MEM_ALIGN, size); #endif if (result != 0) { // Handle allocation failure @@ -200,54 +200,54 @@ inline static void * ggml_aligned_malloc(size_t size) { error_desc = "insufficient memory"; break; } - GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0)); + BARK_GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0)); return NULL; } return aligned_memory; } -#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size) -#ifdef GGML_USE_CPU_HBM -#define GGML_ALIGNED_FREE(ptr) if(NULL != ptr) hbw_free(ptr) +#define BARK_GGML_ALIGNED_MALLOC(size) bark_ggml_aligned_malloc(size) +#ifdef BARK_GGML_USE_CPU_HBM +#define BARK_GGML_ALIGNED_FREE(ptr) if(NULL != ptr) hbw_free(ptr) #else -#define GGML_ALIGNED_FREE(ptr) free(ptr) +#define BARK_GGML_ALIGNED_FREE(ptr) free(ptr) #endif #endif -#define UNUSED GGML_UNUSED +#define UNUSED BARK_GGML_UNUSED #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0) // // tensor access macros // -#define GGML_TENSOR_UNARY_OP_LOCALS \ - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - -#define GGML_TENSOR_BINARY_OP_LOCALS \ - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \ - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - -#if defined(GGML_USE_ACCELERATE) +#define BARK_GGML_TENSOR_UNARY_OP_LOCALS \ + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ + BARK_GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ + BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + +#define BARK_GGML_TENSOR_BINARY_OP_LOCALS \ + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ + BARK_GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ + BARK_GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ + BARK_GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \ + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ + 
BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + +#if defined(BARK_GGML_USE_ACCELERATE) #include -#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions +#if defined(BARK_GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions #include "ggml-opencl.h" #endif -#elif defined(GGML_USE_OPENBLAS) -#if defined(GGML_BLAS_USE_MKL) +#elif defined(BARK_GGML_USE_OPENBLAS) +#if defined(BARK_GGML_BLAS_USE_MKL) #include #else #include #endif -#elif defined(GGML_USE_CUBLAS) +#elif defined(BARK_GGML_USE_CUBLAS) #include "ggml-cuda.h" -#elif defined(GGML_USE_CLBLAST) +#elif defined(BARK_GGML_USE_CLBLAST) #include "ggml-opencl.h" #endif @@ -257,7 +257,7 @@ inline static void * ggml_aligned_malloc(size_t size) { #define MAX(a, b) ((a) > (b) ? (a) : (b)) // floating point type used to accumulate sums -typedef double ggml_float; +typedef double bark_ggml_float; // 16-bit float // on Arm, we use __fp16 @@ -270,11 +270,11 @@ typedef double ggml_float; // #include -#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) -#define GGML_COMPUTE_FP32_TO_FP16(x) (x) +#define BARK_GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) +#define BARK_GGML_COMPUTE_FP32_TO_FP16(x) (x) -#define GGML_FP16_TO_FP32(x) ((float) (x)) -#define GGML_FP32_TO_FP16(x) (x) +#define BARK_GGML_FP16_TO_FP32(x) ((float) (x)) +#define BARK_GGML_FP32_TO_FP16(x) (x) #else @@ -305,22 +305,22 @@ typedef double ggml_float; #ifdef __F16C__ #ifdef _MSC_VER -#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) -#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) +#define BARK_GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) +#define BARK_GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) #else -#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) +#define BARK_GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) +#define BARK_GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) #endif #elif defined(__POWER9_VECTOR__) -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) +#define BARK_GGML_COMPUTE_FP16_TO_FP32(x) bark_ggml_compute_fp16_to_fp32(x) +#define BARK_GGML_COMPUTE_FP32_TO_FP16(x) bark_ggml_compute_fp32_to_fp16(x) /* the inline asm below is about 12% faster than the lookup method */ -#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +#define BARK_GGML_FP16_TO_FP32(x) BARK_GGML_COMPUTE_FP16_TO_FP32(x) +#define BARK_GGML_FP32_TO_FP16(x) BARK_GGML_COMPUTE_FP32_TO_FP16(x) -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { +static inline float bark_ggml_compute_fp16_to_fp32(bark_ggml_fp16_t h) { register float f; register double d; __asm__( @@ -333,9 +333,9 @@ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { return f; } -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +static inline bark_ggml_fp16_t bark_ggml_compute_fp32_to_fp16(float f) { register double d; - register ggml_fp16_t r; + register bark_ggml_fp16_t r; __asm__( /* xscvdphp can work on double or single precision */ "xscvdphp %0,%2\n" "mffprd %1,%0\n" : @@ -368,7 +368,7 @@ static inline uint32_t fp32_to_bits(float f) { return fp32.as_bits; } -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { +static inline float bark_ggml_compute_fp16_to_fp32(bark_ggml_fp16_t h) { const uint32_t w = (uint32_t) 
h << 16; const uint32_t sign = w & UINT32_C(0x80000000); const uint32_t two_w = w + w; @@ -391,7 +391,7 @@ static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { return fp32_from_bits(result); } -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +static inline bark_ggml_fp16_t bark_ggml_compute_fp32_to_fp16(float f) { #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) const float scale_to_inf = 0x1.0p+112f; const float scale_to_zero = 0x1.0p-110f; @@ -417,8 +417,8 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); } -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) +#define BARK_GGML_COMPUTE_FP16_TO_FP32(x) bark_ggml_compute_fp16_to_fp32(x) +#define BARK_GGML_COMPUTE_FP32_TO_FP16(x) bark_ggml_compute_fp32_to_fp16(x) #endif // __F16C__ @@ -429,16 +429,16 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { // // precomputed gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_f16[1 << 16]; +static bark_ggml_fp16_t table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_quick_f16[1 << 16]; +static bark_ggml_fp16_t table_gelu_quick_f16[1 << 16]; // precomputed silu table for f16 (128 KB) -static ggml_fp16_t table_silu_f16[1 << 16]; +static bark_ggml_fp16_t table_silu_f16[1 << 16]; // precomputed exp table for f16 (128 KB) -static ggml_fp16_t table_exp_f16[1 << 16]; +static bark_ggml_fp16_t table_exp_f16[1 << 16]; // precomputed f32 table for f16 (256 KB) static float table_f32_f16[1 << 16]; @@ -458,39 +458,39 @@ static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 #endif -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. +// On ARM NEON, it's quicker to directly convert x -> x instead of calling into bark_ggml_lookup_fp16_to_fp32, +// so we define BARK_GGML_FP16_TO_FP32 and BARK_GGML_FP32_TO_FP16 elsewhere for NEON. // This is also true for POWER9. 
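Editor's note on the lookup table referenced here: table_f32_f16 trades 256 KB of memory for one indexed load per conversion, so on targets without fast hardware FP16 support BARK_GGML_FP16_TO_FP32 becomes a plain array access. The standalone sketch below shows the idea with a generic IEEE-754 binary16-to-binary32 routine and hypothetical names (half_to_float, g_f32_from_f16); it is an illustration under those assumptions, not the exact bark_ggml bit-manipulation path that follows in this file.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Generic IEEE-754 half -> float: unpack sign/exponent/mantissa, rebias, and
// rebuild the 32-bit pattern. Subnormals are renormalized; inf/NaN pass through.
static float half_to_float(uint16_t h) {
    const uint32_t sign = (uint32_t)(h & 0x8000u) << 16;
    const uint32_t exp  = (h >> 10) & 0x1Fu;
    uint32_t mant = h & 0x3FFu;
    uint32_t bits;
    if (exp == 0) {
        if (mant == 0) {
            bits = sign;                                        // signed zero
        } else {
            int shift = 0;                                      // subnormal: renormalize
            while ((mant & 0x400u) == 0) { mant <<= 1; ++shift; }
            mant &= 0x3FFu;
            bits = sign | ((uint32_t)(113 - shift) << 23)       // 127 - 15 + 1 - shift
                        | (mant << 13);
        }
    } else if (exp == 31) {
        bits = sign | 0x7F800000u | (mant << 13);               // +/-inf or NaN
    } else {
        bits = sign | ((exp + 112u) << 23) | (mant << 13);      // normal: rebias 15 -> 127
    }
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

int main() {
    // Convert every possible 16-bit pattern once, mirroring the 256 KB
    // table_f32_f16 idea; afterwards a conversion is a single table lookup.
    std::vector<float> g_f32_from_f16(1 << 16);
    for (uint32_t i = 0; i < (1u << 16); ++i) {
        g_f32_from_f16[i] = half_to_float((uint16_t)i);
    }
    // 0x3C00 is 1.0 in binary16, 0xC000 is -2.0
    std::printf("%f %f\n", g_f32_from_f16[0x3C00], g_f32_from_f16[0xC000]);
    return 0;
}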
-#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16) +#if !defined(BARK_GGML_FP16_TO_FP32) || !defined(BARK_GGML_FP32_TO_FP16) -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { +inline static float bark_ggml_lookup_fp16_to_fp32(bark_ggml_fp16_t f) { uint16_t s; memcpy(&s, &f, sizeof(uint16_t)); return table_f32_f16[s]; } -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +#define BARK_GGML_FP16_TO_FP32(x) bark_ggml_lookup_fp16_to_fp32(x) +#define BARK_GGML_FP32_TO_FP16(x) BARK_GGML_COMPUTE_FP32_TO_FP16(x) #endif // note: do not use these inside ggml.c // these are meant to be used via the ggml.h API -float ggml_fp16_to_fp32(ggml_fp16_t x) { - return (float) GGML_FP16_TO_FP32(x); +float bark_ggml_fp16_to_fp32(bark_ggml_fp16_t x) { + return (float) BARK_GGML_FP16_TO_FP32(x); } -ggml_fp16_t ggml_fp32_to_fp16(float x) { - return GGML_FP32_TO_FP16(x); +bark_ggml_fp16_t bark_ggml_fp32_to_fp16(float x) { + return BARK_GGML_FP32_TO_FP16(x); } -void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) { +void bark_ggml_fp16_to_fp32_row(const bark_ggml_fp16_t * x, float * y, int n) { for (int i = 0; i < n; i++) { - y[i] = GGML_FP16_TO_FP32(x[i]); + y[i] = BARK_GGML_FP16_TO_FP32(x[i]); } } -void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) { +void bark_ggml_fp32_to_fp16_row(const float * x, bark_ggml_fp16_t * y, int n) { int i = 0; #if defined(__F16C__) for (; i + 7 < n; i += 8) { @@ -505,7 +505,7 @@ void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) { } #endif for (; i < n; i++) { - y[i] = GGML_FP32_TO_FP16(x[i]); + y[i] = BARK_GGML_FP32_TO_FP16(x[i]); } } @@ -515,7 +515,7 @@ void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) { #if defined(_MSC_VER) || defined(__MINGW32__) static int64_t timer_freq, timer_start; -void ggml_time_init(void) { +void bark_ggml_time_init(void) { LARGE_INTEGER t; QueryPerformanceFrequency(&t); timer_freq = t.QuadPart; @@ -526,49 +526,49 @@ void ggml_time_init(void) { QueryPerformanceCounter(&t); timer_start = t.QuadPart; } -int64_t ggml_time_ms(void) { +int64_t bark_ggml_time_ms(void) { LARGE_INTEGER t; QueryPerformanceCounter(&t); return ((t.QuadPart-timer_start) * 1000) / timer_freq; } -int64_t ggml_time_us(void) { +int64_t bark_ggml_time_us(void) { LARGE_INTEGER t; QueryPerformanceCounter(&t); return ((t.QuadPart-timer_start) * 1000000) / timer_freq; } #else -void ggml_time_init(void) {} -int64_t ggml_time_ms(void) { +void bark_ggml_time_init(void) {} +int64_t bark_ggml_time_ms(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000; } -int64_t ggml_time_us(void) { +int64_t bark_ggml_time_us(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000; } #endif -int64_t ggml_cycles(void) { +int64_t bark_ggml_cycles(void) { return clock(); } -int64_t ggml_cycles_per_ms(void) { +int64_t bark_ggml_cycles_per_ms(void) { return CLOCKS_PER_SEC/1000; } -#ifdef GGML_PERF -#define ggml_perf_time_ms() ggml_time_ms() -#define ggml_perf_time_us() ggml_time_us() -#define ggml_perf_cycles() ggml_cycles() -#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms() +#ifdef BARK_GGML_PERF +#define bark_ggml_perf_time_ms() bark_ggml_time_ms() +#define bark_ggml_perf_time_us() bark_ggml_time_us() +#define bark_ggml_perf_cycles() bark_ggml_cycles() +#define bark_ggml_perf_cycles_per_ms() 
bark_ggml_cycles_per_ms() #else -#define ggml_perf_time_ms() 0 -#define ggml_perf_time_us() 0 -#define ggml_perf_cycles() 0 -#define ggml_perf_cycles_per_ms() 0 +#define bark_ggml_perf_time_ms() 0 +#define bark_ggml_perf_time_us() 0 +#define bark_ggml_perf_cycles() 0 +#define bark_ggml_perf_cycles_per_ms() 0 #endif // @@ -843,42 +843,42 @@ inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { #define QK4_0 32 typedef struct { - ggml_fp16_t d; // delta + bark_ggml_fp16_t d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; -static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); +static_assert(sizeof(block_q4_0) == sizeof(bark_ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); #define QK4_1 32 typedef struct { - ggml_fp16_t d; // delta - ggml_fp16_t m; // min + bark_ggml_fp16_t d; // delta + bark_ggml_fp16_t m; // min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; -static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding"); +static_assert(sizeof(block_q4_1) == 2 * sizeof(bark_ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding"); #define QK5_0 32 typedef struct { - ggml_fp16_t d; // delta + bark_ggml_fp16_t d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; -static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); +static_assert(sizeof(block_q5_0) == sizeof(bark_ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); #define QK5_1 32 typedef struct { - ggml_fp16_t d; // delta - ggml_fp16_t m; // min + bark_ggml_fp16_t d; // delta + bark_ggml_fp16_t m; // min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; -static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); +static_assert(sizeof(block_q5_1) == 2 * sizeof(bark_ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); #define QK8_0 32 typedef struct { - ggml_fp16_t d; // delta + bark_ggml_fp16_t d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; -static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); +static_assert(sizeof(block_q8_0) == sizeof(bark_ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); #define QK8_1 32 typedef struct { @@ -911,7 +911,7 @@ static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * r const float d = max / -8; const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); for (int j = 0; j < qk/2; ++j) { const float x0 = x[i*qk + 0 + j]*id; @@ -951,8 +951,8 @@ static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * r const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); - y[i].m = GGML_FP32_TO_FP16(min); + y[i].d = BARK_GGML_FP32_TO_FP16(d); + y[i].m = BARK_GGML_FP32_TO_FP16(min); for (int j = 0; j < qk/2; ++j) { const float x0 = (x[i*qk + 0 + j] - min)*id; @@ -993,7 +993,7 @@ static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * r const float d = max / -16; const float id = d ? 
1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); uint32_t qh = 0; @@ -1040,8 +1040,8 @@ static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * r const float d = (max - min) / ((1 << 5) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); - y[i].m = GGML_FP32_TO_FP16(min); + y[i].d = BARK_GGML_FP32_TO_FP16(d); + y[i].m = BARK_GGML_FP32_TO_FP16(min); uint32_t qh = 0; @@ -1083,7 +1083,7 @@ static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * r const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); for (int j = 0; j < QK8_0; ++j) { const float x0 = x[i*QK8_0 + j]*id; @@ -1118,7 +1118,7 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const float32x4_t v = vmulq_n_f32(srcv[j], id); @@ -1151,7 +1151,7 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); @@ -1186,7 +1186,7 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int // Quantize these floats const float d = maxScalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); @@ -1263,7 +1263,7 @@ static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; - y[i].d = GGML_FP32_TO_FP16(d); + y[i].d = BARK_GGML_FP32_TO_FP16(d); vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); @@ -1541,7 +1541,7 @@ static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); + const float d = BARK_GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F) - 8; @@ -1561,8 +1561,8 @@ static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - const float m = GGML_FP16_TO_FP32(x[i].m); + const float d = BARK_GGML_FP16_TO_FP32(x[i].d); + const float m = BARK_GGML_FP16_TO_FP32(x[i].m); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F); @@ -1582,7 +1582,7 @@ static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); + const float d = BARK_GGML_FP16_TO_FP32(x[i].d); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); @@ -1608,8 +1608,8 @@ static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - const float m = GGML_FP16_TO_FP32(x[i].m); + const float d = BARK_GGML_FP16_TO_FP32(x[i].d); + const float m = BARK_GGML_FP16_TO_FP32(x[i].m); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); @@ -1637,7 +1637,7 @@ static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, in const block_q8_0 * restrict x = vx; for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); + const float d = BARK_GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk; ++j) { y[i*qk + j] = x[i].qs[j]*d; @@ -1645,173 +1645,173 @@ static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, in } } -static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y); -static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y); -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); +static void bark_ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y); +static void bark_ggml_vec_dot_f16(const int n, float * restrict s, bark_ggml_fp16_t * restrict x, bark_ggml_fp16_t * restrict y); +static void bark_ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); +static void bark_ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); +static void bark_ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); +static void bark_ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, 
const void * restrict vx, const void * restrict vy); +static void bark_ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { - [GGML_TYPE_I8] = { +static const bark_ggml_type_traits_t type_traits[BARK_GGML_TYPE_COUNT] = { + [BARK_GGML_TYPE_I8] = { .type_name = "i8", .blck_size = 1, .type_size = sizeof(int8_t), .is_quantized = false, }, - [GGML_TYPE_I16] = { + [BARK_GGML_TYPE_I16] = { .type_name = "i16", .blck_size = 1, .type_size = sizeof(int16_t), .is_quantized = false, }, - [GGML_TYPE_I32] = { + [BARK_GGML_TYPE_I32] = { .type_name = "i32", .blck_size = 1, .type_size = sizeof(int32_t), .is_quantized = false, }, - [GGML_TYPE_F32] = { + [BARK_GGML_TYPE_F32] = { .type_name = "f32", .blck_size = 1, .type_size = sizeof(float), .is_quantized = false, - .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, - .vec_dot_type = GGML_TYPE_F32, + .vec_dot = (bark_ggml_vec_dot_t) bark_ggml_vec_dot_f32, + .vec_dot_type = BARK_GGML_TYPE_F32, }, - [GGML_TYPE_F16] = { + [BARK_GGML_TYPE_F16] = { .type_name = "f16", .blck_size = 1, - .type_size = sizeof(ggml_fp16_t), + .type_size = sizeof(bark_ggml_fp16_t), .is_quantized = false, - .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row, - .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row, - .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row, - .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, - .vec_dot_type = GGML_TYPE_F16, + .to_float = (bark_ggml_to_float_t) bark_ggml_fp16_to_fp32_row, + .from_float = (bark_ggml_from_float_t) bark_ggml_fp32_to_fp16_row, + .from_float_reference = (bark_ggml_from_float_t) bark_ggml_fp32_to_fp16_row, + .vec_dot = (bark_ggml_vec_dot_t) bark_ggml_vec_dot_f16, + .vec_dot_type = BARK_GGML_TYPE_F16, }, - [GGML_TYPE_Q4_0] = { + [BARK_GGML_TYPE_Q4_0] = { .type_name = "q4_0", .blck_size = QK4_0, .type_size = sizeof(block_q4_0), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_0, + .to_float = (bark_ggml_to_float_t) dequantize_row_q4_0, .from_float = quantize_row_q4_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference, - .vec_dot = ggml_vec_dot_q4_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q4_0_reference, + .vec_dot = bark_ggml_vec_dot_q4_0_q8_0, + .vec_dot_type = BARK_GGML_TYPE_Q8_0, }, - [GGML_TYPE_Q4_1] = { + [BARK_GGML_TYPE_Q4_1] = { .type_name = "q4_1", .blck_size = QK4_1, .type_size = sizeof(block_q4_1), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_1, + .to_float = (bark_ggml_to_float_t) dequantize_row_q4_1, .from_float = quantize_row_q4_1, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference, - .vec_dot = ggml_vec_dot_q4_1_q8_1, - .vec_dot_type = GGML_TYPE_Q8_1, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q4_1_reference, + .vec_dot = bark_ggml_vec_dot_q4_1_q8_1, + .vec_dot_type = BARK_GGML_TYPE_Q8_1, }, - [GGML_TYPE_Q5_0] = { + [BARK_GGML_TYPE_Q5_0] = { .type_name = "q5_0", .blck_size = QK5_0, .type_size = sizeof(block_q5_0), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_0, + .to_float = (bark_ggml_to_float_t) dequantize_row_q5_0, .from_float = quantize_row_q5_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference, - .vec_dot = ggml_vec_dot_q5_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q5_0_reference, + .vec_dot = 
bark_ggml_vec_dot_q5_0_q8_0, + .vec_dot_type = BARK_GGML_TYPE_Q8_0, }, - [GGML_TYPE_Q5_1] = { + [BARK_GGML_TYPE_Q5_1] = { .type_name = "q5_1", .blck_size = QK5_1, .type_size = sizeof(block_q5_1), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_1, + .to_float = (bark_ggml_to_float_t) dequantize_row_q5_1, .from_float = quantize_row_q5_1, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference, - .vec_dot = ggml_vec_dot_q5_1_q8_1, - .vec_dot_type = GGML_TYPE_Q8_1, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q5_1_reference, + .vec_dot = bark_ggml_vec_dot_q5_1_q8_1, + .vec_dot_type = BARK_GGML_TYPE_Q8_1, }, - [GGML_TYPE_Q8_0] = { + [BARK_GGML_TYPE_Q8_0] = { .type_name = "q8_0", .blck_size = QK8_0, .type_size = sizeof(block_q8_0), .is_quantized = true, .to_float = dequantize_row_q8_0, .from_float = quantize_row_q8_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference, - .vec_dot = ggml_vec_dot_q8_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q8_0_reference, + .vec_dot = bark_ggml_vec_dot_q8_0_q8_0, + .vec_dot_type = BARK_GGML_TYPE_Q8_0, }, - [GGML_TYPE_Q8_1] = { + [BARK_GGML_TYPE_Q8_1] = { .type_name = "q8_1", .blck_size = QK8_1, .type_size = sizeof(block_q8_1), .is_quantized = true, .from_float = quantize_row_q8_1, - .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference, - .vec_dot_type = GGML_TYPE_Q8_1, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q8_1_reference, + .vec_dot_type = BARK_GGML_TYPE_Q8_1, }, -#ifdef GGML_USE_K_QUANTS - [GGML_TYPE_Q2_K] = { +#ifdef BARK_GGML_USE_K_QUANTS + [BARK_GGML_TYPE_Q2_K] = { .type_name = "q2_K", .blck_size = QK_K, .type_size = sizeof(block_q2_K), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q2_K, + .to_float = (bark_ggml_to_float_t) dequantize_row_q2_K, .from_float = quantize_row_q2_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference, - .vec_dot = ggml_vec_dot_q2_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q2_K_reference, + .vec_dot = bark_ggml_vec_dot_q2_K_q8_K, + .vec_dot_type = BARK_GGML_TYPE_Q8_K, }, - [GGML_TYPE_Q3_K] = { + [BARK_GGML_TYPE_Q3_K] = { .type_name = "q3_K", .blck_size = QK_K, .type_size = sizeof(block_q3_K), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q3_K, + .to_float = (bark_ggml_to_float_t) dequantize_row_q3_K, .from_float = quantize_row_q3_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference, - .vec_dot = ggml_vec_dot_q3_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q3_K_reference, + .vec_dot = bark_ggml_vec_dot_q3_K_q8_K, + .vec_dot_type = BARK_GGML_TYPE_Q8_K, }, - [GGML_TYPE_Q4_K] = { + [BARK_GGML_TYPE_Q4_K] = { .type_name = "q4_K", .blck_size = QK_K, .type_size = sizeof(block_q4_K), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_K, + .to_float = (bark_ggml_to_float_t) dequantize_row_q4_K, .from_float = quantize_row_q4_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference, - .vec_dot = ggml_vec_dot_q4_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q4_K_reference, + .vec_dot = bark_ggml_vec_dot_q4_K_q8_K, + .vec_dot_type = BARK_GGML_TYPE_Q8_K, }, - [GGML_TYPE_Q5_K] = { + [BARK_GGML_TYPE_Q5_K] = { .type_name = "q5_K", .blck_size = QK_K, 
.type_size = sizeof(block_q5_K), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_K, + .to_float = (bark_ggml_to_float_t) dequantize_row_q5_K, .from_float = quantize_row_q5_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference, - .vec_dot = ggml_vec_dot_q5_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q5_K_reference, + .vec_dot = bark_ggml_vec_dot_q5_K_q8_K, + .vec_dot_type = BARK_GGML_TYPE_Q8_K, }, - [GGML_TYPE_Q6_K] = { + [BARK_GGML_TYPE_Q6_K] = { .type_name = "q6_K", .blck_size = QK_K, .type_size = sizeof(block_q6_K), .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q6_K, + .to_float = (bark_ggml_to_float_t) dequantize_row_q6_K, .from_float = quantize_row_q6_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference, - .vec_dot = ggml_vec_dot_q6_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, + .from_float_reference = (bark_ggml_from_float_t) quantize_row_q6_K_reference, + .vec_dot = bark_ggml_vec_dot_q6_K_q8_K, + .vec_dot_type = BARK_GGML_TYPE_Q8_K, }, - [GGML_TYPE_Q8_K] = { + [BARK_GGML_TYPE_Q8_K] = { .type_name = "q8_K", .blck_size = QK_K, .type_size = sizeof(block_q8_K), @@ -1822,8 +1822,8 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { }; // For internal test use -ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { - GGML_ASSERT(type < GGML_TYPE_COUNT); +bark_ggml_type_traits_t bark_ggml_internal_get_type_traits(enum bark_ggml_type type) { + BARK_GGML_ASSERT(type < BARK_GGML_TYPE_COUNT); return type_traits[type]; } @@ -1835,34 +1835,34 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros // -// GGML_F32_STEP / GGML_F16_STEP +// BARK_GGML_F32_STEP / BARK_GGML_F16_STEP // number of elements to process in a single step // -// GGML_F32_EPR / GGML_F16_EPR +// BARK_GGML_F32_EPR / BARK_GGML_F16_EPR // number of elements to fit in a single register // #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) -#define GGML_SIMD +#define BARK_GGML_SIMD // F32 NEON -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 float32x4_t -#define GGML_F32x4_ZERO vdupq_n_f32(0.0f) -#define GGML_F32x4_SET1(x) vdupq_n_f32(x) -#define GGML_F32x4_LOAD vld1q_f32 -#define GGML_F32x4_STORE vst1q_f32 -#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) -#define GGML_F32x4_ADD vaddq_f32 -#define GGML_F32x4_MUL vmulq_f32 -#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) -#define GGML_F32x4_REDUCE(res, x) \ +#define BARK_GGML_F32_STEP 16 +#define BARK_GGML_F32_EPR 4 + +#define BARK_GGML_F32x4 float32x4_t +#define BARK_GGML_F32x4_ZERO vdupq_n_f32(0.0f) +#define BARK_GGML_F32x4_SET1(x) vdupq_n_f32(x) +#define BARK_GGML_F32x4_LOAD vld1q_f32 +#define BARK_GGML_F32x4_STORE vst1q_f32 +#define BARK_GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) +#define BARK_GGML_F32x4_ADD vaddq_f32 +#define BARK_GGML_F32x4_MUL vmulq_f32 +#define BARK_GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) +#define BARK_GGML_F32x4_REDUCE(res, x) \ { \ - int offset = GGML_F32_ARR >> 1; \ + int offset = BARK_GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vaddq_f32(x[i], x[offset+i]); \ } \ @@ -1874,36 +1874,36 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { for (int i = 0; i < offset; ++i) { \ x[i] = vaddq_f32(x[i], x[offset+i]); \ 
} \ - res = GGML_F32x4_REDUCE_ONE(x[0]); \ + res = BARK_GGML_F32x4_REDUCE_ONE(x[0]); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define BARK_GGML_F32_VEC BARK_GGML_F32x4 +#define BARK_GGML_F32_VEC_ZERO BARK_GGML_F32x4_ZERO +#define BARK_GGML_F32_VEC_SET1 BARK_GGML_F32x4_SET1 +#define BARK_GGML_F32_VEC_LOAD BARK_GGML_F32x4_LOAD +#define BARK_GGML_F32_VEC_STORE BARK_GGML_F32x4_STORE +#define BARK_GGML_F32_VEC_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F32_VEC_ADD BARK_GGML_F32x4_ADD +#define BARK_GGML_F32_VEC_MUL BARK_GGML_F32x4_MUL +#define BARK_GGML_F32_VEC_REDUCE BARK_GGML_F32x4_REDUCE // F16 NEON #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - #define GGML_F16_STEP 32 - #define GGML_F16_EPR 8 - - #define GGML_F16x8 float16x8_t - #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) - #define GGML_F16x8_SET1(x) vdupq_n_f16(x) - #define GGML_F16x8_LOAD vld1q_f16 - #define GGML_F16x8_STORE vst1q_f16 - #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) - #define GGML_F16x8_ADD vaddq_f16 - #define GGML_F16x8_MUL vmulq_f16 - #define GGML_F16x8_REDUCE(res, x) \ + #define BARK_GGML_F16_STEP 32 + #define BARK_GGML_F16_EPR 8 + + #define BARK_GGML_F16x8 float16x8_t + #define BARK_GGML_F16x8_ZERO vdupq_n_f16(0.0f) + #define BARK_GGML_F16x8_SET1(x) vdupq_n_f16(x) + #define BARK_GGML_F16x8_LOAD vld1q_f16 + #define BARK_GGML_F16x8_STORE vst1q_f16 + #define BARK_GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define BARK_GGML_F16x8_ADD vaddq_f16 + #define BARK_GGML_F16x8_MUL vmulq_f16 + #define BARK_GGML_F16x8_REDUCE(res, x) \ do { \ - int offset = GGML_F16_ARR >> 1; \ + int offset = BARK_GGML_F16_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vaddq_f16(x[i], x[offset+i]); \ } \ @@ -1917,70 +1917,70 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { } \ const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \ - res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ + res = (bark_ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ } while (0) - #define GGML_F16_VEC GGML_F16x8 - #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO - #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F16x8_FMA - #define GGML_F16_VEC_ADD GGML_F16x8_ADD - #define GGML_F16_VEC_MUL GGML_F16x8_MUL - #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE + #define BARK_GGML_F16_VEC BARK_GGML_F16x8 + #define BARK_GGML_F16_VEC_ZERO BARK_GGML_F16x8_ZERO + #define BARK_GGML_F16_VEC_SET1 BARK_GGML_F16x8_SET1 + #define BARK_GGML_F16_VEC_LOAD(p, i) BARK_GGML_F16x8_LOAD(p) + #define BARK_GGML_F16_VEC_STORE(p, r, i) BARK_GGML_F16x8_STORE(p, r[i]) + #define BARK_GGML_F16_VEC_FMA BARK_GGML_F16x8_FMA + #define BARK_GGML_F16_VEC_ADD BARK_GGML_F16x8_ADD + #define BARK_GGML_F16_VEC_MUL BARK_GGML_F16x8_MUL + #define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F16x8_REDUCE #else // if FP16 vector arithmetic is not supported, we use FP32 instead // and take advantage of the vcvt_ functions to convert to/from FP16 - #define GGML_F16_STEP 16 - #define GGML_F16_EPR 4 - - #define GGML_F32Cx4 float32x4_t - #define GGML_F32Cx4_ZERO 
vdupq_n_f32(0.0f) - #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) - #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) - #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) - #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) - #define GGML_F32Cx4_ADD vaddq_f32 - #define GGML_F32Cx4_MUL vmulq_f32 - #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - - #define GGML_F16_VEC GGML_F32Cx4 - #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO - #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA - #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD - #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL - #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE + #define BARK_GGML_F16_STEP 16 + #define BARK_GGML_F16_EPR 4 + + #define BARK_GGML_F32Cx4 float32x4_t + #define BARK_GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define BARK_GGML_F32Cx4_SET1(x) vdupq_n_f32(x) + #define BARK_GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) + #define BARK_GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define BARK_GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define BARK_GGML_F32Cx4_ADD vaddq_f32 + #define BARK_GGML_F32Cx4_MUL vmulq_f32 + #define BARK_GGML_F32Cx4_REDUCE BARK_GGML_F32x4_REDUCE + + #define BARK_GGML_F16_VEC BARK_GGML_F32Cx4 + #define BARK_GGML_F16_VEC_ZERO BARK_GGML_F32Cx4_ZERO + #define BARK_GGML_F16_VEC_SET1 BARK_GGML_F32Cx4_SET1 + #define BARK_GGML_F16_VEC_LOAD(p, i) BARK_GGML_F32Cx4_LOAD(p) + #define BARK_GGML_F16_VEC_STORE(p, r, i) BARK_GGML_F32Cx4_STORE(p, r[i]) + #define BARK_GGML_F16_VEC_FMA BARK_GGML_F32Cx4_FMA + #define BARK_GGML_F16_VEC_ADD BARK_GGML_F32Cx4_ADD + #define BARK_GGML_F16_VEC_MUL BARK_GGML_F32Cx4_MUL + #define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F32Cx4_REDUCE #endif #elif defined(__AVX__) -#define GGML_SIMD +#define BARK_GGML_SIMD // F32 AVX -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 8 +#define BARK_GGML_F32_STEP 32 +#define BARK_GGML_F32_EPR 8 -#define GGML_F32x8 __m256 -#define GGML_F32x8_ZERO _mm256_setzero_ps() -#define GGML_F32x8_SET1(x) _mm256_set1_ps(x) -#define GGML_F32x8_LOAD _mm256_loadu_ps -#define GGML_F32x8_STORE _mm256_storeu_ps +#define BARK_GGML_F32x8 __m256 +#define BARK_GGML_F32x8_ZERO _mm256_setzero_ps() +#define BARK_GGML_F32x8_SET1(x) _mm256_set1_ps(x) +#define BARK_GGML_F32x8_LOAD _mm256_loadu_ps +#define BARK_GGML_F32x8_STORE _mm256_storeu_ps #if defined(__FMA__) - #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) + #define BARK_GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) #else - #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) + #define BARK_GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) #endif -#define GGML_F32x8_ADD _mm256_add_ps -#define GGML_F32x8_MUL _mm256_mul_ps -#define GGML_F32x8_REDUCE(res, x) \ +#define BARK_GGML_F32x8_ADD _mm256_add_ps +#define BARK_GGML_F32x8_MUL _mm256_mul_ps +#define BARK_GGML_F32x8_REDUCE(res, x) \ do { \ - int offset = GGML_F32_ARR >> 1; \ + int offset = BARK_GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm256_add_ps(x[i], x[offset+i]); \ } \ @@ -1999,88 +1999,88 @@ do { \ } while (0) // TODO: is this optimal ? 
-#define GGML_F32_VEC GGML_F32x8 -#define GGML_F32_VEC_ZERO GGML_F32x8_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x8_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x8_LOAD -#define GGML_F32_VEC_STORE GGML_F32x8_STORE -#define GGML_F32_VEC_FMA GGML_F32x8_FMA -#define GGML_F32_VEC_ADD GGML_F32x8_ADD -#define GGML_F32_VEC_MUL GGML_F32x8_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE +#define BARK_GGML_F32_VEC BARK_GGML_F32x8 +#define BARK_GGML_F32_VEC_ZERO BARK_GGML_F32x8_ZERO +#define BARK_GGML_F32_VEC_SET1 BARK_GGML_F32x8_SET1 +#define BARK_GGML_F32_VEC_LOAD BARK_GGML_F32x8_LOAD +#define BARK_GGML_F32_VEC_STORE BARK_GGML_F32x8_STORE +#define BARK_GGML_F32_VEC_FMA BARK_GGML_F32x8_FMA +#define BARK_GGML_F32_VEC_ADD BARK_GGML_F32x8_ADD +#define BARK_GGML_F32_VEC_MUL BARK_GGML_F32x8_MUL +#define BARK_GGML_F32_VEC_REDUCE BARK_GGML_F32x8_REDUCE // F16 AVX -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 8 +#define BARK_GGML_F16_STEP 32 +#define BARK_GGML_F16_EPR 8 // F16 arithmetic is not supported by AVX, so we use F32 instead -#define GGML_F32Cx8 __m256 -#define GGML_F32Cx8_ZERO _mm256_setzero_ps() -#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) +#define BARK_GGML_F32Cx8 __m256 +#define BARK_GGML_F32Cx8_ZERO _mm256_setzero_ps() +#define BARK_GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) #if defined(__F16C__) // the _mm256_cvt intrinsics require F16C -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) -#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) +#define BARK_GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) +#define BARK_GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) #else -static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { +static inline __m256 __avx_f32cx8_load(bark_ggml_fp16_t *x) { float tmp[8]; for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = BARK_GGML_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); } -static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { +static inline void __avx_f32cx8_store(bark_ggml_fp16_t *x, __m256 y) { float arr[8]; _mm256_storeu_ps(arr, y); for (int i = 0; i < 8; i++) - x[i] = GGML_FP32_TO_FP16(arr[i]); + x[i] = BARK_GGML_FP32_TO_FP16(arr[i]); } -#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) -#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) +#define BARK_GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) +#define BARK_GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) #endif -#define GGML_F32Cx8_FMA GGML_F32x8_FMA -#define GGML_F32Cx8_ADD _mm256_add_ps -#define GGML_F32Cx8_MUL _mm256_mul_ps -#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE - -#define GGML_F16_VEC GGML_F32Cx8 -#define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx8_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx8_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx8_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE +#define BARK_GGML_F32Cx8_FMA BARK_GGML_F32x8_FMA +#define BARK_GGML_F32Cx8_ADD _mm256_add_ps +#define BARK_GGML_F32Cx8_MUL _mm256_mul_ps +#define BARK_GGML_F32Cx8_REDUCE BARK_GGML_F32x8_REDUCE + +#define BARK_GGML_F16_VEC BARK_GGML_F32Cx8 +#define BARK_GGML_F16_VEC_ZERO BARK_GGML_F32Cx8_ZERO +#define BARK_GGML_F16_VEC_SET1 BARK_GGML_F32Cx8_SET1 +#define BARK_GGML_F16_VEC_LOAD(p, i) BARK_GGML_F32Cx8_LOAD(p) +#define BARK_GGML_F16_VEC_STORE(p, r, i) BARK_GGML_F32Cx8_STORE(p, 
r[i]) +#define BARK_GGML_F16_VEC_FMA BARK_GGML_F32Cx8_FMA +#define BARK_GGML_F16_VEC_ADD BARK_GGML_F32Cx8_ADD +#define BARK_GGML_F16_VEC_MUL BARK_GGML_F32Cx8_MUL +#define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F32Cx8_REDUCE #elif defined(__POWER9_VECTOR__) -#define GGML_SIMD +#define BARK_GGML_SIMD // F32 POWER9 -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 vector float -#define GGML_F32x4_ZERO 0.0f -#define GGML_F32x4_SET1 vec_splats -#define GGML_F32x4_LOAD(p) vec_xl(0, p) -#define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) -#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) -#define GGML_F32x4_ADD vec_add -#define GGML_F32x4_MUL vec_mul -#define GGML_F32x4_REDUCE(res, x) \ +#define BARK_GGML_F32_STEP 32 +#define BARK_GGML_F32_EPR 4 + +#define BARK_GGML_F32x4 vector float +#define BARK_GGML_F32x4_ZERO 0.0f +#define BARK_GGML_F32x4_SET1 vec_splats +#define BARK_GGML_F32x4_LOAD(p) vec_xl(0, p) +#define BARK_GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) +#define BARK_GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) +#define BARK_GGML_F32x4_ADD vec_add +#define BARK_GGML_F32x4_MUL vec_mul +#define BARK_GGML_F32x4_REDUCE(res, x) \ { \ - int offset = GGML_F32_ARR >> 1; \ + int offset = BARK_GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset+i]); \ } \ @@ -2098,55 +2098,55 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { vec_extract(x[0], 3); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define BARK_GGML_F32_VEC BARK_GGML_F32x4 +#define BARK_GGML_F32_VEC_ZERO BARK_GGML_F32x4_ZERO +#define BARK_GGML_F32_VEC_SET1 BARK_GGML_F32x4_SET1 +#define BARK_GGML_F32_VEC_LOAD BARK_GGML_F32x4_LOAD +#define BARK_GGML_F32_VEC_STORE BARK_GGML_F32x4_STORE +#define BARK_GGML_F32_VEC_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F32_VEC_ADD BARK_GGML_F32x4_ADD +#define BARK_GGML_F32_VEC_MUL BARK_GGML_F32x4_MUL +#define BARK_GGML_F32_VEC_REDUCE BARK_GGML_F32x4_REDUCE // F16 POWER9 -#define GGML_F16_STEP GGML_F32_STEP -#define GGML_F16_EPR GGML_F32_EPR -#define GGML_F16_VEC GGML_F32x4 -#define GGML_F16_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F16_VEC_FMA GGML_F32x4_FMA -#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE +#define BARK_GGML_F16_STEP BARK_GGML_F32_STEP +#define BARK_GGML_F16_EPR BARK_GGML_F32_EPR +#define BARK_GGML_F16_VEC BARK_GGML_F32x4 +#define BARK_GGML_F16_VEC_ZERO BARK_GGML_F32x4_ZERO +#define BARK_GGML_F16_VEC_SET1 BARK_GGML_F32x4_SET1 +#define BARK_GGML_F16_VEC_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F32x4_REDUCE // Use vec_xl, not vec_ld, in case the load address is not aligned. -#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ - vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ +#define BARK_GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? 
\ + vec_extract_fp32_from_shorth(vec_xl(0, p - BARK_GGML_F16_EPR)) : \ vec_extract_fp32_from_shortl(vec_xl(0, p)) -#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] -#define GGML_F16_VEC_STORE(p, r, i) \ +#define BARK_GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] +#define BARK_GGML_F16_VEC_STORE(p, r, i) \ if (i & 0x1) \ - vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ - r[i - GGML_ENDIAN_BYTE(0)]), \ - 0, p - GGML_F16_EPR) + vec_xst(vec_pack_to_short_fp32(r[i - BARK_GGML_ENDIAN_BYTE(1)], \ + r[i - BARK_GGML_ENDIAN_BYTE(0)]), \ + 0, p - BARK_GGML_F16_EPR) #elif defined(__wasm_simd128__) -#define GGML_SIMD +#define BARK_GGML_SIMD // F32 WASM -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 v128_t -#define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F32x4_LOAD wasm_v128_load -#define GGML_F32x4_STORE wasm_v128_store -#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) -#define GGML_F32x4_ADD wasm_f32x4_add -#define GGML_F32x4_MUL wasm_f32x4_mul -#define GGML_F32x4_REDUCE(res, x) \ +#define BARK_GGML_F32_STEP 16 +#define BARK_GGML_F32_EPR 4 + +#define BARK_GGML_F32x4 v128_t +#define BARK_GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) +#define BARK_GGML_F32x4_SET1(x) wasm_f32x4_splat(x) +#define BARK_GGML_F32x4_LOAD wasm_v128_load +#define BARK_GGML_F32x4_STORE wasm_v128_store +#define BARK_GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) +#define BARK_GGML_F32x4_ADD wasm_f32x4_add +#define BARK_GGML_F32x4_MUL wasm_f32x4_mul +#define BARK_GGML_F32x4_REDUCE(res, x) \ { \ - int offset = GGML_F32_ARR >> 1; \ + int offset = BARK_GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ @@ -2164,54 +2164,54 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { wasm_f32x4_extract_lane(x[0], 3); \ } -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define BARK_GGML_F32_VEC BARK_GGML_F32x4 +#define BARK_GGML_F32_VEC_ZERO BARK_GGML_F32x4_ZERO +#define BARK_GGML_F32_VEC_SET1 BARK_GGML_F32x4_SET1 +#define BARK_GGML_F32_VEC_LOAD BARK_GGML_F32x4_LOAD +#define BARK_GGML_F32_VEC_STORE BARK_GGML_F32x4_STORE +#define BARK_GGML_F32_VEC_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F32_VEC_ADD BARK_GGML_F32x4_ADD +#define BARK_GGML_F32_VEC_MUL BARK_GGML_F32x4_MUL +#define BARK_GGML_F32_VEC_REDUCE BARK_GGML_F32x4_REDUCE // F16 WASM -#define GGML_F16_STEP 16 -#define GGML_F16_EPR 4 +#define BARK_GGML_F16_STEP 16 +#define BARK_GGML_F16_EPR 4 -inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { +inline static v128_t __wasm_f16x4_load(const bark_ggml_fp16_t * p) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); + tmp[0] = BARK_GGML_FP16_TO_FP32(p[0]); + tmp[1] = BARK_GGML_FP16_TO_FP32(p[1]); + tmp[2] = BARK_GGML_FP16_TO_FP32(p[2]); + tmp[3] = BARK_GGML_FP16_TO_FP32(p[3]); return wasm_v128_load(tmp); } -inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { +inline static void __wasm_f16x4_store(bark_ggml_fp16_t * p, v128_t x) { float tmp[4]; 
wasm_v128_store(tmp, x); - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); -} - -#define GGML_F16x4 v128_t -#define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) -#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) -#define GGML_F16x4_FMA GGML_F32x4_FMA -#define GGML_F16x4_ADD wasm_f32x4_add -#define GGML_F16x4_MUL wasm_f32x4_mul -#define GGML_F16x4_REDUCE(res, x) \ + p[0] = BARK_GGML_FP32_TO_FP16(tmp[0]); + p[1] = BARK_GGML_FP32_TO_FP16(tmp[1]); + p[2] = BARK_GGML_FP32_TO_FP16(tmp[2]); + p[3] = BARK_GGML_FP32_TO_FP16(tmp[3]); +} + +#define BARK_GGML_F16x4 v128_t +#define BARK_GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) +#define BARK_GGML_F16x4_SET1(x) wasm_f32x4_splat(x) +#define BARK_GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) +#define BARK_GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) +#define BARK_GGML_F16x4_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F16x4_ADD wasm_f32x4_add +#define BARK_GGML_F16x4_MUL wasm_f32x4_mul +#define BARK_GGML_F16x4_REDUCE(res, x) \ { \ - int offset = GGML_F16_ARR >> 1; \ + int offset = BARK_GGML_F16_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ } \ @@ -2229,41 +2229,41 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { wasm_f32x4_extract_lane(x[0], 3); \ } -#define GGML_F16_VEC GGML_F16x4 -#define GGML_F16_VEC_ZERO GGML_F16x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F16x4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F16x4_FMA -#define GGML_F16_VEC_ADD GGML_F16x4_ADD -#define GGML_F16_VEC_MUL GGML_F16x4_MUL -#define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE +#define BARK_GGML_F16_VEC BARK_GGML_F16x4 +#define BARK_GGML_F16_VEC_ZERO BARK_GGML_F16x4_ZERO +#define BARK_GGML_F16_VEC_SET1 BARK_GGML_F16x4_SET1 +#define BARK_GGML_F16_VEC_LOAD(p, i) BARK_GGML_F16x4_LOAD(p) +#define BARK_GGML_F16_VEC_STORE(p, r, i) BARK_GGML_F16x4_STORE(p, r[i]) +#define BARK_GGML_F16_VEC_FMA BARK_GGML_F16x4_FMA +#define BARK_GGML_F16_VEC_ADD BARK_GGML_F16x4_ADD +#define BARK_GGML_F16_VEC_MUL BARK_GGML_F16x4_MUL +#define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F16x4_REDUCE #elif defined(__SSE3__) -#define GGML_SIMD +#define BARK_GGML_SIMD // F32 SSE -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 +#define BARK_GGML_F32_STEP 32 +#define BARK_GGML_F32_EPR 4 -#define GGML_F32x4 __m128 -#define GGML_F32x4_ZERO _mm_setzero_ps() -#define GGML_F32x4_SET1(x) _mm_set1_ps(x) -#define GGML_F32x4_LOAD _mm_loadu_ps -#define GGML_F32x4_STORE _mm_storeu_ps +#define BARK_GGML_F32x4 __m128 +#define BARK_GGML_F32x4_ZERO _mm_setzero_ps() +#define BARK_GGML_F32x4_SET1(x) _mm_set1_ps(x) +#define BARK_GGML_F32x4_LOAD _mm_loadu_ps +#define BARK_GGML_F32x4_STORE _mm_storeu_ps #if defined(__FMA__) // TODO: Does this work? 
- #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) + #define BARK_GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) #else - #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) + #define BARK_GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) #endif -#define GGML_F32x4_ADD _mm_add_ps -#define GGML_F32x4_MUL _mm_mul_ps -#define GGML_F32x4_REDUCE(res, x) \ +#define BARK_GGML_F32x4_ADD _mm_add_ps +#define BARK_GGML_F32x4_MUL _mm_mul_ps +#define BARK_GGML_F32x4_REDUCE(res, x) \ { \ - int offset = GGML_F32_ARR >> 1; \ + int offset = BARK_GGML_F32_ARR >> 1; \ for (int i = 0; i < offset; ++i) { \ x[i] = _mm_add_ps(x[i], x[offset+i]); \ } \ @@ -2280,117 +2280,117 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { } // TODO: is this optimal ? -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE +#define BARK_GGML_F32_VEC BARK_GGML_F32x4 +#define BARK_GGML_F32_VEC_ZERO BARK_GGML_F32x4_ZERO +#define BARK_GGML_F32_VEC_SET1 BARK_GGML_F32x4_SET1 +#define BARK_GGML_F32_VEC_LOAD BARK_GGML_F32x4_LOAD +#define BARK_GGML_F32_VEC_STORE BARK_GGML_F32x4_STORE +#define BARK_GGML_F32_VEC_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F32_VEC_ADD BARK_GGML_F32x4_ADD +#define BARK_GGML_F32_VEC_MUL BARK_GGML_F32x4_MUL +#define BARK_GGML_F32_VEC_REDUCE BARK_GGML_F32x4_REDUCE // F16 SSE -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 4 +#define BARK_GGML_F16_STEP 32 +#define BARK_GGML_F16_EPR 4 -static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { +static inline __m128 __sse_f16x4_load(bark_ggml_fp16_t *x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = BARK_GGML_FP16_TO_FP32(x[0]); + tmp[1] = BARK_GGML_FP16_TO_FP32(x[1]); + tmp[2] = BARK_GGML_FP16_TO_FP32(x[2]); + tmp[3] = BARK_GGML_FP16_TO_FP32(x[3]); return _mm_loadu_ps(tmp); } -static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { +static inline void __sse_f16x4_store(bark_ggml_fp16_t *x, __m128 y) { float arr[4]; _mm_storeu_ps(arr, y); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); -} - -#define GGML_F32Cx4 __m128 -#define GGML_F32Cx4_ZERO _mm_setzero_ps() -#define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) -#define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) -#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) -#define GGML_F32Cx4_FMA GGML_F32x4_FMA -#define GGML_F32Cx4_ADD _mm_add_ps -#define GGML_F32Cx4_MUL _mm_mul_ps -#define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - -#define GGML_F16_VEC GGML_F32Cx4 -#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE + x[0] = BARK_GGML_FP32_TO_FP16(arr[0]); + x[1] = BARK_GGML_FP32_TO_FP16(arr[1]); + x[2] = BARK_GGML_FP32_TO_FP16(arr[2]); + x[3] = BARK_GGML_FP32_TO_FP16(arr[3]); +} + +#define BARK_GGML_F32Cx4 __m128 +#define 
BARK_GGML_F32Cx4_ZERO _mm_setzero_ps() +#define BARK_GGML_F32Cx4_SET1(x) _mm_set1_ps(x) +#define BARK_GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) +#define BARK_GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) +#define BARK_GGML_F32Cx4_FMA BARK_GGML_F32x4_FMA +#define BARK_GGML_F32Cx4_ADD _mm_add_ps +#define BARK_GGML_F32Cx4_MUL _mm_mul_ps +#define BARK_GGML_F32Cx4_REDUCE BARK_GGML_F32x4_REDUCE + +#define BARK_GGML_F16_VEC BARK_GGML_F32Cx4 +#define BARK_GGML_F16_VEC_ZERO BARK_GGML_F32Cx4_ZERO +#define BARK_GGML_F16_VEC_SET1 BARK_GGML_F32Cx4_SET1 +#define BARK_GGML_F16_VEC_LOAD(p, i) BARK_GGML_F32Cx4_LOAD(p) +#define BARK_GGML_F16_VEC_STORE(p, r, i) BARK_GGML_F32Cx4_STORE(p, r[i]) +#define BARK_GGML_F16_VEC_FMA BARK_GGML_F32Cx4_FMA +#define BARK_GGML_F16_VEC_ADD BARK_GGML_F32Cx4_ADD +#define BARK_GGML_F16_VEC_MUL BARK_GGML_F32Cx4_MUL +#define BARK_GGML_F16_VEC_REDUCE BARK_GGML_F32Cx4_REDUCE #endif -// GGML_F32_ARR / GGML_F16_ARR +// BARK_GGML_F32_ARR / BARK_GGML_F16_ARR // number of registers to use per step -#ifdef GGML_SIMD -#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) -#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) +#ifdef BARK_GGML_SIMD +#define BARK_GGML_F32_ARR (BARK_GGML_F32_STEP/BARK_GGML_F32_EPR) +#define BARK_GGML_F16_ARR (BARK_GGML_F16_STEP/BARK_GGML_F16_EPR) #endif // // fundamental operations // -inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void bark_ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void bark_ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } +inline static void bark_ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void bark_ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } -inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void bark_ggml_vec_set_f16(const int n, bark_ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } -inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } -inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } -inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } -inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } -inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } -inline static void 
ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } -inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } -inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } +inline static void bark_ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } +inline static void bark_ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } +inline static void bark_ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } +inline static void bark_ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } +inline static void bark_ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } +inline static void bark_ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void bark_ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } +inline static void bark_ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } +inline static void bark_ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } +inline static void bark_ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } -static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { -#ifdef GGML_SIMD +static void bark_ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { +#ifdef BARK_GGML_SIMD float sumf = 0.0f; - const int np = (n & ~(GGML_F32_STEP - 1)); + const int np = (n & ~(BARK_GGML_F32_STEP - 1)); - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + BARK_GGML_F32_VEC sum[BARK_GGML_F32_ARR] = { BARK_GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + BARK_GGML_F32_VEC ax[BARK_GGML_F32_ARR]; + BARK_GGML_F32_VEC ay[BARK_GGML_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + for (int i = 0; i < np; i += BARK_GGML_F32_STEP) { + for (int j = 0; j < BARK_GGML_F32_ARR; j++) { + ax[j] = BARK_GGML_F32_VEC_LOAD(x + i + j*BARK_GGML_F32_EPR); + ay[j] = BARK_GGML_F32_VEC_LOAD(y + i + j*BARK_GGML_F32_EPR); - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + sum[j] = BARK_GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); + BARK_GGML_F32_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { @@ -2398,52 +2398,52 @@ static void ggml_vec_dot_f32(const int n, float * restrict s, const float * rest } #else // scalar - ggml_float sumf = 0.0; + bark_ggml_float sumf = 0.0; for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(x[i]*y[i]); + sumf += (bark_ggml_float)(x[i]*y[i]); } #endif *s = sumf; } -static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t 
* restrict y) { - ggml_float sumf = 0.0; +static void bark_ggml_vec_dot_f16(const int n, float * restrict s, bark_ggml_fp16_t * restrict x, bark_ggml_fp16_t * restrict y) { + bark_ggml_float sumf = 0.0; -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); +#if defined(BARK_GGML_SIMD) + const int np = (n & ~(BARK_GGML_F16_STEP - 1)); - GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; + BARK_GGML_F16_VEC sum[BARK_GGML_F16_ARR] = { BARK_GGML_F16_VEC_ZERO }; - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; + BARK_GGML_F16_VEC ax[BARK_GGML_F16_ARR]; + BARK_GGML_F16_VEC ay[BARK_GGML_F16_ARR]; - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + for (int i = 0; i < np; i += BARK_GGML_F16_STEP) { + for (int j = 0; j < BARK_GGML_F16_ARR; j++) { + ax[j] = BARK_GGML_F16_VEC_LOAD(x + i + j*BARK_GGML_F16_EPR, j); + ay[j] = BARK_GGML_F16_VEC_LOAD(y + i + j*BARK_GGML_F16_EPR, j); - sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); + sum[j] = BARK_GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); } } // reduce sum0..sum3 to sum0 - GGML_F16_VEC_REDUCE(sumf, sum); + BARK_GGML_F16_VEC_REDUCE(sumf, sum); // leftovers for (int i = np; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); + sumf += (bark_ggml_float)(BARK_GGML_FP16_TO_FP32(x[i])*BARK_GGML_FP16_TO_FP32(y[i])); } #else for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); + sumf += (bark_ggml_float)(BARK_GGML_FP16_TO_FP32(x[i])*BARK_GGML_FP16_TO_FP32(y[i])); } #endif *s = sumf; } -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void bark_ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int qk = QK8_0; const int nb = n / qk; @@ -2456,7 +2456,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 0; i < nb; i += 2) { const block_q4_0 * restrict x0 = &x[i + 0]; const block_q4_0 * restrict x1 = &x[i + 1]; @@ -2492,8 +2492,8 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); @@ -2510,8 +2510,8 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, 
vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #endif } @@ -2523,7 +2523,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; ++i) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); + const __m256 d = _mm256_set1_ps( BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d) ); __m256i bx = bytes_from_nibbles_32(x[i].qs); @@ -2547,7 +2547,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; ++i) { // Compute combined scale for the block - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); + const __m256 d = _mm256_set1_ps( BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d) ); const __m128i lowMask = _mm_set1_epi8(0xF); const __m128i off = _mm_set1_epi8(8); @@ -2589,7 +2589,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) ); + const __m128 d_0_1 = _mm_set1_ps( BARK_GGML_FP16_TO_FP32(x[0].d) * BARK_GGML_FP16_TO_FP32(y[0].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs); @@ -2607,7 +2607,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) ); + const __m128 d_2_3 = _mm_set1_ps( BARK_GGML_FP16_TO_FP32(x[1].d) * BARK_GGML_FP16_TO_FP32(y[1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs); @@ -2635,13 +2635,13 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * } // Main loop - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 2; i < nb; i+=2) { _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0); _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); + const __m128 d_0_1 = _mm_set1_ps( BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs); @@ -2659,7 +2659,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) ); + const __m128 d_2_3 = _mm_set1_ps( BARK_GGML_FP16_TO_FP32(x[i + 1].d) * BARK_GGML_FP16_TO_FP32(y[i + 1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs); @@ -2726,7 +2726,7 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * 
restrict s, const void * int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); + sumf += sumi*BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d); } *s = sumf; @@ -2744,14 +2744,14 @@ static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } - sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); + sumf += sumi*BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d); } *s = sumf; #endif } -static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void bark_ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int qk = QK8_1; const int nb = n / qk; @@ -2767,14 +2767,14 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * float summs = 0; - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 0; i < nb; i += 2) { const block_q4_1 * restrict x0 = &x[i + 0]; const block_q4_1 * restrict x1 = &x[i + 1]; const block_q8_1 * restrict y0 = &y[i + 0]; const block_q8_1 * restrict y1 = &y[i + 1]; - summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s; + summs += BARK_GGML_FP16_TO_FP32(x0->m) * y0->s + BARK_GGML_FP16_TO_FP32(x1->m) * y1->s; const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -2798,8 +2798,8 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), BARK_GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), BARK_GGML_FP16_TO_FP32(x1->d)*y1->d); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l)); @@ -2816,8 +2816,8 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), BARK_GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), BARK_GGML_FP16_TO_FP32(x1->d)*y1->d); #endif } @@ -2830,10 +2830,10 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; ++i) { - const float d0 = GGML_FP16_TO_FP32(x[i].d); + const float d0 = BARK_GGML_FP16_TO_FP32(x[i].d); const float d1 = y[i].d; - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; + summs += BARK_GGML_FP16_TO_FP32(x[i].m) * y[i].s; const __m256 d0v = _mm256_set1_ps( d0 ); const __m256 d1v = _mm256_set1_ps( d1 ); @@ -2885,7 +2885,7 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += 
(GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + BARK_GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -2903,14 +2903,14 @@ static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + BARK_GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; #endif } -static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void bark_ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int qk = QK8_0; const int nb = n / qk; @@ -2930,7 +2930,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * uint64_t tmp0[4]; uint64_t tmp1[4]; - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 0; i < nb; i += 2) { const block_q5_0 * restrict x0 = &x[i]; const block_q5_0 * restrict x1 = &x[i + 1]; @@ -2982,10 +2982,10 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); @@ -3002,8 +3002,8 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #endif } @@ -3064,7 +3064,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); + wasm_f32x4_splat(BARK_GGML_FP16_TO_FP32(x0->d) * BARK_GGML_FP16_TO_FP32(y0->d)))); } *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -3076,7 +3076,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; i++) { /* Compute combined scale for the block */ - const __m256 d = 
_mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); + const __m256 d = _mm256_set1_ps(BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d)); __m256i bx = bytes_from_nibbles_32(x[i].qs); __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3100,7 +3100,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; i++) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); + const __m256 d = _mm256_set1_ps(BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d)); __m256i bx = bytes_from_nibbles_32(x[i].qs); const __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3184,7 +3184,7 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d)) * sumi; } *s = sumf; @@ -3208,14 +3208,14 @@ static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d)) * sumi; } *s = sumf; #endif } -static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void bark_ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int qk = QK8_1; const int nb = n / qk; @@ -3238,7 +3238,7 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * uint64_t tmp0[4]; uint64_t tmp1[4]; - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 0; i < nb; i += 2) { const block_q5_1 * restrict x0 = &x[i]; const block_q5_1 * restrict x1 = &x[i + 1]; @@ -3247,8 +3247,8 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * const uint8x16_t m4b = vdupq_n_u8(0x0F); - summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s; - summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s; + summs0 += BARK_GGML_FP16_TO_FP32(x0->m) * y0->s; + summs1 += BARK_GGML_FP16_TO_FP32(x1->m) * y1->s; // extract the 5th bit via lookup table ((b) << 4) memcpy(&qh0, x0->qh, sizeof(qh0)); @@ -3293,10 +3293,10 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d); + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), BARK_GGML_FP16_TO_FP32(x0->d)*y0->d); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d); + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), BARK_GGML_FP16_TO_FP32(x1->d)*y1->d); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); @@ -3313,8 +3313,8 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, 
vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), BARK_GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), BARK_GGML_FP16_TO_FP32(x1->d)*y1->d); #endif } @@ -3332,7 +3332,7 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * const block_q5_1 * restrict x0 = &x[i]; const block_q8_1 * restrict y0 = &y[i]; - summs += GGML_FP16_TO_FP32(x0->m) * y0->s; + summs += BARK_GGML_FP16_TO_FP32(x0->m) * y0->s; const v128_t m4b = wasm_i8x16_splat(0x0F); @@ -3379,7 +3379,7 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d))); + wasm_f32x4_splat(BARK_GGML_FP16_TO_FP32(x0->d) * y0->d))); } *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -3392,9 +3392,9 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); + const __m256 dx = _mm256_set1_ps(BARK_GGML_FP16_TO_FP32(x[i].d)); - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; + summs += BARK_GGML_FP16_TO_FP32(x[i].m) * y[i].s; __m256i bx = bytes_from_nibbles_32(x[i].qs); __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3419,9 +3419,9 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); + const __m256 dx = _mm256_set1_ps(BARK_GGML_FP16_TO_FP32(x[i].d)); - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; + summs += BARK_GGML_FP16_TO_FP32(x[i].m) * y[i].s; __m256i bx = bytes_from_nibbles_32(x[i].qs); const __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3502,7 +3502,7 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + BARK_GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -3526,14 +3526,14 @@ static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + sumf += (BARK_GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + BARK_GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; #endif } -static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { +static void bark_ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { const int qk = QK8_0; const int nb = n / qk; @@ -3546,7 +3546,7 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t sumv1 = vdupq_n_f32(0.0f); - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb + BARK_GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb for (int i = 0; i < nb; i += 2) { const block_q8_0 * restrict x0 = &x[i + 0]; const block_q8_0 * restrict x1 = &x[i + 1]; @@ -3567,11 +3567,11 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * 
restrict s, const void * #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0)); @@ -3589,8 +3589,8 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1)); const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), BARK_GGML_FP16_TO_FP32(x0->d)*BARK_GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), BARK_GGML_FP16_TO_FP32(x1->d)*BARK_GGML_FP16_TO_FP32(y1->d)); #endif } @@ -3602,7 +3602,7 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * // Main loop for (int i = 0; i < nb; ++i) { // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); + const __m256 d = _mm256_set1_ps(BARK_GGML_FP16_TO_FP32(x[i].d) * BARK_GGML_FP16_TO_FP32(y[i].d)); __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); @@ -3633,7 +3633,7 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); - sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); + sumf += sumi*(BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d)); } *s = sumf; @@ -3648,84 +3648,84 @@ static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * sumi += x[i].qs[j]*y[i].qs[j]; } - sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); + sumf += sumi*(BARK_GGML_FP16_TO_FP32(x[i].d)*BARK_GGML_FP16_TO_FP32(y[i].d)); } *s = sumf; #endif } -// compute GGML_VEC_DOT_UNROLL dot products at once +// compute BARK_GGML_VEC_DOT_UNROLL dot products at once // xs - x row stride in bytes -inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) { - ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 }; +inline static void bark_ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, bark_ggml_fp16_t * restrict y) { + bark_ggml_float sumf[BARK_GGML_VEC_DOT_UNROLL] = { 0.0 }; - ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL]; + bark_ggml_fp16_t * restrict x[BARK_GGML_VEC_DOT_UNROLL]; - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { - x[i] = (ggml_fp16_t *) ((char *) xv + i*xs); + for (int i = 0; i < BARK_GGML_VEC_DOT_UNROLL; ++i) { + x[i] = (bark_ggml_fp16_t *) ((char *) xv + i*xs); } -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); +#if defined(BARK_GGML_SIMD) + const int np = (n & 
~(BARK_GGML_F16_STEP - 1)); - GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } }; + BARK_GGML_F16_VEC sum[BARK_GGML_VEC_DOT_UNROLL][BARK_GGML_F16_ARR] = { { BARK_GGML_F16_VEC_ZERO } }; - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; + BARK_GGML_F16_VEC ax[BARK_GGML_F16_ARR]; + BARK_GGML_F16_VEC ay[BARK_GGML_F16_ARR]; - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); + for (int i = 0; i < np; i += BARK_GGML_F16_STEP) { + for (int j = 0; j < BARK_GGML_F16_ARR; j++) { + ay[j] = BARK_GGML_F16_VEC_LOAD(y + i + j*BARK_GGML_F16_EPR, j); - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j); + for (int k = 0; k < BARK_GGML_VEC_DOT_UNROLL; ++k) { + ax[j] = BARK_GGML_F16_VEC_LOAD(x[k] + i + j*BARK_GGML_F16_EPR, j); - sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); + sum[k][j] = BARK_GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); } } } // reduce sum0..sum3 to sum0 - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - GGML_F16_VEC_REDUCE(sumf[k], sum[k]); + for (int k = 0; k < BARK_GGML_VEC_DOT_UNROLL; ++k) { + BARK_GGML_F16_VEC_REDUCE(sumf[k], sum[k]); } // leftovers for (int i = np; i < n; ++i) { - for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + for (int j = 0; j < BARK_GGML_VEC_DOT_UNROLL; ++j) { + sumf[j] += (bark_ggml_float)(BARK_GGML_FP16_TO_FP32(x[j][i])*BARK_GGML_FP16_TO_FP32(y[i])); } } #else for (int i = 0; i < n; ++i) { - for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + for (int j = 0; j < BARK_GGML_VEC_DOT_UNROLL; ++j) { + sumf[j] += (bark_ggml_float)(BARK_GGML_FP16_TO_FP32(x[j][i])*BARK_GGML_FP16_TO_FP32(y[i])); } } #endif - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { + for (int i = 0; i < BARK_GGML_VEC_DOT_UNROLL; ++i) { s[i] = sumf[i]; } } -inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); +inline static void bark_ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { +#if defined(BARK_GGML_SIMD) + const int np = (n & ~(BARK_GGML_F32_STEP - 1)); - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + BARK_GGML_F32_VEC vx = BARK_GGML_F32_VEC_SET1(v); - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + BARK_GGML_F32_VEC ax[BARK_GGML_F32_ARR]; + BARK_GGML_F32_VEC ay[BARK_GGML_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + for (int i = 0; i < np; i += BARK_GGML_F32_STEP) { + for (int j = 0; j < BARK_GGML_F32_ARR; j++) { + ax[j] = BARK_GGML_F32_VEC_LOAD(x + i + j*BARK_GGML_F32_EPR); + ay[j] = BARK_GGML_F32_VEC_LOAD(y + i + j*BARK_GGML_F32_EPR); + ay[j] = BARK_GGML_F32_VEC_FMA(ay[j], ax[j], vx); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + BARK_GGML_F32_VEC_STORE(y + i + j*BARK_GGML_F32_EPR, ay[j]); } } @@ -3742,50 +3742,50 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float } // xs and vs are byte strides of x and v -inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, 
const float * restrict xv, const float * restrict vv) { +inline static void bark_ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) { - const float * restrict x[GGML_VEC_MAD_UNROLL]; - const float * restrict v[GGML_VEC_MAD_UNROLL]; + const float * restrict x[BARK_GGML_VEC_MAD_UNROLL]; + const float * restrict v[BARK_GGML_VEC_MAD_UNROLL]; - for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) { + for (int i = 0; i < BARK_GGML_VEC_MAD_UNROLL; ++i) { x[i] = (const float *) ((const char *) xv + i*xs); v[i] = (const float *) ((const char *) vv + i*vs); } -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); +#if defined(BARK_GGML_SIMD) + const int np = (n & ~(BARK_GGML_F32_STEP - 1)); - GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL]; + BARK_GGML_F32_VEC vx[BARK_GGML_VEC_MAD_UNROLL]; - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - vx[k] = GGML_F32_VEC_SET1(v[k][0]); + for (int k = 0; k < BARK_GGML_VEC_MAD_UNROLL; ++k) { + vx[k] = BARK_GGML_F32_VEC_SET1(v[k][0]); } - GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + BARK_GGML_F32_VEC ax[BARK_GGML_VEC_MAD_UNROLL][BARK_GGML_F32_ARR]; + BARK_GGML_F32_VEC ay[BARK_GGML_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); + for (int i = 0; i < np; i += BARK_GGML_F32_STEP) { + for (int j = 0; j < BARK_GGML_F32_ARR; j++) { + ay[j] = BARK_GGML_F32_VEC_LOAD(y + i + j*BARK_GGML_F32_EPR); - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { - ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); + for (int k = 0; k < BARK_GGML_VEC_MAD_UNROLL; ++k) { + ax[k][j] = BARK_GGML_F32_VEC_LOAD(x[k] + i + j*BARK_GGML_F32_EPR); + ay[j] = BARK_GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]); } - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + BARK_GGML_F32_VEC_STORE(y + i + j*BARK_GGML_F32_EPR, ay[j]); } } // leftovers - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int k = 0; k < BARK_GGML_VEC_MAD_UNROLL; ++k) { for (int i = np; i < n; ++i) { y[i] += x[k][i]*v[k][0]; } } #else // scalar - for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) { + for (int k = 0; k < BARK_GGML_VEC_MAD_UNROLL; ++k) { for (int i = 0; i < n; ++i) { y[i] += x[k][i]*v[k][0]; } @@ -3793,23 +3793,23 @@ inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int #endif } -//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } -inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { -#if defined(GGML_USE_ACCELERATE) +//inline static void bark_ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } +inline static void bark_ggml_vec_scale_f32(const int n, float * y, const float v) { +#if defined(BARK_GGML_USE_ACCELERATE) vDSP_vsmul(y, 1, &v, y, 1, n); -#elif defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); +#elif defined(BARK_GGML_SIMD) + const int np = (n & ~(BARK_GGML_F32_STEP - 1)); - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + BARK_GGML_F32_VEC vx = BARK_GGML_F32_VEC_SET1(v); - GGML_F32_VEC ay[GGML_F32_ARR]; + BARK_GGML_F32_VEC ay[BARK_GGML_F32_ARR]; - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_MUL(ay[j], vx); + for (int i = 0; i < np; 
i += BARK_GGML_F32_STEP) { + for (int j = 0; j < BARK_GGML_F32_ARR; j++) { + ay[j] = BARK_GGML_F32_VEC_LOAD(y + i + j*BARK_GGML_F32_EPR); + ay[j] = BARK_GGML_F32_VEC_MUL(ay[j], vx); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + BARK_GGML_F32_VEC_STORE(y + i + j*BARK_GGML_F32_EPR, ay[j]); } } @@ -3825,134 +3825,134 @@ inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #endif } -inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); } -inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } -inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } -inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); } -inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } -inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } -inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } -inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } -inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; } -inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } +inline static void bark_ggml_vec_norm_f32 (const int n, float * s, const float * x) { bark_ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); } +inline static void bark_ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } +inline static void bark_ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } +inline static void bark_ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); } +inline static void bark_ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } +inline static void bark_ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } +inline static void bark_ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } +inline static void bark_ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } +inline static void bark_ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; } +inline static void bark_ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 
x[i] : 0.f; } static const float GELU_COEF_A = 0.044715f; static const float GELU_QUICK_COEF = -1.702f; static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; -inline static float ggml_gelu_f32(float x) { +inline static float bark_ggml_gelu_f32(float x) { return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } -inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { +inline static void bark_ggml_vec_gelu_f16(const int n, bark_ggml_fp16_t * y, const bark_ggml_fp16_t * x) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { y[i] = table_gelu_f16[i16[i]]; } } -#ifdef GGML_GELU_FP16 -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { +#ifdef BARK_GGML_GELU_FP16 +inline static void bark_ggml_vec_gelu_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + bark_ggml_fp16_t fp16 = BARK_GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]); + y[i] = BARK_GGML_FP16_TO_FP32(table_gelu_f16[t]); } } #else -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { +inline static void bark_ggml_vec_gelu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { - y[i] = ggml_gelu_f32(x[i]); + y[i] = bark_ggml_gelu_f32(x[i]); } } #endif -inline static float ggml_gelu_quick_f32(float x) { +inline static float bark_ggml_gelu_quick_f32(float x) { return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x))); } -//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { +//inline static void bark_ggml_vec_gelu_quick_f16(const int n, bark_ggml_fp16_t * y, const bark_ggml_fp16_t * x) { // const uint16_t * i16 = (const uint16_t *) x; // for (int i = 0; i < n; ++i) { // y[i] = table_gelu_quick_f16[i16[i]]; // } //} -#ifdef GGML_GELU_QUICK_FP16 -inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { +#ifdef BARK_GGML_GELU_QUICK_FP16 +inline static void bark_ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + bark_ggml_fp16_t fp16 = BARK_GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]); + y[i] = BARK_GGML_FP16_TO_FP32(table_gelu_quick_f16[t]); } } #else -inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { +inline static void bark_ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { - y[i] = ggml_gelu_quick_f32(x[i]); + y[i] = bark_ggml_gelu_quick_f32(x[i]); } } #endif // Sigmoid Linear Unit (SiLU) function -inline static float ggml_silu_f32(float x) { +inline static float bark_ggml_silu_f32(float x) { return x/(1.0f + expf(-x)); } -//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { +//inline static void bark_ggml_vec_silu_f16(const int n, bark_ggml_fp16_t * y, const bark_ggml_fp16_t * x) { // const uint16_t * i16 = (const uint16_t *) x; // for (int i = 0; i < n; ++i) { // y[i] = table_silu_f16[i16[i]]; // } //} -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { +#ifdef BARK_GGML_SILU_FP16 +inline static void bark_ggml_vec_silu_f32(const int n, float * y, const float * x) { uint16_t t; for (int i = 0; i < n; ++i) { - 
ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + bark_ggml_fp16_t fp16 = BARK_GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]); + y[i] = BARK_GGML_FP16_TO_FP32(table_silu_f16[t]); } } #else -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { +inline static void bark_ggml_vec_silu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { - y[i] = ggml_silu_f32(x[i]); + y[i] = bark_ggml_silu_f32(x[i]); } } #endif -inline static float ggml_silu_backward_f32(float x, float dy) { +inline static float bark_ggml_silu_backward_f32(float x, float dy) { const float s = 1.0f/(1.0f + expf(-x)); return dy*s*(1.0f + x*(1.0f - s)); } -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { +#ifdef BARK_GGML_SILU_FP16 +inline static void bark_ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { for (int i = 0; i < n; ++i) { // we did not use x[i] to compute forward silu but its f16 equivalent // take derivative at f16 of x[i]: - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - float usedx = GGML_FP16_TO_FP32(fp16); - dx[i] = ggml_silu_backward_f32(usedx, dy[i]); + bark_ggml_fp16_t fp16 = BARK_GGML_FP32_TO_FP16(x[i]); + float usedx = BARK_GGML_FP16_TO_FP32(fp16); + dx[i] = bark_ggml_silu_backward_f32(usedx, dy[i]); } } #else -inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { +inline static void bark_ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { for (int i = 0; i < n; ++i) { - dx[i] = ggml_silu_backward_f32(x[i], dy[i]); + dx[i] = bark_ggml_silu_backward_f32(x[i], dy[i]); } } #endif -inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE - ggml_float sum = 0.0; +inline static void bark_ggml_vec_sum_f32(const int n, float * s, const float * x) { +#ifndef BARK_GGML_USE_ACCELERATE + bark_ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { - sum += (ggml_float)x[i]; + sum += (bark_ggml_float)x[i]; } *s = sum; #else @@ -3960,24 +3960,24 @@ inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #endif } -inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) { - ggml_float sum = 0.0; +inline static void bark_ggml_vec_sum_f32_ggf(const int n, bark_ggml_float * s, const float * x) { + bark_ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { - sum += (ggml_float)x[i]; + sum += (bark_ggml_float)x[i]; } *s = sum; } -inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) { +inline static void bark_ggml_vec_sum_f16_ggf(const int n, float * s, const bark_ggml_fp16_t * x) { float sum = 0.0f; for (int i = 0; i < n; ++i) { - sum += GGML_FP16_TO_FP32(x[i]); + sum += BARK_GGML_FP16_TO_FP32(x[i]); } *s = sum; } -inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE +inline static void bark_ggml_vec_max_f32(const int n, float * s, const float * x) { +#ifndef BARK_GGML_USE_ACCELERATE float max = -INFINITY; for (int i = 0; i < n; ++i) { max = MAX(max, x[i]); @@ -3988,12 +3988,12 @@ inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { #endif } -inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { - ggml_vec_norm_f32(n, s, x); +inline static void bark_ggml_vec_norm_inv_f32(const 
int n, float * s, const float * x) { + bark_ggml_vec_norm_f32(n, s, x); *s = 1.f/(*s); } -inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) { +inline static void bark_ggml_vec_argmax_f32(const int n, int * s, const float * x) { float max = -INFINITY; int idx = 0; for (int i = 0; i < n; ++i) { @@ -4007,7 +4007,7 @@ inline static void ggml_vec_argmax_f32(const int n, int * s, const float * x) { // data types // -static const char * GGML_OP_NAME[76] = { +static const char * BARK_GGML_OP_NAME[76] = { "NONE", "DUP", @@ -4097,9 +4097,9 @@ static const char * GGML_OP_NAME[76] = { "CROSS_ENTROPY_LOSS_BACK", }; -static_assert(GGML_OP_COUNT == 74, "GGML_OP_COUNT != 74"); +static_assert(BARK_GGML_OP_COUNT == 74, "BARK_GGML_OP_COUNT != 74"); -static const char * GGML_OP_SYMBOL[76] = { +static const char * BARK_GGML_OP_SYMBOL[76] = { "none", "x", @@ -4189,12 +4189,12 @@ static const char * GGML_OP_SYMBOL[76] = { "cross_entropy_loss_back(x,y)", }; -static_assert(GGML_OP_COUNT == 74, "GGML_OP_COUNT != 74"); +static_assert(BARK_GGML_OP_COUNT == 74, "BARK_GGML_OP_COUNT != 74"); -static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); +static_assert(BARK_GGML_OP_POOL_COUNT == 2, "BARK_GGML_OP_POOL_COUNT != 2"); -static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); -static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN"); +static_assert(sizeof(struct bark_ggml_object)%BARK_GGML_MEM_ALIGN == 0, "bark_ggml_object size must be a multiple of BARK_GGML_MEM_ALIGN"); +static_assert(sizeof(struct bark_ggml_tensor)%BARK_GGML_MEM_ALIGN == 0, "bark_ggml_tensor size must be a multiple of BARK_GGML_MEM_ALIGN"); // WARN: // Mis-confguration can lead to problem that's hard to reason about: @@ -4202,38 +4202,38 @@ static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size // * At worst it talks slightly difference but hard to perceive. // // An op has to enable INIT or FINALIZE when any of it's branch needs that pass. -// Take care about compile options (e.g., GGML_USE_xxx). -static bool GGML_OP_HAS_INIT [GGML_OP_COUNT] = { 0 }; -static bool GGML_OP_HAS_FINALIZE[GGML_OP_COUNT] = { 0 }; +// Take care about compile options (e.g., BARK_GGML_USE_xxx). 
+static bool BARK_GGML_OP_HAS_INIT [BARK_GGML_OP_COUNT] = { 0 }; +static bool BARK_GGML_OP_HAS_FINALIZE[BARK_GGML_OP_COUNT] = { 0 }; -static void ggml_setup_op_has_task_pass(void) { +static void bark_ggml_setup_op_has_task_pass(void) { { // INIT - bool * p = GGML_OP_HAS_INIT; - - p[GGML_OP_ACC ] = true; - p[GGML_OP_MUL_MAT ] = true; - p[GGML_OP_OUT_PROD ] = true; - p[GGML_OP_SET ] = true; - p[GGML_OP_GET_ROWS_BACK ] = true; - p[GGML_OP_DIAG_MASK_INF ] = true; - p[GGML_OP_DIAG_MASK_ZERO ] = true; - p[GGML_OP_CONV_1D ] = true; - p[GGML_OP_CONV_1D_STAGE_0 ] = true; - p[GGML_OP_CONV_1D_STAGE_1 ] = true; - p[GGML_OP_CONV_TRANSPOSE_1D ] = true; - p[GGML_OP_CONV_2D ] = true; - p[GGML_OP_CONV_2D_STAGE_0 ] = true; - p[GGML_OP_CONV_2D_STAGE_1 ] = true; - p[GGML_OP_CONV_TRANSPOSE_2D ] = true; - p[GGML_OP_FLASH_ATTN_BACK ] = true; - p[GGML_OP_CROSS_ENTROPY_LOSS ] = true; - p[GGML_OP_ADD_REL_POS ] = true; + bool * p = BARK_GGML_OP_HAS_INIT; + + p[BARK_GGML_OP_ACC ] = true; + p[BARK_GGML_OP_MUL_MAT ] = true; + p[BARK_GGML_OP_OUT_PROD ] = true; + p[BARK_GGML_OP_SET ] = true; + p[BARK_GGML_OP_GET_ROWS_BACK ] = true; + p[BARK_GGML_OP_DIAG_MASK_INF ] = true; + p[BARK_GGML_OP_DIAG_MASK_ZERO ] = true; + p[BARK_GGML_OP_CONV_1D ] = true; + p[BARK_GGML_OP_CONV_1D_STAGE_0 ] = true; + p[BARK_GGML_OP_CONV_1D_STAGE_1 ] = true; + p[BARK_GGML_OP_CONV_TRANSPOSE_1D ] = true; + p[BARK_GGML_OP_CONV_2D ] = true; + p[BARK_GGML_OP_CONV_2D_STAGE_0 ] = true; + p[BARK_GGML_OP_CONV_2D_STAGE_1 ] = true; + p[BARK_GGML_OP_CONV_TRANSPOSE_2D ] = true; + p[BARK_GGML_OP_FLASH_ATTN_BACK ] = true; + p[BARK_GGML_OP_CROSS_ENTROPY_LOSS ] = true; + p[BARK_GGML_OP_ADD_REL_POS ] = true; } { // FINALIZE - bool * p = GGML_OP_HAS_FINALIZE; + bool * p = BARK_GGML_OP_HAS_FINALIZE; - p[GGML_OP_CROSS_ENTROPY_LOSS ] = true; + p[BARK_GGML_OP_CROSS_ENTROPY_LOSS ] = true; } } @@ -4241,7 +4241,7 @@ static void ggml_setup_op_has_task_pass(void) { // ggml context // -struct ggml_context { +struct bark_ggml_context { size_t mem_size; void * mem_buffer; bool mem_buffer_owned; @@ -4250,33 +4250,33 @@ struct ggml_context { int n_objects; - struct ggml_object * objects_begin; - struct ggml_object * objects_end; + struct bark_ggml_object * objects_begin; + struct bark_ggml_object * objects_end; - struct ggml_scratch scratch; - struct ggml_scratch scratch_save; + struct bark_ggml_scratch scratch; + struct bark_ggml_scratch scratch_save; }; -struct ggml_context_container { +struct bark_ggml_context_container { bool used; - struct ggml_context context; + struct bark_ggml_context context; }; // // NUMA support // -#define GGML_NUMA_MAX_NODES 8 -#define GGML_NUMA_MAX_CPUS 512 +#define BARK_GGML_NUMA_MAX_NODES 8 +#define BARK_GGML_NUMA_MAX_CPUS 512 -struct ggml_numa_node { - uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node +struct bark_ggml_numa_node { + uint32_t cpus[BARK_GGML_NUMA_MAX_CPUS]; // hardware threads on this node uint32_t n_cpus; }; -struct ggml_numa_nodes { - struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; +struct bark_ggml_numa_nodes { + struct bark_ggml_numa_node nodes[BARK_GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system }; @@ -4285,17 +4285,17 @@ struct ggml_numa_nodes { // ggml state // -struct ggml_state { - struct ggml_context_container contexts[GGML_MAX_CONTEXTS]; - struct ggml_numa_nodes numa; +struct bark_ggml_state { + struct bark_ggml_context_container contexts[BARK_GGML_MAX_CONTEXTS]; + struct bark_ggml_numa_nodes numa; }; // global state -static struct ggml_state g_state; +static 
struct bark_ggml_state g_state; static atomic_int g_state_barrier = 0; // barrier via spin lock -inline static void ggml_critical_section_start(void) { +inline static void bark_ggml_critical_section_start(void) { int processing = atomic_fetch_add(&g_state_barrier, 1); while (processing > 0) { @@ -4308,13 +4308,13 @@ inline static void ggml_critical_section_start(void) { // TODO: make this somehow automatically executed // some sort of "sentry" mechanism -inline static void ggml_critical_section_end(void) { +inline static void bark_ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } -void ggml_numa_init(void) { +void bark_ggml_numa_init(void) { if (g_state.numa.n_nodes > 0) { - fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); + fprintf(stderr, "bark_ggml_numa_init: NUMA already initialized\n"); return; } @@ -4325,22 +4325,22 @@ void ggml_numa_init(void) { int rv; // enumerate nodes - while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) { + while (g_state.numa.n_nodes < BARK_GGML_NUMA_MAX_NODES) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes); - GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); + BARK_GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) != 0) { break; } ++g_state.numa.n_nodes; } // enumerate CPUs - while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) { + while (g_state.numa.total_cpus < BARK_GGML_NUMA_MAX_CPUS) { rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus); - GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); + BARK_GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) != 0) { break; } ++g_state.numa.total_cpus; } - GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); + BARK_GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) { g_state.numa.n_nodes = 0; @@ -4348,26 +4348,26 @@ void ggml_numa_init(void) { } for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { - struct ggml_numa_node * node = &g_state.numa.nodes[n]; - GGML_PRINT_DEBUG("CPUs on node %u:", n); + struct bark_ggml_numa_node * node = &g_state.numa.nodes[n]; + BARK_GGML_PRINT_DEBUG("CPUs on node %u:", n); node->n_cpus = 0; for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c); - GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); + BARK_GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); if (stat(path, &st) == 0) { node->cpus[node->n_cpus++] = c; - GGML_PRINT_DEBUG(" %u", c); + BARK_GGML_PRINT_DEBUG(" %u", c); } } - GGML_PRINT_DEBUG("\n"); + BARK_GGML_PRINT_DEBUG("\n"); } - if (ggml_is_numa()) { + if (bark_ggml_is_numa()) { FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r"); if (fptr != NULL) { char buf[42]; if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) { - GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n"); + BARK_GGML_PRINT("WARNING: /proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n"); } fclose(fptr); } @@ -4377,54 +4377,54 @@ void ggml_numa_init(void) { #endif } -bool ggml_is_numa(void) { +bool bark_ggml_is_numa(void) { return g_state.numa.n_nodes > 1; } //////////////////////////////////////////////////////////////////////////////// -void ggml_print_object(const struct 
ggml_object * obj) { - GGML_PRINT(" - ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n", +void bark_ggml_print_object(const struct bark_ggml_object * obj) { + BARK_GGML_PRINT(" - bark_ggml_object: type = %d, offset = %zu, size = %zu, next = %p\n", obj->type, obj->offs, obj->size, (const void *) obj->next); } -void ggml_print_objects(const struct ggml_context * ctx) { - struct ggml_object * obj = ctx->objects_begin; +void bark_ggml_print_objects(const struct bark_ggml_context * ctx) { + struct bark_ggml_object * obj = ctx->objects_begin; - GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); + BARK_GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); while (obj != NULL) { - ggml_print_object(obj); + bark_ggml_print_object(obj); obj = obj->next; } - GGML_PRINT("%s: --- end ---\n", __func__); + BARK_GGML_PRINT("%s: --- end ---\n", __func__); } -int64_t ggml_nelements(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +int64_t bark_ggml_nelements(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; } -int64_t ggml_nrows(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +int64_t bark_ggml_nrows(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; } -size_t ggml_nbytes(const struct ggml_tensor * tensor) { +size_t bark_ggml_nbytes(const struct bark_ggml_tensor * tensor) { size_t nbytes; - size_t blck_size = ggml_blck_size(tensor->type); + size_t blck_size = bark_ggml_blck_size(tensor->type); if (blck_size == 1) { - nbytes = ggml_type_size(tensor->type); - for (int i = 0; i < GGML_MAX_DIMS; ++i) { + nbytes = bark_ggml_type_size(tensor->type); + for (int i = 0; i < BARK_GGML_MAX_DIMS; ++i) { nbytes += (tensor->ne[i] - 1)*tensor->nb[i]; } } else { nbytes = tensor->ne[0]*tensor->nb[0]/blck_size; - for (int i = 1; i < GGML_MAX_DIMS; ++i) { + for (int i = 1; i < BARK_GGML_MAX_DIMS; ++i) { nbytes += (tensor->ne[i] - 1)*tensor->nb[i]; } } @@ -4432,151 +4432,151 @@ size_t ggml_nbytes(const struct ggml_tensor * tensor) { return nbytes; } -size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) { - return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN); +size_t bark_ggml_nbytes_pad(const struct bark_ggml_tensor * tensor) { + return BARK_GGML_PAD(bark_ggml_nbytes(tensor), BARK_GGML_MEM_ALIGN); } -size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +size_t bark_ggml_nbytes_split(const struct bark_ggml_tensor * tensor, int nrows_split) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); - return (nrows_split*tensor->ne[0]*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type); + return (nrows_split*tensor->ne[0]*bark_ggml_type_size(tensor->type))/bark_ggml_blck_size(tensor->type); } -int ggml_blck_size(enum ggml_type type) { +int bark_ggml_blck_size(enum bark_ggml_type type) { return type_traits[type].blck_size; } -size_t ggml_type_size(enum ggml_type type) { +size_t bark_ggml_type_size(enum bark_ggml_type type) { return type_traits[type].type_size; } -float 
ggml_type_sizef(enum ggml_type type) { +float bark_ggml_type_sizef(enum bark_ggml_type type) { return ((float)(type_traits[type].type_size))/type_traits[type].blck_size; } -const char * ggml_type_name(enum ggml_type type) { +const char * bark_ggml_type_name(enum bark_ggml_type type) { return type_traits[type].type_name; } -bool ggml_is_quantized(enum ggml_type type) { +bool bark_ggml_is_quantized(enum bark_ggml_type type) { return type_traits[type].is_quantized; } -const char * ggml_op_name(enum ggml_op op) { - return GGML_OP_NAME[op]; +const char * bark_ggml_op_name(enum bark_ggml_op op) { + return BARK_GGML_OP_NAME[op]; } -const char * ggml_op_symbol(enum ggml_op op) { - return GGML_OP_SYMBOL[op]; +const char * bark_ggml_op_symbol(enum bark_ggml_op op) { + return BARK_GGML_OP_SYMBOL[op]; } -size_t ggml_element_size(const struct ggml_tensor * tensor) { - return ggml_type_size(tensor->type); +size_t bark_ggml_element_size(const struct bark_ggml_tensor * tensor) { + return bark_ggml_type_size(tensor->type); } -static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_is_scalar(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool ggml_is_vector(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_is_vector(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_is_matrix(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->ne[2] == 1 && tensor->ne[3] == 1; } -static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_can_mul_mat(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return (t0->ne[0] == t1->ne[0]) && (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable (t1->ne[3]%t0->ne[3] == 0); } -static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_can_out_prod(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return (t0->ne[1] == t1->ne[1]) && (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable (t1->ne[3]%t0->ne[3] == 0); } -enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) { - enum ggml_type wtype = GGML_TYPE_COUNT; +enum bark_ggml_type bark_ggml_ftype_to_bark_ggml_type(enum bark_ggml_ftype ftype) { + enum bark_ggml_type wtype = BARK_GGML_TYPE_COUNT; switch 
(ftype) { - case GGML_FTYPE_ALL_F32: wtype = GGML_TYPE_F32; break; - case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break; - case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break; - case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break; - case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break; - case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break; - case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break; - case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break; - case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break; - case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break; - case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break; - case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break; - case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break; - case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break; - } - - GGML_ASSERT(wtype != GGML_TYPE_COUNT); + case BARK_GGML_FTYPE_ALL_F32: wtype = BARK_GGML_TYPE_F32; break; + case BARK_GGML_FTYPE_MOSTLY_F16: wtype = BARK_GGML_TYPE_F16; break; + case BARK_GGML_FTYPE_MOSTLY_Q4_0: wtype = BARK_GGML_TYPE_Q4_0; break; + case BARK_GGML_FTYPE_MOSTLY_Q4_1: wtype = BARK_GGML_TYPE_Q4_1; break; + case BARK_GGML_FTYPE_MOSTLY_Q5_0: wtype = BARK_GGML_TYPE_Q5_0; break; + case BARK_GGML_FTYPE_MOSTLY_Q5_1: wtype = BARK_GGML_TYPE_Q5_1; break; + case BARK_GGML_FTYPE_MOSTLY_Q8_0: wtype = BARK_GGML_TYPE_Q8_0; break; + case BARK_GGML_FTYPE_MOSTLY_Q2_K: wtype = BARK_GGML_TYPE_Q2_K; break; + case BARK_GGML_FTYPE_MOSTLY_Q3_K: wtype = BARK_GGML_TYPE_Q3_K; break; + case BARK_GGML_FTYPE_MOSTLY_Q4_K: wtype = BARK_GGML_TYPE_Q4_K; break; + case BARK_GGML_FTYPE_MOSTLY_Q5_K: wtype = BARK_GGML_TYPE_Q5_K; break; + case BARK_GGML_FTYPE_MOSTLY_Q6_K: wtype = BARK_GGML_TYPE_Q6_K; break; + case BARK_GGML_FTYPE_UNKNOWN: wtype = BARK_GGML_TYPE_COUNT; break; + case BARK_GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = BARK_GGML_TYPE_COUNT; break; + } + + BARK_GGML_ASSERT(wtype != BARK_GGML_TYPE_COUNT); return wtype; } -size_t ggml_tensor_overhead(void) { - return GGML_OBJECT_SIZE + GGML_TENSOR_SIZE; +size_t bark_ggml_tensor_overhead(void) { + return BARK_GGML_OBJECT_SIZE + BARK_GGML_TENSOR_SIZE; } -bool ggml_is_transposed(const struct ggml_tensor * tensor) { +bool bark_ggml_is_transposed(const struct bark_ggml_tensor * tensor) { return tensor->nb[0] > tensor->nb[1]; } -bool ggml_is_contiguous(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +bool bark_ggml_is_contiguous(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return - tensor->nb[0] == ggml_type_size(tensor->type) && - tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/ggml_blck_size(tensor->type) && + tensor->nb[0] == bark_ggml_type_size(tensor->type) && + tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/bark_ggml_blck_size(tensor->type) && tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } -static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_is_contiguous_except_dim_1(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return - tensor->nb[0] == ggml_type_size(tensor->type) && + tensor->nb[0] == bark_ggml_type_size(tensor->type) && tensor->nb[2] 
== tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } -bool ggml_is_permuted(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +bool bark_ggml_is_permuted(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return tensor->nb[0] > tensor->nb[1] || tensor->nb[1] > tensor->nb[2] || tensor->nb[2] > tensor->nb[3]; } -static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_is_padded_1d(const struct bark_ggml_tensor * tensor) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return - tensor->nb[0] == ggml_type_size(tensor->type) && + tensor->nb[0] == bark_ggml_type_size(tensor->type) && tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } -bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +bool bark_ggml_are_same_shape(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return (t0->ne[0] == t1->ne[0] ) && @@ -4586,8 +4586,8 @@ bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor } // check if t1 can be represented as a repeatition of t0 -static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_can_repeat(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); return (t1->ne[0]%t0->ne[0] == 0) && @@ -4596,67 +4596,67 @@ static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct g (t1->ne[3]%t0->ne[3] == 0); } -static inline bool ggml_can_repeat_rows(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); +static inline bool bark_ggml_can_repeat_rows(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1) { + static_assert(BARK_GGML_MAX_DIMS == 4, "BARK_GGML_MAX_DIMS is not 4 - update this function"); - return (t0->ne[0] == t1->ne[0]) && ggml_can_repeat(t0, t1); + return (t0->ne[0] == t1->ne[0]) && bark_ggml_can_repeat(t0, t1); } -static inline int ggml_up32(int n) { +static inline int bark_ggml_up32(int n) { return (n + 31) & ~31; } -//static inline int ggml_up64(int n) { +//static inline int bark_ggml_up64(int n) { // return (n + 63) & ~63; //} -static inline int ggml_up(int n, int m) { +static inline int bark_ggml_up(int n, int m) { // assert m is a power of 2 - GGML_ASSERT((m & (m - 1)) == 0); + BARK_GGML_ASSERT((m & (m - 1)) == 0); return (n + m - 1) & ~(m - 1); } -// assert that pointer is aligned to GGML_MEM_ALIGN -#define ggml_assert_aligned(ptr) \ - GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0) +// assert that pointer is aligned to BARK_GGML_MEM_ALIGN +#define bark_ggml_assert_aligned(ptr) \ + BARK_GGML_ASSERT(((uintptr_t) (ptr))%BARK_GGML_MEM_ALIGN == 0) 
//////////////////////////////////////////////////////////////////////////////// -struct ggml_context * ggml_init(struct ggml_init_params params) { +struct bark_ggml_context * bark_ggml_init(struct bark_ggml_init_params params) { // make this function thread safe - ggml_critical_section_start(); + bark_ggml_critical_section_start(); static bool is_first_call = true; if (is_first_call) { // initialize time system (required on Windows) - ggml_time_init(); + bark_ggml_time_init(); // initialize GELU, Quick GELU, SILU and EXP F32 tables { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); + const uint64_t t_start = bark_ggml_time_us(); UNUSED(t_start); - ggml_fp16_t ii; + bark_ggml_fp16_t ii; for (int i = 0; i < (1 << 16); ++i) { uint16_t ui = i; memcpy(&ii, &ui, sizeof(ii)); - const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); - table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); - table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); + const float f = table_f32_f16[i] = BARK_GGML_COMPUTE_FP16_TO_FP32(ii); + table_gelu_f16[i] = BARK_GGML_FP32_TO_FP16(bark_ggml_gelu_f32(f)); + table_gelu_quick_f16[i] = BARK_GGML_FP32_TO_FP16(bark_ggml_gelu_quick_f32(f)); + table_silu_f16[i] = BARK_GGML_FP32_TO_FP16(bark_ggml_silu_f32(f)); + table_exp_f16[i] = BARK_GGML_FP32_TO_FP16(expf(f)); } - const uint64_t t_end = ggml_time_us(); UNUSED(t_end); + const uint64_t t_end = bark_ggml_time_us(); UNUSED(t_end); - GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); + BARK_GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); } // initialize g_state { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); + const uint64_t t_start = bark_ggml_time_us(); UNUSED(t_start); - g_state = (struct ggml_state) { + g_state = (struct bark_ggml_state) { /*.contexts =*/ { { 0 } }, /*.numa =*/ { .n_nodes = 0, @@ -4664,93 +4664,91 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { }, }; - for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) { + for (int i = 0; i < BARK_GGML_MAX_CONTEXTS; ++i) { g_state.contexts[i].used = false; } - const uint64_t t_end = ggml_time_us(); UNUSED(t_end); + const uint64_t t_end = bark_ggml_time_us(); UNUSED(t_end); - GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); + BARK_GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); } -#if defined(GGML_USE_CUBLAS) - ggml_init_cublas(); -#elif defined(GGML_USE_CLBLAST) - ggml_cl_init(); +#if defined(BARK_GGML_USE_CUBLAS) + bark_ggml_init_cublas(); +#elif defined(BARK_GGML_USE_CLBLAST) + bark_ggml_cl_init(); #endif - ggml_setup_op_has_task_pass(); + bark_ggml_setup_op_has_task_pass(); is_first_call = false; } // find non-used context in g_state - struct ggml_context * ctx = NULL; + struct bark_ggml_context * ctx = NULL; - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + for (int i = 0; i < BARK_GGML_MAX_CONTEXTS; i++) { if (!g_state.contexts[i].used) { g_state.contexts[i].used = true; ctx = &g_state.contexts[i].context; - GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); + BARK_GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); break; } } if (ctx == NULL) { - GGML_PRINT_DEBUG("%s: no unused context found\n", __func__); + BARK_GGML_PRINT_DEBUG("%s: no 
unused context found\n", __func__); - ggml_critical_section_end(); + bark_ggml_critical_section_end(); return NULL; } - // allow to call ggml_init with 0 size + // allow to call bark_ggml_init with 0 size if (params.mem_size == 0) { - params.mem_size = GGML_MEM_ALIGN; - } - - const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN); - - *ctx = (struct ggml_context) { - /*.mem_size =*/ mem_size, - /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size), - /*.mem_buffer_owned =*/ params.mem_buffer ? false : true, - /*.no_alloc =*/ params.no_alloc, - /*.no_alloc_save =*/ params.no_alloc, - /*.n_objects =*/ 0, - /*.objects_begin =*/ NULL, - /*.objects_end =*/ NULL, - /*.scratch =*/ { 0, 0, NULL, }, - /*.scratch_save =*/ { 0, 0, NULL, }, - }; + params.mem_size = BARK_GGML_MEM_ALIGN; + } + + const size_t mem_size = params.mem_buffer ? params.mem_size : BARK_GGML_PAD(params.mem_size, BARK_GGML_MEM_ALIGN); + + ctx->mem_size = mem_size; + ctx->mem_buffer = params.mem_buffer ? params.mem_buffer : BARK_GGML_ALIGNED_MALLOC(mem_size); + ctx->mem_buffer_owned = params.mem_buffer ? false : true; + ctx->no_alloc = params.no_alloc; + ctx->no_alloc_save = params.no_alloc; + ctx->n_objects = 0; + ctx->objects_begin = NULL; + ctx->objects_end = NULL; + ctx->scratch = (struct bark_ggml_scratch) { 0, 0, NULL }; + ctx->scratch_save = (struct bark_ggml_scratch) { 0, 0, NULL }; - GGML_ASSERT(ctx->mem_buffer != NULL); + BARK_GGML_ASSERT(ctx->mem_buffer != NULL); - ggml_assert_aligned(ctx->mem_buffer); + bark_ggml_assert_aligned(ctx->mem_buffer); - GGML_PRINT_DEBUG("%s: context initialized\n", __func__); + BARK_GGML_PRINT_DEBUG("%s: context initialized\n", __func__); - ggml_critical_section_end(); + bark_ggml_critical_section_end(); return ctx; } -void ggml_free(struct ggml_context * ctx) { +void bark_ggml_free(struct bark_ggml_context * ctx) { // make this function thread safe - ggml_critical_section_start(); + bark_ggml_critical_section_start(); bool found = false; - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { + for (int i = 0; i < BARK_GGML_MAX_CONTEXTS; i++) { if (&g_state.contexts[i].context == ctx) { g_state.contexts[i].used = false; - GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n", - __func__, i, ggml_used_mem(ctx)); + BARK_GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n", + __func__, i, bark_ggml_used_mem(ctx)); if (ctx->mem_buffer_owned) { - GGML_ALIGNED_FREE(ctx->mem_buffer); + BARK_GGML_ALIGNED_FREE(ctx->mem_buffer); } found = true; @@ -4759,17 +4757,17 @@ void ggml_free(struct ggml_context * ctx) { } if (!found) { - GGML_PRINT_DEBUG("%s: context not found\n", __func__); + BARK_GGML_PRINT_DEBUG("%s: context not found\n", __func__); } - ggml_critical_section_end(); + bark_ggml_critical_section_end(); } -size_t ggml_used_mem(const struct ggml_context * ctx) { +size_t bark_ggml_used_mem(const struct bark_ggml_context * ctx) { return ctx->objects_end == NULL ? 0 : ctx->objects_end->offs + ctx->objects_end->size; } -size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) { +size_t bark_ggml_set_scratch(struct bark_ggml_context * ctx, struct bark_ggml_scratch scratch) { const size_t result = ctx->scratch.data ? 
ctx->scratch.offs : 0; ctx->scratch = scratch; @@ -4777,32 +4775,32 @@ size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) return result; } -bool ggml_get_no_alloc(struct ggml_context * ctx) { +bool bark_ggml_get_no_alloc(struct bark_ggml_context * ctx) { return ctx->no_alloc; } -void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc) { +void bark_ggml_set_no_alloc(struct bark_ggml_context * ctx, bool no_alloc) { ctx->no_alloc = no_alloc; } -void * ggml_get_mem_buffer(const struct ggml_context * ctx) { +void * bark_ggml_get_mem_buffer(const struct bark_ggml_context * ctx) { return ctx->mem_buffer; } -size_t ggml_get_mem_size(const struct ggml_context * ctx) { +size_t bark_ggml_get_mem_size(const struct bark_ggml_context * ctx) { return ctx->mem_size; } -size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) { +size_t bark_ggml_get_max_tensor_size(const struct bark_ggml_context * ctx) { size_t max_size = 0; - struct ggml_object * obj = ctx->objects_begin; + struct bark_ggml_object * obj = ctx->objects_begin; while (obj != NULL) { - if (obj->type == GGML_OBJECT_TENSOR) { - struct ggml_tensor * tensor = (struct ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs); + if (obj->type == BARK_GGML_OBJECT_TENSOR) { + struct bark_ggml_tensor * tensor = (struct bark_ggml_tensor *) ((char *) ctx->mem_buffer + obj->offs); - const size_t size = ggml_nbytes(tensor); + const size_t size = bark_ggml_nbytes(tensor); if (max_size < size) { max_size = size; @@ -4820,7 +4818,7 @@ size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) { // this is an error prone process, but it is necessary to support inplace // operators when using scratch buffers // TODO: implement a better way -static void ggml_scratch_save(struct ggml_context * ctx) { +static void bark_ggml_scratch_save(struct bark_ggml_context * ctx) { // this is needed to allow opt tensors to store their data // TODO: again, need to find a better way ctx->no_alloc_save = ctx->no_alloc; @@ -4830,7 +4828,7 @@ static void ggml_scratch_save(struct ggml_context * ctx) { ctx->scratch.data = NULL; } -static void ggml_scratch_load(struct ggml_context * ctx) { +static void bark_ggml_scratch_load(struct bark_ggml_context * ctx) { ctx->no_alloc = ctx->no_alloc_save; ctx->scratch = ctx->scratch_save; @@ -4838,35 +4836,35 @@ static void ggml_scratch_load(struct ggml_context * ctx) { //////////////////////////////////////////////////////////////////////////////// -static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml_object_type type, size_t size) { +static struct bark_ggml_object * bark_ggml_new_object(struct bark_ggml_context * ctx, enum bark_ggml_object_type type, size_t size) { // always insert objects at the end of the context's memory pool - struct ggml_object * obj_cur = ctx->objects_end; + struct bark_ggml_object * obj_cur = ctx->objects_end; const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs; const size_t cur_size = obj_cur == NULL ? 
0 : obj_cur->size; const size_t cur_end = cur_offs + cur_size; - // align to GGML_MEM_ALIGN - size_t size_needed = GGML_PAD(size, GGML_MEM_ALIGN); + // align to BARK_GGML_MEM_ALIGN + size_t size_needed = BARK_GGML_PAD(size, BARK_GGML_MEM_ALIGN); char * const mem_buffer = ctx->mem_buffer; - struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end); + struct bark_ggml_object * const obj_new = (struct bark_ggml_object *)(mem_buffer + cur_end); - if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { - GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", + if (cur_end + size_needed + BARK_GGML_OBJECT_SIZE > ctx->mem_size) { + BARK_GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", __func__, cur_end + size_needed, ctx->mem_size); assert(false); return NULL; } - *obj_new = (struct ggml_object) { - .offs = cur_end + GGML_OBJECT_SIZE, + *obj_new = (struct bark_ggml_object) { + .offs = cur_end + BARK_GGML_OBJECT_SIZE, .size = size_needed, .next = NULL, .type = type, }; - ggml_assert_aligned(mem_buffer + obj_new->offs); + bark_ggml_assert_aligned(mem_buffer + obj_new->offs); if (obj_cur != NULL) { obj_cur->next = obj_new; @@ -4882,15 +4880,15 @@ static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml return obj_new; } -static struct ggml_tensor * ggml_new_tensor_impl( - struct ggml_context * ctx, - enum ggml_type type, +static struct bark_ggml_tensor * bark_ggml_new_tensor_impl( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int n_dims, const int64_t * ne, - struct ggml_tensor * view_src, + struct bark_ggml_tensor * view_src, size_t view_offs) { - assert(n_dims >= 1 && n_dims <= GGML_MAX_DIMS); + assert(n_dims >= 1 && n_dims <= BARK_GGML_MAX_DIMS); // find the base tensor and absolute offset if (view_src != NULL && view_src->view_src != NULL) { @@ -4898,12 +4896,12 @@ static struct ggml_tensor * ggml_new_tensor_impl( view_src = view_src->view_src; } - size_t data_size = ggml_type_size(type)*(ne[0]/ggml_blck_size(type)); + size_t data_size = bark_ggml_type_size(type)*(ne[0]/bark_ggml_blck_size(type)); for (int i = 1; i < n_dims; i++) { data_size *= ne[i]; } - GGML_ASSERT(view_src == NULL || data_size + view_offs <= ggml_nbytes(view_src)); + BARK_GGML_ASSERT(view_src == NULL || data_size + view_offs <= bark_ggml_nbytes(view_src)); void * data = view_src != NULL ? 
view_src->data : NULL; if (data != NULL) { @@ -4916,7 +4914,7 @@ static struct ggml_tensor * ggml_new_tensor_impl( if (ctx->scratch.data != NULL) { // allocate tensor data in the scratch buffer if (ctx->scratch.offs + data_size > ctx->scratch.size) { - GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n", + BARK_GGML_PRINT("%s: not enough space in the scratch memory pool (needed %zu, available %zu)\n", __func__, ctx->scratch.offs + data_size, ctx->scratch.size); assert(false); return NULL; @@ -4931,20 +4929,20 @@ static struct ggml_tensor * ggml_new_tensor_impl( } } - struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size); + struct bark_ggml_object * const obj_new = bark_ggml_new_object(ctx, BARK_GGML_OBJECT_TENSOR, BARK_GGML_TENSOR_SIZE + obj_alloc_size); // TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here - struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs); + struct bark_ggml_tensor * const result = (struct bark_ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs); - *result = (struct ggml_tensor) { + *result = (struct bark_ggml_tensor) { /*.type =*/ type, - /*.backend =*/ GGML_BACKEND_CPU, + /*.backend =*/ BARK_GGML_BACKEND_CPU, /*.buffer =*/ NULL, /*.n_dims =*/ n_dims, /*.ne =*/ { 1, 1, 1, 1 }, /*.nb =*/ { 0, 0, 0, 0 }, - /*.op =*/ GGML_OP_NONE, + /*.op =*/ BARK_GGML_OP_NONE, /*.op_params =*/ { 0 }, /*.is_param =*/ false, /*.grad =*/ NULL, @@ -4961,15 +4959,15 @@ static struct ggml_tensor * ggml_new_tensor_impl( }; // TODO: this should not be needed as long as we don't rely on aligned SIMD loads - //ggml_assert_aligned(result->data); + //bark_ggml_assert_aligned(result->data); for (int i = 0; i < n_dims; i++) { result->ne[i] = ne[i]; } - result->nb[0] = ggml_type_size(type); - result->nb[1] = result->nb[0]*(result->ne[0]/ggml_blck_size(type)); - for (int i = 2; i < GGML_MAX_DIMS; i++) { + result->nb[0] = bark_ggml_type_size(type); + result->nb[1] = result->nb[0]*(result->ne[0]/bark_ggml_blck_size(type)); + for (int i = 2; i < BARK_GGML_MAX_DIMS; i++) { result->nb[i] = result->nb[i - 1]*result->ne[i - 1]; } @@ -4978,205 +4976,205 @@ static struct ggml_tensor * ggml_new_tensor_impl( return result; } -struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, +struct bark_ggml_tensor * bark_ggml_new_tensor( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int n_dims, const int64_t * ne) { - return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0); + return bark_ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL, 0); } -struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, +struct bark_ggml_tensor * bark_ggml_new_tensor_1d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0) { - return ggml_new_tensor(ctx, type, 1, &ne0); + return bark_ggml_new_tensor(ctx, type, 1, &ne0); } -struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, +struct bark_ggml_tensor * bark_ggml_new_tensor_2d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0, int64_t ne1) { const int64_t ne[2] = { ne0, ne1 }; - return ggml_new_tensor(ctx, type, 2, ne); + return bark_ggml_new_tensor(ctx, type, 2, ne); } -struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, +struct bark_ggml_tensor * bark_ggml_new_tensor_3d( + struct 
bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2) { const int64_t ne[3] = { ne0, ne1, ne2 }; - return ggml_new_tensor(ctx, type, 3, ne); + return bark_ggml_new_tensor(ctx, type, 3, ne); } -struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, +struct bark_ggml_tensor * bark_ggml_new_tensor_4d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) { const int64_t ne[4] = { ne0, ne1, ne2, ne3 }; - return ggml_new_tensor(ctx, type, 4, ne); + return bark_ggml_new_tensor(ctx, type, 4, ne); } -struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { - ggml_scratch_save(ctx); +struct bark_ggml_tensor * bark_ggml_new_i32(struct bark_ggml_context * ctx, int32_t value) { + bark_ggml_scratch_save(ctx); - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_I32, 1); - ggml_scratch_load(ctx); + bark_ggml_scratch_load(ctx); - ggml_set_i32(result, value); + bark_ggml_set_i32(result, value); return result; } -struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { - ggml_scratch_save(ctx); +struct bark_ggml_tensor * bark_ggml_new_f32(struct bark_ggml_context * ctx, float value) { + bark_ggml_scratch_save(ctx); - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1); - ggml_scratch_load(ctx); + bark_ggml_scratch_load(ctx); - ggml_set_f32(result, value); + bark_ggml_set_f32(result, value); return result; } -struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) { - return ggml_new_tensor(ctx, src->type, src->n_dims, src->ne); +struct bark_ggml_tensor * bark_ggml_dup_tensor(struct bark_ggml_context * ctx, const struct bark_ggml_tensor * src) { + return bark_ggml_new_tensor(ctx, src->type, src->n_dims, src->ne); } -static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) { - GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings - assert(params_size <= GGML_MAX_OP_PARAMS); +static void bark_ggml_set_op_params(struct bark_ggml_tensor * tensor, const void * params, size_t params_size) { + BARK_GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings + assert(params_size <= BARK_GGML_MAX_OP_PARAMS); memcpy(tensor->op_params, params, params_size); } -static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) { - assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t)); +static int32_t bark_ggml_get_op_params_i32(const struct bark_ggml_tensor * tensor, uint32_t i) { + assert(i < BARK_GGML_MAX_OP_PARAMS / sizeof(int32_t)); return ((const int32_t *)(tensor->op_params))[i]; } -static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) { - assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t)); +static void bark_ggml_set_op_params_i32(struct bark_ggml_tensor * tensor, uint32_t i, int32_t value) { + assert(i < BARK_GGML_MAX_OP_PARAMS / sizeof(int32_t)); ((int32_t *)(tensor->op_params))[i] = value; } -struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) { - memset(tensor->data, 0, ggml_nbytes(tensor)); +struct bark_ggml_tensor * bark_ggml_set_zero(struct bark_ggml_tensor * tensor) { + memset(tensor->data, 0, bark_ggml_nbytes(tensor)); return tensor; } -struct ggml_tensor * 
ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { - const int n = ggml_nrows(tensor); +struct bark_ggml_tensor * bark_ggml_set_i32 (struct bark_ggml_tensor * tensor, int32_t value) { + const int n = bark_ggml_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); + bark_ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); + bark_ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); + bark_ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + assert(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + bark_ggml_vec_set_f16(nc, (bark_ggml_fp16_t *)(data + i*n1), BARK_GGML_FP32_TO_FP16(value)); } } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); + bark_ggml_vec_set_f32(nc, (float *)(data + i*n1), value); } } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } return tensor; } -struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { - const int n = ggml_nrows(tensor); +struct bark_ggml_tensor * bark_ggml_set_f32(struct bark_ggml_tensor * tensor, float value) { + const int n = bark_ggml_nrows(tensor); const int nc = tensor->ne[0]; const size_t n1 = tensor->nb[1]; char * const data = tensor->data; switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); + bark_ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { assert(tensor->nb[0] == sizeof(int16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); + bark_ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); } } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { assert(tensor->nb[0] == sizeof(int32_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); + bark_ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); } } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); + assert(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + bark_ggml_vec_set_f16(nc, (bark_ggml_fp16_t *)(data + i*n1), BARK_GGML_FP32_TO_FP16(value)); } } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { assert(tensor->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); + bark_ggml_vec_set_f32(nc, (float *)(data + i*n1), value); } } break; 
default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } return tensor; } -void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) { +void bark_ggml_unravel_index(const struct bark_ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) { const int64_t ne2 = tensor->ne[2]; const int64_t ne1 = tensor->ne[1]; const int64_t ne0 = tensor->ne[0]; @@ -5200,293 +5198,293 @@ void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * } } -int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { - if (!ggml_is_contiguous(tensor)) { +int32_t bark_ggml_get_i32_1d(const struct bark_ggml_tensor * tensor, int i) { + if (!bark_ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; - ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); - return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]); + bark_ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); + return bark_ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]); } switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); return ((int8_t *)(tensor->data))[i]; } - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); return ((int16_t *)(tensor->data))[i]; } - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); return ((int32_t *)(tensor->data))[i]; } - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); + return BARK_GGML_FP16_TO_FP32(((bark_ggml_fp16_t *)(tensor->data))[i]); } - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(float)); return ((float *)(tensor->data))[i]; } default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } return 0.0f; } -void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { - if (!ggml_is_contiguous(tensor)) { +void bark_ggml_set_i32_1d(const struct bark_ggml_tensor * tensor, int i, int32_t value) { + if (!bark_ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; - ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); - ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value); + bark_ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); + bark_ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value); return; } switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); ((int8_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); ((int16_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); ((int32_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == 
sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); + ((bark_ggml_fp16_t *)(tensor->data))[i] = BARK_GGML_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(float)); ((float *)(tensor->data))[i] = value; } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { +int32_t bark_ggml_get_i32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: return ((int8_t *) data)[0]; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: return ((int16_t *) data)[0]; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: return ((int32_t *) data)[0]; - case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F16: + return BARK_GGML_FP16_TO_FP32(((bark_ggml_fp16_t *) data)[0]); + case BARK_GGML_TYPE_F32: return ((float *) data)[0]; default: - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } return 0.0f; } -void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) { +void bark_ggml_set_i32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { ((int8_t *)(data))[0] = value; } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { ((int16_t *)(data))[0] = value; } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { ((int32_t *)(data))[0] = value; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((bark_ggml_fp16_t *)(data))[0] = BARK_GGML_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { ((float *)(data))[0] = value; } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { - if (!ggml_is_contiguous(tensor)) { +float bark_ggml_get_f32_1d(const struct bark_ggml_tensor * tensor, int i) { + if (!bark_ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; - ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); - return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]); + bark_ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); + return bark_ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]); } switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); return ((int8_t *)(tensor->data))[i]; } - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); return ((int16_t *)(tensor->data))[i]; } - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); return ((int32_t *)(tensor->data))[i]; } - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - 
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); + return BARK_GGML_FP16_TO_FP32(((bark_ggml_fp16_t *)(tensor->data))[i]); } - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(float)); return ((float *)(tensor->data))[i]; } default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } return 0.0f; } -void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { - if (!ggml_is_contiguous(tensor)) { +void bark_ggml_set_f32_1d(const struct bark_ggml_tensor * tensor, int i, float value) { + if (!bark_ggml_is_contiguous(tensor)) { int64_t id[4] = { 0, 0, 0, 0 }; - ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); - ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); + bark_ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); + bark_ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); return; } switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); ((int8_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); ((int16_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); ((int32_t *)(tensor->data))[i] = value; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(bark_ggml_fp16_t)); + ((bark_ggml_fp16_t *)(tensor->data))[i] = BARK_GGML_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(tensor->nb[0] == sizeof(float)); ((float *)(tensor->data))[i] = value; } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { +float bark_ggml_get_f32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { - case GGML_TYPE_I8: + case BARK_GGML_TYPE_I8: return ((int8_t *) data)[0]; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: return ((int16_t *) data)[0]; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: return ((int32_t *) data)[0]; - case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F16: + return BARK_GGML_FP16_TO_FP32(((bark_ggml_fp16_t *) data)[0]); + case BARK_GGML_TYPE_F32: return ((float *) data)[0]; default: - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } return 0.0f; } -void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) { +void bark_ggml_set_f32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) { void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; switch (tensor->type) { - case GGML_TYPE_I8: + 
case BARK_GGML_TYPE_I8: { ((int8_t *)(data))[0] = value; } break; - case GGML_TYPE_I16: + case BARK_GGML_TYPE_I16: { ((int16_t *)(data))[0] = value; } break; - case GGML_TYPE_I32: + case BARK_GGML_TYPE_I32: { ((int32_t *)(data))[0] = value; } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((bark_ggml_fp16_t *)(data))[0] = BARK_GGML_FP32_TO_FP16(value); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { ((float *)(data))[0] = value; } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -void * ggml_get_data(const struct ggml_tensor * tensor) { +void * bark_ggml_get_data(const struct bark_ggml_tensor * tensor) { return tensor->data; } -float * ggml_get_data_f32(const struct ggml_tensor * tensor) { - assert(tensor->type == GGML_TYPE_F32); +float * bark_ggml_get_data_f32(const struct bark_ggml_tensor * tensor) { + assert(tensor->type == BARK_GGML_TYPE_F32); return (float *)(tensor->data); } -enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) { - GGML_ASSERT(tensor->op == GGML_OP_UNARY); - return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0); +enum bark_ggml_unary_op bark_ggml_get_unary_op(const struct bark_ggml_tensor * tensor) { + BARK_GGML_ASSERT(tensor->op == BARK_GGML_OP_UNARY); + return (enum bark_ggml_unary_op) bark_ggml_get_op_params_i32(tensor, 0); } -const char * ggml_get_name(const struct ggml_tensor * tensor) { +const char * bark_ggml_get_name(const struct bark_ggml_tensor * tensor) { return tensor->name; } -struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) { +struct bark_ggml_tensor * bark_ggml_set_name(struct bark_ggml_tensor * tensor, const char * name) { strncpy(tensor->name, name, sizeof(tensor->name)); tensor->name[sizeof(tensor->name) - 1] = '\0'; return tensor; } -struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...) { +struct bark_ggml_tensor * bark_ggml_format_name(struct bark_ggml_tensor * tensor, const char * fmt, ...) 
{ va_list args; va_start(args, fmt); vsnprintf(tensor->name, sizeof(tensor->name), fmt, args); @@ -5494,27 +5492,27 @@ struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * return tensor; } -struct ggml_tensor * ggml_view_tensor( - struct ggml_context * ctx, - struct ggml_tensor * src) { - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0); - ggml_format_name(result, "%s (view)", src->name); +struct bark_ggml_tensor * bark_ggml_view_tensor( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * src) { + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src, 0); + bark_ggml_format_name(result, "%s (view)", src->name); - for (int i = 0; i < GGML_MAX_DIMS; i++) { + for (int i = 0; i < BARK_GGML_MAX_DIMS; i++) { result->nb[i] = src->nb[i]; } return result; } -struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) { - struct ggml_object * obj = ctx->objects_begin; +struct bark_ggml_tensor * bark_ggml_get_tensor(struct bark_ggml_context * ctx, const char * name) { + struct bark_ggml_object * obj = ctx->objects_begin; char * const mem_buffer = ctx->mem_buffer; while (obj != NULL) { - if (obj->type == GGML_OBJECT_TENSOR) { - struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs); + if (obj->type == BARK_GGML_OBJECT_TENSOR) { + struct bark_ggml_tensor * cur = (struct bark_ggml_tensor *)(mem_buffer + obj->offs); if (strcmp(cur->name, name) == 0) { return cur; } @@ -5528,11 +5526,11 @@ struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * nam //////////////////////////////////////////////////////////////////////////////// -// ggml_dup +// bark_ggml_dup -static struct ggml_tensor * ggml_dup_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_dup_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -5540,117 +5538,117 @@ static struct ggml_tensor * ggml_dup_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_DUP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_DUP; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_dup( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_dup_impl(ctx, a, false); } -struct ggml_tensor * ggml_dup_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_dup_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_dup_impl(ctx, a, true); } -// ggml_add +// bark_ggml_add -static struct ggml_tensor * ggml_add_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_add_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { // TODO: support less-strict constraint - // GGML_ASSERT(ggml_can_repeat(b, a)); - GGML_ASSERT(ggml_can_repeat_rows(b, a)); + // BARK_GGML_ASSERT(bark_ggml_can_repeat(b, a)); + BARK_GGML_ASSERT(bark_ggml_can_repeat_rows(b, a)); bool is_node = false; if (!inplace && (a->grad || b->grad)) { // TODO: support backward pass for broadcasting - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_ADD; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ADD; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_add( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_add_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_add_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_add_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_add_impl(ctx, a, b, true); } -// ggml_add_cast +// bark_ggml_add_cast -static struct ggml_tensor * ggml_add_cast_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - enum ggml_type type) { +static struct bark_ggml_tensor * bark_ggml_add_cast_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + enum bark_ggml_type type) { // TODO: support less-strict constraint - // GGML_ASSERT(ggml_can_repeat(b, a)); - GGML_ASSERT(ggml_can_repeat_rows(b, a)); - GGML_ASSERT(ggml_is_quantized(a->type)); // currently only supported for quantized input + // BARK_GGML_ASSERT(bark_ggml_can_repeat(b, a)); + BARK_GGML_ASSERT(bark_ggml_can_repeat_rows(b, a)); + BARK_GGML_ASSERT(bark_ggml_is_quantized(a->type)); // currently only supported for quantized input bool is_node = false; if (a->grad || b->grad) { // TODO: support backward pass for broadcasting - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); is_node = true; } - struct ggml_tensor * result = ggml_new_tensor(ctx, type, a->n_dims, a->ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, type, a->n_dims, a->ne); - result->op = GGML_OP_ADD; - result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne) : NULL; + result->op = BARK_GGML_OP_ADD; + result->grad = is_node ? bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, a->n_dims, a->ne) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_add_cast( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - enum ggml_type type) { - return ggml_add_cast_impl(ctx, a, b, type); +struct bark_ggml_tensor * bark_ggml_add_cast( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + enum bark_ggml_type type) { + return bark_ggml_add_cast_impl(ctx, a, b, type); } -// ggml_add1 +// bark_ggml_add1 -static struct ggml_tensor * ggml_add1_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_add1_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { - GGML_ASSERT(ggml_is_scalar(b)); - GGML_ASSERT(ggml_is_padded_1d(a)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(b)); + BARK_GGML_ASSERT(bark_ggml_is_padded_1d(a)); bool is_node = false; @@ -5658,45 +5656,45 @@ static struct ggml_tensor * ggml_add1_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? 
bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_ADD1; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ADD1; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_add1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add1_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_add1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_add1_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_add1_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add1_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_add1_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_add1_impl(ctx, a, b, true); } -// ggml_acc +// bark_ggml_acc -static struct ggml_tensor * ggml_acc_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_acc_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, bool inplace) { - GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a)); - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(a->type == GGML_TYPE_F32); - GGML_ASSERT(b->type == GGML_TYPE_F32); + BARK_GGML_ASSERT(bark_ggml_nelements(b) <= bark_ggml_nelements(a)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(a->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(b->type == BARK_GGML_TYPE_F32); bool is_node = false; @@ -5704,49 +5702,49 @@ static struct ggml_tensor * ggml_acc_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_ACC; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ACC; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_acc( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_acc( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset) { - return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false); + return bark_ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false); } -struct ggml_tensor * ggml_acc_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_acc_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset) { - return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true); + return bark_ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true); } -// ggml_sub +// bark_ggml_sub -static struct ggml_tensor * ggml_sub_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_sub_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); bool is_node = false; @@ -5754,85 +5752,85 @@ static struct ggml_tensor * ggml_sub_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SUB; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SUB; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_sub( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_sub_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_sub_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_sub_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_sub_impl(ctx, a, b, true); } -// ggml_mul +// bark_ggml_mul -static struct ggml_tensor * ggml_mul_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_mul_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { // TODO: support less-strict constraint - // GGML_ASSERT(ggml_can_repeat(b, a)); - GGML_ASSERT(ggml_can_repeat_rows(b, a)); + // BARK_GGML_ASSERT(bark_ggml_can_repeat(b, a)); + BARK_GGML_ASSERT(bark_ggml_can_repeat_rows(b, a)); bool is_node = false; if (!inplace && (a->grad || b->grad)) { // TODO: support backward pass for broadcasting - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); is_node = true; } if (inplace) { - GGML_ASSERT(!is_node); + BARK_GGML_ASSERT(!is_node); } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_MUL; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MUL; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_mul( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_mul_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_mul_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_mul_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_mul_impl(ctx, a, b, true); } -// ggml_div +// bark_ggml_div -static struct ggml_tensor * ggml_div_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_div_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); bool is_node = false; @@ -5841,38 +5839,38 @@ static struct ggml_tensor * ggml_div_impl( } if (inplace) { - GGML_ASSERT(!is_node); + BARK_GGML_ASSERT(!is_node); } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? 
bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_DIV; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_DIV; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_div( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_div_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_div_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_div_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_div_impl(ctx, a, b, true); } -// ggml_sqr +// bark_ggml_sqr -static struct ggml_tensor * ggml_sqr_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_sqr_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -5880,32 +5878,32 @@ static struct ggml_tensor * ggml_sqr_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SQR; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SQR; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_sqr( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_sqr_impl(ctx, a, false); } -struct ggml_tensor * ggml_sqr_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_sqr_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_sqr_impl(ctx, a, true); } -// ggml_sqrt +// bark_ggml_sqrt -static struct ggml_tensor * ggml_sqrt_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_sqrt_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -5913,32 +5911,32 @@ static struct ggml_tensor * ggml_sqrt_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SQRT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SQRT; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_sqrt( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_sqrt_impl(ctx, a, false); } -struct ggml_tensor * ggml_sqrt_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_sqrt_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_sqrt_impl(ctx, a, true); } -// ggml_log +// bark_ggml_log -static struct ggml_tensor * ggml_log_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_log_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -5946,52 +5944,52 @@ static struct ggml_tensor * ggml_log_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_LOG; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_LOG; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_log( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_log_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_log( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_log_impl(ctx, a, false); } -struct ggml_tensor * ggml_log_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_log_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_log_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_log_impl(ctx, a, true); } -// ggml_sum +// bark_ggml_sum -struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct bark_ggml_tensor * bark_ggml_sum( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { bool is_node = false; if (a->grad) { is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_1d(ctx, a->type, 1); - result->op = GGML_OP_SUM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SUM; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_sum_rows +// bark_ggml_sum_rows -struct ggml_tensor * ggml_sum_rows( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct bark_ggml_tensor * bark_ggml_sum_rows( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { bool is_node = false; if (a->grad) { @@ -6003,67 +6001,67 @@ struct ggml_tensor * ggml_sum_rows( ne[i] = a->ne[i]; } - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, a->type, a->n_dims, ne); - result->op = GGML_OP_SUM_ROWS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SUM_ROWS; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_mean +// bark_ggml_mean -struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct bark_ggml_tensor * bark_ggml_mean( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement is_node = true; } - int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne); + int64_t ne[BARK_GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, a->n_dims, ne); - result->op = GGML_OP_MEAN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MEAN; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_argmax +// bark_ggml_argmax -struct ggml_tensor * ggml_argmax( - struct ggml_context * ctx, - struct ggml_tensor * a) { - GGML_ASSERT(ggml_is_matrix(a)); +struct bark_ggml_tensor * bark_ggml_argmax( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + BARK_GGML_ASSERT(bark_ggml_is_matrix(a)); bool is_node = false; if (a->grad) { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); is_node = true; } - int64_t ne[GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, a->n_dims, ne); + int64_t ne[BARK_GGML_MAX_DIMS] = { a->ne[1], 1, 1, 1 }; + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_I32, a->n_dims, ne); - result->op = GGML_OP_ARGMAX; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ARGMAX; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_repeat +// bark_ggml_repeat -struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_repeat(a, b)); +struct bark_ggml_tensor * bark_ggml_repeat( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_can_repeat(a, b)); bool is_node = false; @@ -6071,22 +6069,22 @@ struct ggml_tensor * ggml_repeat( is_node = true; } - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); - result->op = GGML_OP_REPEAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_REPEAT; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_repeat_back +// bark_ggml_repeat_back -struct ggml_tensor * ggml_repeat_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_repeat(b, a)); +struct bark_ggml_tensor * bark_ggml_repeat_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_can_repeat(b, a)); bool is_node = false; @@ -6094,26 +6092,26 @@ struct ggml_tensor * ggml_repeat_back( is_node = true; } - if (ggml_are_same_shape(a, b) && !is_node) { + if (bark_ggml_are_same_shape(a, b) && !is_node) { return a; } - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); - result->op = GGML_OP_REPEAT_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_REPEAT_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_concat +// bark_ggml_concat -struct ggml_tensor * ggml_concat( - struct ggml_context* ctx, - struct ggml_tensor* a, - struct ggml_tensor* b) { - GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]); +struct bark_ggml_tensor * bark_ggml_concat( + struct bark_ggml_context* ctx, + struct bark_ggml_tensor* a, + struct bark_ggml_tensor* b) { + BARK_GGML_ASSERT(a->ne[0] == b->ne[0] && a->ne[1] == b->ne[1] && a->ne[3] == b->ne[3]); bool is_node = false; @@ -6121,162 +6119,162 @@ struct ggml_tensor * ggml_concat( is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_4d(ctx, a->type, a->ne[0], a->ne[1], a->ne[2] + b->ne[2], a->ne[3]); - result->op = GGML_OP_CONCAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONCAT; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_abs +// bark_ggml_abs -struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_ABS); +struct bark_ggml_tensor * bark_ggml_abs( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_ABS); } -struct ggml_tensor * ggml_abs_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ABS); +struct bark_ggml_tensor * bark_ggml_abs_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_ABS); } -// ggml_sgn +// bark_ggml_sgn -struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_SGN); +struct bark_ggml_tensor * bark_ggml_sgn( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_SGN); } -struct ggml_tensor * ggml_sgn_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SGN); +struct bark_ggml_tensor * bark_ggml_sgn_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_SGN); } -// ggml_neg +// bark_ggml_neg -struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_NEG); +struct bark_ggml_tensor * bark_ggml_neg( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_NEG); } -struct ggml_tensor * ggml_neg_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_NEG); +struct bark_ggml_tensor * bark_ggml_neg_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_NEG); } -// ggml_step +// bark_ggml_step -struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_STEP); +struct bark_ggml_tensor * bark_ggml_step( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_STEP); } -struct ggml_tensor * ggml_step_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_STEP); +struct bark_ggml_tensor * bark_ggml_step_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_STEP); } -// ggml_tanh +// bark_ggml_tanh -struct ggml_tensor * ggml_tanh( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_TANH); +struct bark_ggml_tensor * bark_ggml_tanh( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_TANH); } -struct ggml_tensor * ggml_tanh_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_TANH); +struct bark_ggml_tensor * bark_ggml_tanh_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_TANH); } -// ggml_elu +// bark_ggml_elu -struct ggml_tensor * 
ggml_elu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_ELU); +struct bark_ggml_tensor * bark_ggml_elu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_ELU); } -struct ggml_tensor * ggml_elu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_ELU); +struct bark_ggml_tensor * bark_ggml_elu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_ELU); } -// ggml_relu +// bark_ggml_relu -struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_RELU); +struct bark_ggml_tensor * bark_ggml_relu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_RELU); } -struct ggml_tensor * ggml_relu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU); +struct bark_ggml_tensor * bark_ggml_relu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_RELU); } -// ggml_gelu +// bark_ggml_gelu -struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_GELU); +struct bark_ggml_tensor * bark_ggml_gelu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_GELU); } -struct ggml_tensor * ggml_gelu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU); +struct bark_ggml_tensor * bark_ggml_gelu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_GELU); } -// ggml_gelu_quick +// bark_ggml_gelu_quick -struct ggml_tensor * ggml_gelu_quick( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_QUICK); +struct bark_ggml_tensor * bark_ggml_gelu_quick( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_GELU_QUICK); } -struct ggml_tensor * ggml_gelu_quick_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_QUICK); +struct bark_ggml_tensor * bark_ggml_gelu_quick_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_GELU_QUICK); } -// ggml_silu +// bark_ggml_silu -struct ggml_tensor * ggml_silu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary(ctx, a, GGML_UNARY_OP_SILU); +struct bark_ggml_tensor * bark_ggml_silu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary(ctx, a, BARK_GGML_UNARY_OP_SILU); } -struct ggml_tensor * ggml_silu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU); +struct bark_ggml_tensor * bark_ggml_silu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_unary_inplace(ctx, a, BARK_GGML_UNARY_OP_SILU); } -// ggml_silu_back +// bark_ggml_silu_back -struct ggml_tensor * ggml_silu_back( - struct ggml_context * ctx, - struct ggml_tensor * 
a, - struct ggml_tensor * b) { +struct bark_ggml_tensor * bark_ggml_silu_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { bool is_node = false; if (a->grad || b->grad) { @@ -6284,60 +6282,60 @@ struct ggml_tensor * ggml_silu_back( is_node = true; } - struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SILU_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SILU_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_norm +// bark_ggml_norm -static struct ggml_tensor * ggml_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_norm_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps, bool inplace) { bool is_node = false; if (!inplace && (a->grad)) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, &eps, sizeof(eps)); + bark_ggml_set_op_params(result, &eps, sizeof(eps)); - result->op = GGML_OP_NORM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_NORM; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps) { - return ggml_norm_impl(ctx, a, eps, false); + return bark_ggml_norm_impl(ctx, a, eps, false); } -struct ggml_tensor * ggml_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps) { - return ggml_norm_impl(ctx, a, eps, true); + return bark_ggml_norm_impl(ctx, a, eps, true); } -// ggml_rms_norm +// bark_ggml_rms_norm -static struct ggml_tensor * ggml_rms_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_rms_norm_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps, bool inplace) { bool is_node = false; @@ -6346,37 +6344,37 @@ static struct ggml_tensor * ggml_rms_norm_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, &eps, sizeof(eps)); + bark_ggml_set_op_params(result, &eps, sizeof(eps)); - result->op = GGML_OP_RMS_NORM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RMS_NORM; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_rms_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_rms_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps) { - return ggml_rms_norm_impl(ctx, a, eps, false); + return bark_ggml_rms_norm_impl(ctx, a, eps, false); } -struct ggml_tensor * ggml_rms_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_rms_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps) { - return ggml_rms_norm_impl(ctx, a, eps, true); + return bark_ggml_rms_norm_impl(ctx, a, eps, true); } -// ggml_rms_norm_back +// bark_ggml_rms_norm_back -struct ggml_tensor * ggml_rms_norm_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rms_norm_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, float eps) { bool is_node = false; @@ -6385,65 +6383,65 @@ struct ggml_tensor * ggml_rms_norm_back( is_node = true; } - struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, &eps, sizeof(eps)); + bark_ggml_set_op_params(result, &eps, sizeof(eps)); - result->op = GGML_OP_RMS_NORM_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RMS_NORM_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_group_norm +// bark_ggml_group_norm -static struct ggml_tensor * ggml_group_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_group_norm_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_groups, bool inplace) { bool is_node = false; if (!inplace && (a->grad)) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_GROUP_NORM; + result->op = BARK_GGML_OP_GROUP_NORM; result->op_params[0] = n_groups; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = NULL; // TODO: maybe store epsilon here? 
return result; } -struct ggml_tensor * ggml_group_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_group_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_groups) { - return ggml_group_norm_impl(ctx, a, n_groups, false); + return bark_ggml_group_norm_impl(ctx, a, n_groups, false); } -struct ggml_tensor * ggml_group_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_group_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_groups) { - return ggml_group_norm_impl(ctx, a, n_groups, true); + return bark_ggml_group_norm_impl(ctx, a, n_groups, true); } -// ggml_mul_mat +// bark_ggml_mul_mat -struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_mul_mat(a, b)); - GGML_ASSERT(!ggml_is_transposed(a)); +struct bark_ggml_tensor * bark_ggml_mul_mat( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_can_mul_mat(a, b)); + BARK_GGML_ASSERT(!bark_ggml_is_transposed(a)); bool is_node = false; @@ -6452,24 +6450,24 @@ struct ggml_tensor * ggml_mul_mat( } const int64_t ne[4] = { a->ne[1], b->ne[1], b->ne[2], b->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne); - result->op = GGML_OP_MUL_MAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MUL_MAT; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_out_prod +// bark_ggml_out_prod -struct ggml_tensor * ggml_out_prod( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_out_prod(a, b)); - GGML_ASSERT(!ggml_is_transposed(a)); +struct bark_ggml_tensor * bark_ggml_out_prod( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_can_out_prod(a, b)); + BARK_GGML_ASSERT(!bark_ggml_is_transposed(a)); bool is_node = false; @@ -6479,25 +6477,25 @@ struct ggml_tensor * ggml_out_prod( // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3] const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne); - result->op = GGML_OP_OUT_PROD; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_OUT_PROD; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_scale +// bark_ggml_scale -static struct ggml_tensor * ggml_scale_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_scale_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { - GGML_ASSERT(ggml_is_scalar(b)); - GGML_ASSERT(ggml_is_padded_1d(a)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(b)); + BARK_GGML_ASSERT(bark_ggml_is_padded_1d(a)); bool is_node = false; @@ -6505,42 +6503,42 @@ static struct ggml_tensor * ggml_scale_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SCALE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SCALE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_scale( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_scale_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_scale_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_scale_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_scale_impl(ctx, a, b, true); } -// ggml_set +// bark_ggml_set -static struct ggml_tensor * ggml_set_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_set_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, bool inplace) { - GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) >= bark_ggml_nelements(b)); bool is_node = false; @@ -6549,83 +6547,83 @@ static struct ggml_tensor * ggml_set_impl( } // make a view of the destination - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_SET; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SET; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_set( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset) { - return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false); + return bark_ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false); } -struct ggml_tensor * ggml_set_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset) { - return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true); + return bark_ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true); } -struct ggml_tensor * ggml_set_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t offset) { - return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false); + return bark_ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false); } -struct ggml_tensor * ggml_set_1d_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set_1d_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t offset) { - return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true); + return bark_ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true); } -struct ggml_tensor * ggml_set_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t offset) { - return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false); + return bark_ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false); } -struct ggml_tensor * ggml_set_2d_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_set_2d_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t offset) { - return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false); + return bark_ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false); } -// ggml_cpy +// bark_ggml_cpy -static struct ggml_tensor * ggml_cpy_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_cpy_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { - GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == bark_ggml_nelements(b)); bool is_node = false; @@ -6634,40 +6632,40 @@ static struct ggml_tensor * ggml_cpy_impl( } // make a view of the destination - struct ggml_tensor * result = ggml_view_tensor(ctx, b); + struct bark_ggml_tensor * result = bark_ggml_view_tensor(ctx, b); if (strlen(b->name) > 0) { - ggml_format_name(result, "%s (copy 
of %s)", b->name, a->name); + bark_ggml_format_name(result, "%s (copy of %s)", b->name, a->name); } else { - ggml_format_name(result, "%s (copy)", a->name); + bark_ggml_format_name(result, "%s (copy)", a->name); } - result->op = GGML_OP_CPY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CPY; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_cpy( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_cpy_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_cpy_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_cpy_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_cpy_impl(ctx, a, b, true); } -// ggml_cont +// bark_ggml_cont -static struct ggml_tensor * ggml_cont_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_cont_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -6675,83 +6673,83 @@ static struct ggml_tensor * ggml_cont_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - ggml_format_name(result, "%s (cont)", a->name); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); + bark_ggml_format_name(result, "%s (cont)", a->name); - result->op = GGML_OP_CONT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONT; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_cont( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_cont_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_cont( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_cont_impl(ctx, a, false); } -struct ggml_tensor * ggml_cont_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_cont_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_cont_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_cont_impl(ctx, a, true); } // make contiguous, with new shape -GGML_API struct ggml_tensor * ggml_cont_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0) { - return ggml_cont_4d(ctx, a, ne0, 1, 1, 1); + return bark_ggml_cont_4d(ctx, a, ne0, 1, 1, 1); } -GGML_API struct ggml_tensor * ggml_cont_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1) { - return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1); + return bark_ggml_cont_4d(ctx, a, ne0, ne1, 1, 1); } -GGML_API struct ggml_tensor * ggml_cont_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, +BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_3d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2) { - return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1); + return bark_ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1); } -struct ggml_tensor * ggml_cont_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_cont_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) { - GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == (ne0*ne1*ne2*ne3)); bool is_node = false; - struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3); - ggml_format_name(result, "%s (cont)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3); + bark_ggml_format_name(result, "%s (cont)", a->name); - result->op = GGML_OP_CONT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONT; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_reshape +// bark_ggml_reshape -struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_contiguous(a)); +struct bark_ggml_tensor * bark_ggml_reshape( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); // as only the shape of b is relevant, and not its memory layout, b is allowed to be non contiguous. 
- GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == bark_ggml_nelements(b)); bool is_node = false; @@ -6761,25 +6759,25 @@ struct ggml_tensor * ggml_reshape( if (b->grad) { // gradient propagation is not supported - //GGML_ASSERT(false); + //BARK_GGML_ASSERT(false); } - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0); - ggml_format_name(result, "%s (reshaped)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a, 0); + bark_ggml_format_name(result, "%s (reshaped)", a->name); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RESHAPE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_reshape_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_reshape_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == ne0); bool is_node = false; @@ -6788,23 +6786,23 @@ struct ggml_tensor * ggml_reshape_1d( } const int64_t ne[1] = { ne0 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0); - ggml_format_name(result, "%s (reshaped)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, 1, ne, a, 0); + bark_ggml_format_name(result, "%s (reshaped)", a->name); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RESHAPE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_reshape_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0*ne1); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == ne0*ne1); bool is_node = false; @@ -6813,24 +6811,24 @@ struct ggml_tensor * ggml_reshape_2d( } const int64_t ne[2] = { ne0, ne1 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0); - ggml_format_name(result, "%s (reshaped)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, 2, ne, a, 0); + bark_ggml_format_name(result, "%s (reshaped)", a->name); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RESHAPE; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_reshape_3d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == ne0*ne1*ne2); bool is_node = false; @@ -6839,25 +6837,25 @@ struct ggml_tensor * ggml_reshape_3d( } const int64_t ne[3] = { ne0, ne1, ne2 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0); - ggml_format_name(result, "%s (reshaped)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, 3, ne, a, 0); + bark_ggml_format_name(result, "%s (reshaped)", a->name); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RESHAPE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_reshape_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_reshape_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(bark_ggml_nelements(a) == ne0*ne1*ne2*ne3); bool is_node = false; @@ -6866,19 +6864,19 @@ struct ggml_tensor * ggml_reshape_4d( } const int64_t ne[4] = { ne0, ne1, ne2, ne3 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0); - ggml_format_name(result, "%s (reshaped)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, 4, ne, a, 0); + bark_ggml_format_name(result, "%s (reshaped)", a->name); - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_RESHAPE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -static struct ggml_tensor * ggml_view_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_view_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_dims, const int64_t * ne, size_t offset) { @@ -6889,36 +6887,36 @@ static struct ggml_tensor * ggml_view_impl( is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset); - ggml_format_name(result, "%s (view)", a->name); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_impl(ctx, a->type, n_dims, ne, a, offset); + bark_ggml_format_name(result, "%s (view)", a->name); - ggml_set_op_params(result, &offset, sizeof(offset)); + bark_ggml_set_op_params(result, &offset, sizeof(offset)); - result->op = GGML_OP_VIEW; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_VIEW; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_view_1d +// bark_ggml_view_1d -struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_view_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, size_t offset) { - struct ggml_tensor * result = ggml_view_impl(ctx, a, 1, &ne0, offset); + struct bark_ggml_tensor * result = bark_ggml_view_impl(ctx, a, 1, &ne0, offset); return result; } -// ggml_view_2d +// bark_ggml_view_2d -struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_view_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, size_t nb1, @@ -6926,7 +6924,7 @@ struct ggml_tensor * ggml_view_2d( const int64_t ne[2] = { ne0, ne1 }; - struct ggml_tensor * result = ggml_view_impl(ctx, a, 2, ne, offset); + struct bark_ggml_tensor * result = bark_ggml_view_impl(ctx, a, 2, ne, offset); result->nb[1] = nb1; result->nb[2] = result->nb[1]*ne1; @@ -6935,11 +6933,11 @@ struct ggml_tensor * ggml_view_2d( return result; } -// ggml_view_3d +// bark_ggml_view_3d -struct ggml_tensor * ggml_view_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_view_3d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, @@ -6949,7 +6947,7 @@ struct ggml_tensor * ggml_view_3d( const int64_t ne[3] = { ne0, ne1, ne2 }; - struct ggml_tensor * result = ggml_view_impl(ctx, a, 3, ne, offset); + struct bark_ggml_tensor * result = bark_ggml_view_impl(ctx, a, 3, ne, offset); result->nb[1] = nb1; result->nb[2] = nb2; @@ -6958,11 +6956,11 @@ struct ggml_tensor * ggml_view_3d( return result; } -// ggml_view_4d +// bark_ggml_view_4d -struct ggml_tensor * ggml_view_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_view_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, @@ -6974,7 +6972,7 @@ struct ggml_tensor * ggml_view_4d( const int64_t ne[4] = { ne0, ne1, ne2, ne3 }; - struct ggml_tensor * result = ggml_view_impl(ctx, a, 4, ne, offset); + struct bark_ggml_tensor * result = bark_ggml_view_impl(ctx, a, 4, ne, offset); result->nb[1] = nb1; result->nb[2] = nb2; @@ -6983,26 +6981,26 @@ struct ggml_tensor * ggml_view_4d( return result; } -// ggml_permute +// bark_ggml_permute -struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_permute( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int axis0, int axis1, int axis2, int axis3) { - GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS); - GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS); - GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS); - GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS); - - GGML_ASSERT(axis0 != axis1); - GGML_ASSERT(axis0 != axis2); - GGML_ASSERT(axis0 != axis3); - GGML_ASSERT(axis1 != axis2); - GGML_ASSERT(axis1 != axis3); - GGML_ASSERT(axis2 != axis3); + BARK_GGML_ASSERT(axis0 >= 0 && axis0 < BARK_GGML_MAX_DIMS); + BARK_GGML_ASSERT(axis1 >= 0 && axis1 < BARK_GGML_MAX_DIMS); + BARK_GGML_ASSERT(axis2 >= 0 && axis2 < BARK_GGML_MAX_DIMS); + BARK_GGML_ASSERT(axis3 >= 0 && axis3 < BARK_GGML_MAX_DIMS); + + BARK_GGML_ASSERT(axis0 != axis1); + BARK_GGML_ASSERT(axis0 != axis2); + BARK_GGML_ASSERT(axis0 != axis3); + BARK_GGML_ASSERT(axis1 != 
axis2); + BARK_GGML_ASSERT(axis1 != axis3); + BARK_GGML_ASSERT(axis2 != axis3); bool is_node = false; @@ -7010,11 +7008,11 @@ struct ggml_tensor * ggml_permute( is_node = true; } - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - ggml_format_name(result, "%s (permuted)", a->name); + struct bark_ggml_tensor * result = bark_ggml_view_tensor(ctx, a); + bark_ggml_format_name(result, "%s (permuted)", a->name); - int ne[GGML_MAX_DIMS]; - int nb[GGML_MAX_DIMS]; + int ne[BARK_GGML_MAX_DIMS]; + int nb[BARK_GGML_MAX_DIMS]; ne[axis0] = a->ne[0]; ne[axis1] = a->ne[1]; @@ -7036,29 +7034,29 @@ struct ggml_tensor * ggml_permute( result->nb[2] = nb[2]; result->nb[3] = nb[3]; - result->op = GGML_OP_PERMUTE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_PERMUTE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; int32_t params[] = { axis0, axis1, axis2, axis3 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); return result; } -// ggml_transpose +// bark_ggml_transpose -struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a) { +struct bark_ggml_tensor * bark_ggml_transpose( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { bool is_node = false; if (a->grad) { is_node = true; } - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - ggml_format_name(result, "%s (transposed)", a->name); + struct bark_ggml_tensor * result = bark_ggml_view_tensor(ctx, a); + bark_ggml_format_name(result, "%s (transposed)", a->name); result->ne[0] = a->ne[1]; result->ne[1] = a->ne[0]; @@ -7066,20 +7064,20 @@ struct ggml_tensor * ggml_transpose( result->nb[0] = a->nb[1]; result->nb[1] = a->nb[0]; - result->op = GGML_OP_TRANSPOSE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_TRANSPOSE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_get_rows +// bark_ggml_get_rows -struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); +struct bark_ggml_tensor * bark_ggml_get_rows( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_is_matrix(a) && bark_ggml_is_vector(b) && b->type == BARK_GGML_TYPE_I32); bool is_node = false; @@ -7088,26 +7086,26 @@ struct ggml_tensor * ggml_get_rows( } // TODO: implement non F32 return - //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); - struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]); + //struct bark_ggml_tensor * result = bark_ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, a->ne[0], b->ne[0]); - result->op = GGML_OP_GET_ROWS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_GET_ROWS; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_get_rows_back +// bark_ggml_get_rows_back -struct ggml_tensor * ggml_get_rows_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c) { - GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); - GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0])); +struct bark_ggml_tensor * bark_ggml_get_rows_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c) { + BARK_GGML_ASSERT(bark_ggml_is_matrix(a) && bark_ggml_is_vector(b) && b->type == BARK_GGML_TYPE_I32); + BARK_GGML_ASSERT(bark_ggml_is_matrix(c) && (a->ne[0] == c->ne[0])); bool is_node = false; @@ -7116,23 +7114,23 @@ struct ggml_tensor * ggml_get_rows_back( } // TODO: implement non F32 return - //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); - struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]); + //struct bark_ggml_tensor * result = bark_ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, c->ne[0], c->ne[1]); - result->op = GGML_OP_GET_ROWS_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_GET_ROWS_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_diag +// bark_ggml_diag -struct ggml_tensor * ggml_diag( - struct ggml_context * ctx, - struct ggml_tensor * a) { - GGML_ASSERT(a->ne[1] == 1); +struct bark_ggml_tensor * bark_ggml_diag( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + BARK_GGML_ASSERT(a->ne[1] == 1); bool is_node = false; if (a->grad) { @@ -7140,20 +7138,20 @@ struct ggml_tensor * ggml_diag( } const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne); - result->op = GGML_OP_DIAG; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_DIAG; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_diag_mask_inf +// bark_ggml_diag_mask_inf -static struct ggml_tensor * ggml_diag_mask_inf_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_diag_mask_inf_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past, bool inplace) { bool is_node = false; @@ -7162,37 +7160,37 @@ static struct ggml_tensor * ggml_diag_mask_inf_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); int32_t params[] = { n_past }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_DIAG_MASK_INF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_DIAG_MASK_INF; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_diag_mask_inf( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past) { - return ggml_diag_mask_inf_impl(ctx, a, n_past, false); + return bark_ggml_diag_mask_inf_impl(ctx, a, n_past, false); } -struct ggml_tensor * ggml_diag_mask_inf_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_diag_mask_inf_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past) { - return ggml_diag_mask_inf_impl(ctx, a, n_past, true); + return bark_ggml_diag_mask_inf_impl(ctx, a, n_past, true); } -// ggml_diag_mask_zero +// bark_ggml_diag_mask_zero -static struct ggml_tensor * ggml_diag_mask_zero_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_diag_mask_zero_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past, bool inplace) { bool is_node = false; @@ -7201,37 +7199,37 @@ static struct ggml_tensor * ggml_diag_mask_zero_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); int32_t params[] = { n_past }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_DIAG_MASK_ZERO; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_DIAG_MASK_ZERO; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_diag_mask_zero( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_diag_mask_zero( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past) { - return ggml_diag_mask_zero_impl(ctx, a, n_past, false); + return bark_ggml_diag_mask_zero_impl(ctx, a, n_past, false); } -struct ggml_tensor * ggml_diag_mask_zero_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_diag_mask_zero_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past) { - return ggml_diag_mask_zero_impl(ctx, a, n_past, true); + return bark_ggml_diag_mask_zero_impl(ctx, a, n_past, true); } -// ggml_soft_max +// bark_ggml_soft_max -static struct ggml_tensor * ggml_soft_max_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_soft_max_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, bool inplace) { bool is_node = false; @@ -7239,33 +7237,33 @@ static struct ggml_tensor * ggml_soft_max_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SOFT_MAX; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SOFT_MAX; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_soft_max( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_soft_max_impl(ctx, a, false); +struct bark_ggml_tensor * bark_ggml_soft_max( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_soft_max_impl(ctx, a, false); } -struct ggml_tensor * ggml_soft_max_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_soft_max_impl(ctx, a, true); +struct bark_ggml_tensor * bark_ggml_soft_max_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a) { + return bark_ggml_soft_max_impl(ctx, a, true); } -// ggml_soft_max_back +// bark_ggml_soft_max_back -static struct ggml_tensor * ggml_soft_max_back_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_soft_max_back_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, bool inplace) { bool is_node = false; @@ -7273,36 +7271,36 @@ static struct ggml_tensor * ggml_soft_max_back_impl( is_node = true; // TODO : implement backward pass } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_SOFT_MAX_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_SOFT_MAX_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_soft_max_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_soft_max_back_impl(ctx, a, b, false); +struct bark_ggml_tensor * bark_ggml_soft_max_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_soft_max_back_impl(ctx, a, b, false); } -struct ggml_tensor * ggml_soft_max_back_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_soft_max_back_impl(ctx, a, b, true); +struct bark_ggml_tensor * bark_ggml_soft_max_back_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_soft_max_back_impl(ctx, a, b, true); } -// ggml_rope +// bark_ggml_rope -static struct ggml_tensor * ggml_rope_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_rope_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, @@ -7311,9 +7309,9 @@ static struct ggml_tensor * ggml_rope_impl( float xpos_base, bool xpos_down, bool inplace) { - GGML_ASSERT(ggml_is_vector(b)); - GGML_ASSERT(b->type == GGML_TYPE_I32); - GGML_ASSERT(a->ne[2] == b->ne[0]); + BARK_GGML_ASSERT(bark_ggml_is_vector(b)); + BARK_GGML_ASSERT(b->type == BARK_GGML_TYPE_I32); + BARK_GGML_ASSERT(a->ne[2] == b->ne[0]); bool is_node = false; @@ -7321,83 +7319,83 @@ static struct ggml_tensor * ggml_rope_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? 
bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx }; memcpy(params + 4, &freq_base, sizeof(float)); memcpy(params + 5, &freq_scale, sizeof(float)); memcpy(params + 6, &xpos_base, sizeof(float)); memcpy(params + 7, &xpos_down, sizeof(bool)); - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_ROPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ROPE; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false); + return bark_ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false); } -struct ggml_tensor * ggml_rope_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true); + return bark_ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true); } -struct ggml_tensor * ggml_rope_custom( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope_custom( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, float freq_base, float freq_scale) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false); + return bark_ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false); } -struct ggml_tensor * ggml_rope_custom_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope_custom_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, float freq_base, float freq_scale) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true); + return bark_ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true); } -struct ggml_tensor * ggml_rope_xpos_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope_xpos_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, float base, bool down) { - return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true); + return bark_ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true); } -// ggml_rope_back +// bark_ggml_rope_back -struct ggml_tensor * ggml_rope_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_rope_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int 
n_dims, int mode, int n_ctx, @@ -7405,11 +7403,11 @@ struct ggml_tensor * ggml_rope_back( float freq_scale, float xpos_base, bool xpos_down) { - GGML_ASSERT(ggml_is_vector(b)); - GGML_ASSERT(b->type == GGML_TYPE_I32); - GGML_ASSERT(a->ne[2] == b->ne[0]); + BARK_GGML_ASSERT(bark_ggml_is_vector(b)); + BARK_GGML_ASSERT(b->type == BARK_GGML_TYPE_I32); + BARK_GGML_ASSERT(a->ne[2] == b->ne[0]); - GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet"); + BARK_GGML_ASSERT((mode & 4) == 0 && "bark_ggml_rope_back() for ChatGLM not implemented yet"); bool is_node = false; @@ -7417,84 +7415,84 @@ struct ggml_tensor * ggml_rope_back( is_node = false; // TODO: implement backward } - struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, a); int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx }; memcpy(params + 4, &freq_base, sizeof(float)); memcpy(params + 5, &freq_scale, sizeof(float)); memcpy(params + 6, &xpos_base, sizeof(float)); memcpy(params + 7, &xpos_down, sizeof(bool)); - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_ROPE_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ROPE_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_alibi +// bark_ggml_alibi -struct ggml_tensor * ggml_alibi( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_alibi( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past, int n_head, float bias_max) { - GGML_ASSERT(n_past >= 0); + BARK_GGML_ASSERT(n_past >= 0); bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + //struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_view_tensor(ctx, a); int32_t op_params[3] = { n_past, n_head }; memcpy(op_params + 2, &bias_max, sizeof(float)); - ggml_set_op_params(result, op_params, sizeof(op_params)); + bark_ggml_set_op_params(result, op_params, sizeof(op_params)); - result->op = GGML_OP_ALIBI; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ALIBI; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_clamp +// bark_ggml_clamp -struct ggml_tensor * ggml_clamp( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_clamp( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float min, float max) { bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } // TODO: when implement backward, fix this: - struct ggml_tensor * result = ggml_view_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_view_tensor(ctx, a); float params[] = { min, max }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CLAMP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CLAMP; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_conv_1d +// bark_ggml_conv_1d -static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) { +static int64_t bark_ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) { return (ins + 2 * p - d * (ks - 1) - 1) / s + 1; } @@ -7502,22 +7500,22 @@ static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, // a: [OC,IC, K] // b: [N, IC, IL] // result: [N, OL, IC*K] -static struct ggml_tensor * ggml_conv_1d_stage_0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_conv_1d_stage_0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int p0, int d0) { - GGML_ASSERT(a->ne[1] == b->ne[1]); + BARK_GGML_ASSERT(a->ne[1] == b->ne[1]); bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } - const int64_t OL = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + const int64_t OL = bark_ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); const int64_t ne[4] = { a->ne[1] * a->ne[0], @@ -7525,34 +7523,34 @@ static struct ggml_tensor * ggml_conv_1d_stage_0( b->ne[2], 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F16, 4, ne); int32_t params[] = { s0, p0, d0 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CONV_1D_STAGE_0; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_1D_STAGE_0; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_conv_1d_stage_1 +// bark_ggml_conv_1d_stage_1 // gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K] // a: [OC, IC, K] // b: [N, OL, IC * K] // result: [N, OC, OL] -static struct ggml_tensor * ggml_conv_1d_stage_1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { +static struct bark_ggml_tensor * bark_ggml_conv_1d_stage_1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -7562,156 +7560,156 @@ static struct ggml_tensor * ggml_conv_1d_stage_1( b->ne[2], 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 4, ne); - result->op = GGML_OP_CONV_1D_STAGE_1; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_1D_STAGE_1; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_conv_1d +// bark_ggml_conv_1d -GGML_API struct ggml_tensor * ggml_conv_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int p0, int d0) { - struct ggml_tensor * result = ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0); - result = ggml_conv_1d_stage_1(ctx, a, result); + struct bark_ggml_tensor * result = bark_ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0); + result = bark_ggml_conv_1d_stage_1(ctx, a, result); return result; } -// GGML_API struct ggml_tensor * ggml_conv_1d( -// struct ggml_context * ctx, -// struct ggml_tensor * a, -// struct ggml_tensor * b, +// BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_1d( +// struct bark_ggml_context * ctx, +// struct bark_ggml_tensor * a, +// struct bark_ggml_tensor * b, // int s0, // int p0, // int d0) { -// GGML_ASSERT(ggml_is_matrix(b)); -// GGML_ASSERT(a->ne[1] == b->ne[1]); +// BARK_GGML_ASSERT(bark_ggml_is_matrix(b)); +// BARK_GGML_ASSERT(a->ne[1] == b->ne[1]); // bool is_node = false; // if (a->grad || b->grad) { -// GGML_ASSERT(false); // TODO: implement backward +// BARK_GGML_ASSERT(false); // TODO: implement backward // is_node = true; // } // const int64_t ne[4] = { -// ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0), +// bark_ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0), // a->ne[2], 1, 1, // }; -// struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); +// struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 2, ne); // int32_t params[] = { s0, p0, d0 }; -// ggml_set_op_params(result, params, sizeof(params)); +// bark_ggml_set_op_params(result, params, sizeof(params)); -// result->op = GGML_OP_CONV_1D; -// result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; +// result->op = BARK_GGML_OP_CONV_1D; +// result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; // result->src[0] = a; // result->src[1] = b; // return result; // } -// ggml_conv_1d_ph +// bark_ggml_conv_1d_ph -struct ggml_tensor* ggml_conv_1d_ph( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor* bark_ggml_conv_1d_ph( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s, int d) { - return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d); + return bark_ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d); } -// ggml_pad_reflec_1d +// bark_ggml_pad_reflec_1d -struct ggml_tensor * ggml_pad_reflec_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_pad_reflec_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int p0, int p1) { bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[2] = { p0 + a->ne[0] + p1, a->ne[1] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 2, ne); int32_t params[] = { p0, p1 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_PAD_REFLEC_1D; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_PAD_REFLEC_1D; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_conv_transpose_1d +// bark_ggml_conv_transpose_1d -static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) { +static int64_t bark_ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) { return (ins - 1) * s - 2 * p + d * (ks - 1) + 1; } -GGML_API struct ggml_tensor * ggml_conv_transpose_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_transpose_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int p0, int d0) { - GGML_ASSERT(ggml_is_matrix(b)); - GGML_ASSERT(a->ne[2] == b->ne[1]); - GGML_ASSERT(a->ne[3] == 1); + BARK_GGML_ASSERT(bark_ggml_is_matrix(b)); + BARK_GGML_ASSERT(a->ne[2] == b->ne[1]); + BARK_GGML_ASSERT(a->ne[3] == 1); - GGML_ASSERT(p0 == 0); - GGML_ASSERT(d0 == 1); + BARK_GGML_ASSERT(p0 == 0); + BARK_GGML_ASSERT(d0 == 1); bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[4] = { - ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/), + bark_ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/), a->ne[1], b->ne[2], 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 4, ne); int32_t params[] = { s0, p0, d0 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CONV_TRANSPOSE_1D; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_TRANSPOSE_1D; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_conv_2d +// bark_ggml_conv_2d // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] // a: [OC,IC, KH, KW] // b: [N, IC, IH, IW] // result: [N, OH, OW, IC*KH*KW] -static struct ggml_tensor * ggml_conv_2d_stage_0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +static struct bark_ggml_tensor * bark_ggml_conv_2d_stage_0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int s1, int p0, @@ -7719,16 +7717,16 @@ static struct ggml_tensor * ggml_conv_2d_stage_0( int d0, int d1) { - GGML_ASSERT(a->ne[2] == b->ne[2]); + BARK_GGML_ASSERT(a->ne[2] == b->ne[2]); bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } - const int64_t OH = ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1); - const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + const int64_t OH = bark_ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1); + const int64_t OW = bark_ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); const int64_t ne[4] = { a->ne[2] * a->ne[1] * a->ne[0], @@ -7736,13 +7734,13 @@ static struct ggml_tensor * ggml_conv_2d_stage_0( OH, b->ne[3], }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F16, 4, ne); int32_t params[] = { s0, s1, p0, p1, d0, d1 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CONV_2D_STAGE_0; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_2D_STAGE_0; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; @@ -7754,15 +7752,15 @@ static struct ggml_tensor * ggml_conv_2d_stage_0( // a: [OC, IC, KH, KW] // b: [N, OH, OW, IC * KH * KW] // result: [N, OC, OH, OW] -static struct ggml_tensor * ggml_conv_2d_stage_1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { +static struct bark_ggml_tensor * bark_ggml_conv_2d_stage_1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -7772,10 +7770,10 @@ static struct ggml_tensor * ggml_conv_2d_stage_1( a->ne[3], b->ne[3], }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 4, ne); - result->op = GGML_OP_CONV_2D_STAGE_1; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_2D_STAGE_1; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; @@ -7786,10 +7784,10 @@ static struct ggml_tensor * ggml_conv_2d_stage_1( // a: [OC,IC, KH, KW] // b: [N, IC, IH, IW] // result: [N, OC, OH, OW] -struct ggml_tensor * ggml_conv_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_conv_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int s1, int p0, @@ -7797,80 +7795,80 @@ struct ggml_tensor * ggml_conv_2d( int d0, int d1) { - struct ggml_tensor * result = ggml_conv_2d_stage_0(ctx, a, b, s0, s1, p0, p1, d0, d1); // [N, OH, OW, IC * KH * KW] - result = ggml_conv_2d_stage_1(ctx, a, result); + struct bark_ggml_tensor * result = bark_ggml_conv_2d_stage_0(ctx, a, b, s0, s1, p0, p1, d0, d1); // [N, OH, OW, IC * KH * KW] + result = bark_ggml_conv_2d_stage_1(ctx, a, result); return result; } -// ggml_conv_2d_sk_p0 -struct ggml_tensor * ggml_conv_2d_sk_p0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1); +// bark_ggml_conv_2d_sk_p0 +struct bark_ggml_tensor * bark_ggml_conv_2d_sk_p0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1); } -// ggml_conv_2d_s1_ph +// bark_ggml_conv_2d_s1_ph -struct ggml_tensor * ggml_conv_2d_s1_ph( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1); +struct bark_ggml_tensor * bark_ggml_conv_2d_s1_ph( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + return bark_ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1); } -// ggml_conv_transpose_2d_p0 +// bark_ggml_conv_transpose_2d_p0 -static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) { +static int64_t bark_ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) { return (ins - 1) * s - 2 * p + ks; } -struct ggml_tensor * ggml_conv_transpose_2d_p0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, +struct bark_ggml_tensor * bark_ggml_conv_transpose_2d_p0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int stride) { - GGML_ASSERT(a->ne[3] == b->ne[2]); + BARK_GGML_ASSERT(a->ne[3] == b->ne[2]); bool is_node = false; if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[4] = { - ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/), - ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/), + bark_ggml_calc_conv_transpose_output_size(b->ne[0], a->ne[0], stride, 0 /*p0*/), + bark_ggml_calc_conv_transpose_output_size(b->ne[1], a->ne[1], stride, 0 /*p1*/), a->ne[2], b->ne[3], }; - struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct bark_ggml_tensor* result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 4, ne); - ggml_set_op_params_i32(result, 0, stride); + bark_ggml_set_op_params_i32(result, 0, stride); - result->op = GGML_OP_CONV_TRANSPOSE_2D; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CONV_TRANSPOSE_2D; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_pool_* +// bark_ggml_pool_* -static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) { +static int64_t bark_ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) { return (ins + 2 * p - ks) / s + 1; } -// ggml_pool_1d +// bark_ggml_pool_1d -struct ggml_tensor * ggml_pool_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_op_pool op, +struct bark_ggml_tensor * bark_ggml_pool_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_op_pool op, int k0, int s0, int p0) { @@ -7878,32 +7876,32 @@ struct ggml_tensor * ggml_pool_1d( bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[3] = { - ggml_calc_pool_output_size(a->ne[0], k0, s0, p0), + bark_ggml_calc_pool_output_size(a->ne[0], k0, s0, p0), a->ne[1], }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 2, ne); int32_t params[] = { op, k0, s0, p0 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_POOL_1D; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_POOL_1D; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_pool_2d +// bark_ggml_pool_2d -struct ggml_tensor * ggml_pool_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_op_pool op, +struct bark_ggml_tensor * bark_ggml_pool_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_op_pool op, int k0, int k1, int s0, @@ -7914,70 +7912,70 @@ struct ggml_tensor * ggml_pool_2d( bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[3] = { - ggml_calc_pool_output_size(a->ne[0], k0, s0, p0), - ggml_calc_pool_output_size(a->ne[1], k1, s1, p1), + bark_ggml_calc_pool_output_size(a->ne[0], k0, s0, p0), + bark_ggml_calc_pool_output_size(a->ne[1], k1, s1, p1), a->ne[2], }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 3, ne); int32_t params[] = { op, k0, k1, s0, s1, p0, p1 }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_POOL_2D; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_POOL_2D; + result->grad = is_node ? 
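// Illustrative aside (not part of the patch): the pooled-length formula used by
// bark_ggml_calc_pool_output_size() above, checked on made-up sizes.
#include <stdio.h>
#include <stdint.h>

static int64_t pool_out(int64_t ins, int ks, int s, int p) {
    return (ins + 2 * p - ks) / s + 1;   // same formula as in the diff
}

int main(void) {
    printf("%lld\n", (long long) pool_out(224, 2, 2, 0)); // 112: 2x2 stride-2 pool halves the length
    printf("%lld\n", (long long) pool_out(7, 7, 7, 0));   // 1: pooling the whole axis at once
    return 0;
}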
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_upscale +// bark_ggml_upscale -static struct ggml_tensor * ggml_upscale_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, +static struct bark_ggml_tensor * bark_ggml_upscale_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int scale_factor) { bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, + struct bark_ggml_tensor * result = bark_ggml_new_tensor_4d(ctx, a->type, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3]); - result->op = GGML_OP_UPSCALE; + result->op = BARK_GGML_OP_UPSCALE; result->op_params[0] = scale_factor; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = NULL; return result; } -struct ggml_tensor * ggml_upscale( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_upscale( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int scale_factor) { - return ggml_upscale_impl(ctx, a, scale_factor); + return bark_ggml_upscale_impl(ctx, a, scale_factor); } -// ggml_flash_attn +// bark_ggml_flash_attn -struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, +struct bark_ggml_tensor * bark_ggml_flash_attn( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * q, + struct bark_ggml_tensor * k, + struct bark_ggml_tensor * v, bool masked) { - GGML_ASSERT(ggml_can_mul_mat(k, q)); + BARK_GGML_ASSERT(bark_ggml_can_mul_mat(k, q)); // TODO: check if vT can be multiplied by (k*qT) bool is_node = false; @@ -7986,14 +7984,14 @@ struct ggml_tensor * ggml_flash_attn( is_node = true; } - //struct ggml_tensor * result = ggml_dup_tensor(ctx, q); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, q->n_dims, q->ne); + //struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, q); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, q->n_dims, q->ne); int32_t t = masked ? 1 : 0; - ggml_set_op_params(result, &t, sizeof(t)); + bark_ggml_set_op_params(result, &t, sizeof(t)); - result->op = GGML_OP_FLASH_ATTN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_FLASH_ATTN; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = q; result->src[1] = k; result->src[2] = v; @@ -8001,16 +7999,16 @@ struct ggml_tensor * ggml_flash_attn( return result; } -// ggml_flash_ff +// bark_ggml_flash_ff -struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1) { - GGML_ASSERT(ggml_can_mul_mat(b0, a)); +struct bark_ggml_tensor * bark_ggml_flash_ff( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b0, + struct bark_ggml_tensor * b1, + struct bark_ggml_tensor * c0, + struct bark_ggml_tensor * c1) { + BARK_GGML_ASSERT(bark_ggml_can_mul_mat(b0, a)); // TODO: more checks bool is_node = false; @@ -8019,11 +8017,11 @@ struct ggml_tensor * ggml_flash_ff( is_node = true; } - //struct ggml_tensor * result = ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne); + //struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, a->n_dims, a->ne); - result->op = GGML_OP_FLASH_FF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_FLASH_FF; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b0; result->src[2] = b1; @@ -8033,16 +8031,16 @@ struct ggml_tensor * ggml_flash_ff( return result; } -// ggml_flash_attn_back +// bark_ggml_flash_attn_back -struct ggml_tensor * ggml_flash_attn_back( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, - struct ggml_tensor * d, +struct bark_ggml_tensor * bark_ggml_flash_attn_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * q, + struct bark_ggml_tensor * k, + struct bark_ggml_tensor * v, + struct bark_ggml_tensor * d, bool masked) { - GGML_ASSERT(ggml_can_mul_mat(k, q)); + BARK_GGML_ASSERT(bark_ggml_can_mul_mat(k, q)); // TODO: check if vT can be multiplied by (k*qT) // d shape [D,N,ne2,ne3] @@ -8057,19 +8055,19 @@ struct ggml_tensor * ggml_flash_attn_back( const int64_t ne3 = q->ne[3]; const int64_t kvne2 = k->ne[2]; - GGML_ASSERT(k->ne[0] == D); - GGML_ASSERT(v->ne[0] == M); - GGML_ASSERT(v->ne[1] == D); - GGML_ASSERT(d->ne[0] == D); - GGML_ASSERT(d->ne[1] == N); - GGML_ASSERT(k->ne[2] == kvne2); - GGML_ASSERT(k->ne[3] == ne3); - GGML_ASSERT(v->ne[2] == kvne2); - GGML_ASSERT(v->ne[3] == ne3); - GGML_ASSERT(d->ne[2] == ne2); - GGML_ASSERT(d->ne[3] == ne3); + BARK_GGML_ASSERT(k->ne[0] == D); + BARK_GGML_ASSERT(v->ne[0] == M); + BARK_GGML_ASSERT(v->ne[1] == D); + BARK_GGML_ASSERT(d->ne[0] == D); + BARK_GGML_ASSERT(d->ne[1] == N); + BARK_GGML_ASSERT(k->ne[2] == kvne2); + BARK_GGML_ASSERT(k->ne[3] == ne3); + BARK_GGML_ASSERT(v->ne[2] == kvne2); + BARK_GGML_ASSERT(v->ne[3] == ne3); + BARK_GGML_ASSERT(d->ne[2] == ne2); + BARK_GGML_ASSERT(d->ne[3] == ne3); - GGML_ASSERT(ne2 % kvne2 == 0); + BARK_GGML_ASSERT(ne2 % kvne2 == 0); bool is_node = false; @@ -8081,28 +8079,28 @@ struct ggml_tensor * ggml_flash_attn_back( // store gradients of q, k and v as continuous tensors concatenated in result. // note: v and gradv are actually transposed, i.e. v->ne[0] != D. 
- const int64_t elem_q = ggml_nelements(q); - const int64_t elem_k = ggml_nelements(k); - const int64_t elem_v = ggml_nelements(v); + const int64_t elem_q = bark_ggml_nelements(q); + const int64_t elem_k = bark_ggml_nelements(k); + const int64_t elem_v = bark_ggml_nelements(v); - enum ggml_type result_type = GGML_TYPE_F32; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); + enum bark_ggml_type result_type = BARK_GGML_TYPE_F32; + BARK_GGML_ASSERT(bark_ggml_blck_size(result_type) == 1); + const size_t tsize = bark_ggml_type_size(result_type); const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); - const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN); + const size_t offs_k = offs_q + BARK_GGML_PAD(elem_q * tsize, BARK_GGML_MEM_ALIGN); + const size_t offs_v = offs_k + BARK_GGML_PAD(elem_k * tsize, BARK_GGML_MEM_ALIGN); + const size_t end = offs_v + BARK_GGML_PAD(elem_v * tsize, BARK_GGML_MEM_ALIGN); const size_t nelements = (end + tsize - 1)/tsize; - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, nelements); int32_t masked_i = masked ? 1 : 0; - ggml_set_op_params(result, &masked_i, sizeof(masked_i)); + bark_ggml_set_op_params(result, &masked_i, sizeof(masked_i)); - result->op = GGML_OP_FLASH_ATTN_BACK; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_FLASH_ATTN_BACK; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = q; result->src[1] = k; result->src[2] = v; @@ -8111,19 +8109,19 @@ struct ggml_tensor * ggml_flash_attn_back( return result; } -// ggml_win_part +// bark_ggml_win_part -struct ggml_tensor * ggml_win_part( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_win_part( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int w) { - GGML_ASSERT(a->ne[3] == 1); - GGML_ASSERT(a->type == GGML_TYPE_F32); + BARK_GGML_ASSERT(a->ne[3] == 1); + BARK_GGML_ASSERT(a->type == BARK_GGML_TYPE_F32); bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -8137,93 +8135,93 @@ struct ggml_tensor * ggml_win_part( const int64_t ne[4] = { a->ne[0], w, w, np, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 4, ne); int32_t params[] = { npx, npy, w }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_WIN_PART; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_WIN_PART; + result->grad = is_node ? 
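// Illustrative aside (not part of the patch): how bark_ggml_flash_attn_back() above packs
// the q/k/v gradients into one flat F32 tensor. The helper below assumes BARK_GGML_PAD()
// rounds a byte count up to a multiple of the alignment; the 16-byte alignment and the
// element counts are invented example values, not values from the library.
#include <stdio.h>
#include <stddef.h>

static size_t pad_to(size_t x, size_t n) { return (x + n - 1) / n * n; } // stand-in for BARK_GGML_PAD

int main(void) {
    const size_t tsize  = 4;                                   // sizeof(float), block size 1 for F32
    const size_t align  = 16;                                  // assumed alignment for the sketch
    const size_t elem_q = 64 * 32, elem_k = 64 * 48, elem_v = 48 * 64;
    const size_t offs_q = 0;
    const size_t offs_k = offs_q + pad_to(elem_q * tsize, align);
    const size_t offs_v = offs_k + pad_to(elem_k * tsize, align);
    const size_t end    = offs_v + pad_to(elem_v * tsize, align);
    const size_t nelements = (end + tsize - 1) / tsize;         // length of the flat F32 result
    printf("offs_k=%zu offs_v=%zu nelements=%zu\n", offs_k, offs_v, nelements);
    return 0;
}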
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_win_unpart +// bark_ggml_win_unpart -struct ggml_tensor * ggml_win_unpart( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_win_unpart( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int w0, int h0, int w) { - GGML_ASSERT(a->type == GGML_TYPE_F32); + BARK_GGML_ASSERT(a->type == BARK_GGML_TYPE_F32); bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[4] = { a->ne[0], w0, h0, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F32, 3, ne); int32_t params[] = { w }; - ggml_set_op_params(result, params, sizeof(params)); + bark_ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_WIN_UNPART; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_WIN_UNPART; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -// ggml_get_rel_pos +// bark_ggml_get_rel_pos -struct ggml_tensor * ggml_get_rel_pos( - struct ggml_context * ctx, - struct ggml_tensor * a, +struct bark_ggml_tensor * bark_ggml_get_rel_pos( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int qh, int kh) { - GGML_ASSERT(qh == kh); - GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]); + BARK_GGML_ASSERT(qh == kh); + BARK_GGML_ASSERT(2*MAX(qh, kh) - 1 == a->ne[1]); bool is_node = false; if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward + BARK_GGML_ASSERT(false); // TODO: implement backward is_node = true; } const int64_t ne[4] = { a->ne[0], kh, qh, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 3, ne); + struct bark_ggml_tensor * result = bark_ggml_new_tensor(ctx, BARK_GGML_TYPE_F16, 3, ne); - result->op = GGML_OP_GET_REL_POS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_GET_REL_POS; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = NULL; return result; } -// ggml_add_rel_pos +// bark_ggml_add_rel_pos -static struct ggml_tensor * ggml_add_rel_pos_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * pw, - struct ggml_tensor * ph, +static struct bark_ggml_tensor * bark_ggml_add_rel_pos_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * pw, + struct bark_ggml_tensor * ph, bool inplace) { - GGML_ASSERT(ggml_are_same_shape(pw, ph)); - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_is_contiguous(pw)); - GGML_ASSERT(ggml_is_contiguous(ph)); - GGML_ASSERT(ph->type == GGML_TYPE_F32); - GGML_ASSERT(pw->type == GGML_TYPE_F32); - GGML_ASSERT(pw->ne[3] == a->ne[2]); - GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]); - GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(pw, ph)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(a)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(pw)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(ph)); + BARK_GGML_ASSERT(ph->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(pw->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(pw->ne[3] == a->ne[2]); + BARK_GGML_ASSERT(pw->ne[0]*pw->ne[0] == a->ne[0]); + BARK_GGML_ASSERT(pw->ne[1]*pw->ne[2] == a->ne[1]); bool is_node = false; @@ -8231,11 +8229,11 @@ static struct ggml_tensor * ggml_add_rel_pos_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - ggml_set_op_params_i32(result, 0, inplace ? 1 : 0); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); + bark_ggml_set_op_params_i32(result, 0, inplace ? 1 : 0); - result->op = GGML_OP_ADD_REL_POS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_ADD_REL_POS; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = pw; result->src[2] = ph; @@ -8243,28 +8241,28 @@ static struct ggml_tensor * ggml_add_rel_pos_impl( return result; } -struct ggml_tensor * ggml_add_rel_pos( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * pw, - struct ggml_tensor * ph) { - return ggml_add_rel_pos_impl(ctx, a, pw, ph, false); +struct bark_ggml_tensor * bark_ggml_add_rel_pos( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * pw, + struct bark_ggml_tensor * ph) { + return bark_ggml_add_rel_pos_impl(ctx, a, pw, ph, false); } -struct ggml_tensor * ggml_add_rel_pos_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * pw, - struct ggml_tensor * ph) { - return ggml_add_rel_pos_impl(ctx, a, pw, ph, true); +struct bark_ggml_tensor * bark_ggml_add_rel_pos_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * pw, + struct bark_ggml_tensor * ph) { + return bark_ggml_add_rel_pos_impl(ctx, a, pw, ph, true); } // gmml_unary -static struct ggml_tensor * ggml_unary_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_unary_op op, +static struct bark_ggml_tensor * bark_ggml_unary_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_unary_op op, bool inplace) { bool is_node = false; @@ -8272,37 +8270,37 @@ static struct ggml_tensor * ggml_unary_impl( is_node = true; } - struct ggml_tensor * result = inplace ? 
ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params_i32(result, 0, (int32_t) op); + bark_ggml_set_op_params_i32(result, 0, (int32_t) op); - result->op = GGML_OP_UNARY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_UNARY; + result->grad = is_node ? bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_unary( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_unary_op op) { - return ggml_unary_impl(ctx, a, op, false); +struct bark_ggml_tensor * bark_ggml_unary( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_unary_op op) { + return bark_ggml_unary_impl(ctx, a, op, false); } -struct ggml_tensor * ggml_unary_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_unary_op op) { - return ggml_unary_impl(ctx, a, op, true); +struct bark_ggml_tensor * bark_ggml_unary_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_unary_op op) { + return bark_ggml_unary_impl(ctx, a, op, true); } -// ggml_map_unary +// bark_ggml_map_unary -static struct ggml_tensor * ggml_map_unary_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun, +static struct bark_ggml_tensor * bark_ggml_map_unary_impl_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_unary_op_f32_t fun, bool inplace) { bool is_node = false; @@ -8310,40 +8308,40 @@ static struct ggml_tensor * ggml_map_unary_impl_f32( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); + bark_ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); - result->op = GGML_OP_MAP_UNARY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_UNARY; + result->grad = is_node ? 
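// Illustrative aside (not part of the patch): the recurring `inplace ? view_tensor : dup_tensor`
// pattern in the *_impl() builders above, reduced to plain C. A "view" aliases the source buffer
// (the op will overwrite it), a "dup" gets its own storage. All names and types here are invented
// for the sketch.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { float *data; int n; } toy_tensor;

static toy_tensor toy_view(toy_tensor a) { return a; }           // shares a.data
static toy_tensor toy_dup(const toy_tensor *a) {                 // owns a fresh copy
    toy_tensor r = { malloc(a->n * sizeof(float)), a->n };
    memcpy(r.data, a->data, a->n * sizeof(float));
    return r;
}

int main(void) {
    float buf[3] = { 1, 2, 3 };
    toy_tensor a = { buf, 3 };
    toy_tensor in_place     = toy_view(a);   // writes through in_place.data also change a
    toy_tensor out_of_place = toy_dup(&a);   // writes here leave a untouched
    in_place.data[0] = -1;
    printf("a[0]=%g dup[0]=%g\n", a.data[0], out_of_place.data[0]); // -1 vs 1
    free(out_of_place.data);
    return 0;
}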
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_map_unary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun) { - return ggml_map_unary_impl_f32(ctx, a, fun, false); +struct bark_ggml_tensor * bark_ggml_map_unary_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_unary_op_f32_t fun) { + return bark_ggml_map_unary_impl_f32(ctx, a, fun, false); } -struct ggml_tensor * ggml_map_unary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun) { - return ggml_map_unary_impl_f32(ctx, a, fun, true); +struct bark_ggml_tensor * bark_ggml_map_unary_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_unary_op_f32_t fun) { + return bark_ggml_map_unary_impl_f32(ctx, a, fun, true); } -// ggml_map_binary +// bark_ggml_map_binary -static struct ggml_tensor * ggml_map_binary_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun, +static struct bark_ggml_tensor * bark_ggml_map_binary_impl_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_binary_op_f32_t fun, bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); bool is_node = false; @@ -8351,40 +8349,40 @@ static struct ggml_tensor * ggml_map_binary_impl_f32( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); + bark_ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); - result->op = GGML_OP_MAP_BINARY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_BINARY; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_map_binary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun) { - return ggml_map_binary_impl_f32(ctx, a, b, fun, false); +struct bark_ggml_tensor * bark_ggml_map_binary_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_binary_op_f32_t fun) { + return bark_ggml_map_binary_impl_f32(ctx, a, b, fun, false); } -struct ggml_tensor * ggml_map_binary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun) { - return ggml_map_binary_impl_f32(ctx, a, b, fun, true); +struct bark_ggml_tensor * bark_ggml_map_binary_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_binary_op_f32_t fun) { + return bark_ggml_map_binary_impl_f32(ctx, a, b, fun, true); } -// ggml_map_custom1_f32 +// bark_ggml_map_custom1_f32 -static struct ggml_tensor * ggml_map_custom1_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_f32_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom1_impl_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_f32_t fun, bool inplace) { bool is_node = false; @@ -8392,38 +8390,38 @@ static struct ggml_tensor * ggml_map_custom1_impl_f32( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); + bark_ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); - result->op = GGML_OP_MAP_CUSTOM1_F32; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM1_F32; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_map_custom1_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_f32_t fun) { - return ggml_map_custom1_impl_f32(ctx, a, fun, false); +struct bark_ggml_tensor * bark_ggml_map_custom1_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_f32_t fun) { + return bark_ggml_map_custom1_impl_f32(ctx, a, fun, false); } -struct ggml_tensor * ggml_map_custom1_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_f32_t fun) { - return ggml_map_custom1_impl_f32(ctx, a, fun, true); +struct bark_ggml_tensor * bark_ggml_map_custom1_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_f32_t fun) { + return bark_ggml_map_custom1_impl_f32(ctx, a, fun, true); } -// ggml_map_custom2_f32 +// bark_ggml_map_custom2_f32 -static struct ggml_tensor * ggml_map_custom2_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_f32_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom2_impl_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_f32_t fun, bool inplace) { bool is_node = false; @@ -8431,42 +8429,42 @@ static struct ggml_tensor * ggml_map_custom2_impl_f32( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); + bark_ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); - result->op = GGML_OP_MAP_CUSTOM2_F32; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM2_F32; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_map_custom2_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_f32_t fun) { - return ggml_map_custom2_impl_f32(ctx, a, b, fun, false); +struct bark_ggml_tensor * bark_ggml_map_custom2_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_f32_t fun) { + return bark_ggml_map_custom2_impl_f32(ctx, a, b, fun, false); } -struct ggml_tensor * ggml_map_custom2_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_f32_t fun) { - return ggml_map_custom2_impl_f32(ctx, a, b, fun, true); +struct bark_ggml_tensor * bark_ggml_map_custom2_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_f32_t fun) { + return bark_ggml_map_custom2_impl_f32(ctx, a, b, fun, true); } -// ggml_map_custom3_f32 +// bark_ggml_map_custom3_f32 -static struct ggml_tensor * ggml_map_custom3_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_f32_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom3_impl_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_f32_t fun, bool inplace) { bool is_node = false; @@ -8474,12 +8472,12 @@ static struct ggml_tensor * ggml_map_custom3_impl_f32( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); + bark_ggml_set_op_params(result, (const void *) &fun, sizeof(fun)); - result->op = GGML_OP_MAP_CUSTOM3_F32; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM3_F32; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; result->src[2] = c; @@ -8487,39 +8485,39 @@ static struct ggml_tensor * ggml_map_custom3_impl_f32( return result; } -struct ggml_tensor * ggml_map_custom3_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_f32_t fun) { - return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false); +struct bark_ggml_tensor * bark_ggml_map_custom3_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_f32_t fun) { + return bark_ggml_map_custom3_impl_f32(ctx, a, b, c, fun, false); } -struct ggml_tensor * ggml_map_custom3_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_f32_t fun) { - return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true); +struct bark_ggml_tensor * bark_ggml_map_custom3_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_f32_t fun) { + return bark_ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true); } -// ggml_map_custom1 -struct ggml_map_custom1_op_params { - ggml_custom1_op_t fun; +// bark_ggml_map_custom1 +struct bark_ggml_map_custom1_op_params { + bark_ggml_custom1_op_t fun; int n_tasks; void * userdata; }; -static struct ggml_tensor * ggml_map_custom1_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom1_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_t fun, int n_tasks, void * userdata, bool inplace) { - GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0); + BARK_GGML_ASSERT(n_tasks == BARK_GGML_N_TASKS_MAX || n_tasks > 0); bool is_node = false; @@ -8527,57 +8525,57 @@ static struct ggml_tensor * ggml_map_custom1_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - struct ggml_map_custom1_op_params params = { + struct bark_ggml_map_custom1_op_params params = { /*.fun =*/ fun, /*.n_tasks =*/ n_tasks, /*.userdata =*/ userdata }; - ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); + bark_ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); - result->op = GGML_OP_MAP_CUSTOM1; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM1; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } -struct ggml_tensor * ggml_map_custom1( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false); + return bark_ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false); } -struct ggml_tensor * ggml_map_custom1_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_custom1_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom1_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + const bark_ggml_custom1_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true); + return bark_ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true); } -// ggml_map_custom2 +// bark_ggml_map_custom2 -struct ggml_map_custom2_op_params { - ggml_custom2_op_t fun; +struct bark_ggml_map_custom2_op_params { + bark_ggml_custom2_op_t fun; int n_tasks; void * userdata; }; -static struct ggml_tensor * ggml_map_custom2_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom2_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_t fun, int n_tasks, void * userdata, bool inplace) { - GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0); + BARK_GGML_ASSERT(n_tasks == BARK_GGML_N_TASKS_MAX || n_tasks > 0); bool is_node = false; @@ -8585,61 +8583,61 @@ static struct ggml_tensor * ggml_map_custom2_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - struct ggml_map_custom2_op_params params = { + struct bark_ggml_map_custom2_op_params params = { /*.fun =*/ fun, /*.n_tasks =*/ n_tasks, /*.userdata =*/ userdata }; - ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); + bark_ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); - result->op = GGML_OP_MAP_CUSTOM2; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM2; + result->grad = is_node ? 
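// Illustrative aside (not part of the patch): the map_custom* builders above stash
// { fun, n_tasks, userdata } into the node's op_params bytes via bark_ggml_set_op_params(),
// and the compute phase copies them back out. A minimal standalone round trip of that
// packing; the buffer size, struct, and callback here are invented for the sketch.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef void (*toy_op_fn)(float *dst, const float *src, int n);
struct toy_op_params { toy_op_fn fun; int n_tasks; void *userdata; };

static void toy_scale(float *dst, const float *src, int n) {
    for (int i = 0; i < n; i++) dst[i] = 2.0f * src[i];
}

int main(void) {
    int32_t op_params[16] = { 0 };                 // plays the role of tensor->op_params
    struct toy_op_params in = { toy_scale, 4, NULL };
    assert(sizeof(in) <= sizeof(op_params));       // defensive size check for the sketch
    memcpy(op_params, &in, sizeof(in));            // what the builder does
    struct toy_op_params out;
    memcpy(&out, op_params, sizeof(out));          // what the compute callback does
    float src[4] = { 1, 2, 3, 4 }, dst[4];
    out.fun(dst, src, 4);
    printf("n_tasks=%d dst[3]=%g\n", out.n_tasks, dst[3]); // 4 and 8
    return 0;
}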
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_map_custom2( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom2( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false); + return bark_ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false); } -struct ggml_tensor * ggml_map_custom2_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_custom2_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom2_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + const bark_ggml_custom2_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true); + return bark_ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true); } -// ggml_map_custom3 +// bark_ggml_map_custom3 -struct ggml_map_custom3_op_params { - ggml_custom3_op_t fun; +struct bark_ggml_map_custom3_op_params { + bark_ggml_custom3_op_t fun; int n_tasks; void * userdata; }; -static struct ggml_tensor * ggml_map_custom3_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_t fun, +static struct bark_ggml_tensor * bark_ggml_map_custom3_impl( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_t fun, int n_tasks, void * userdata, bool inplace) { - GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0); + BARK_GGML_ASSERT(n_tasks == BARK_GGML_N_TASKS_MAX || n_tasks > 0); bool is_node = false; @@ -8647,17 +8645,17 @@ static struct ggml_tensor * ggml_map_custom3_impl( is_node = true; } - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = inplace ? bark_ggml_view_tensor(ctx, a) : bark_ggml_dup_tensor(ctx, a); - struct ggml_map_custom3_op_params params = { + struct bark_ggml_map_custom3_op_params params = { /*.fun =*/ fun, /*.n_tasks =*/ n_tasks, /*.userdata =*/ userdata }; - ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); + bark_ggml_set_op_params(result, (const void *) ¶ms, sizeof(params)); - result->op = GGML_OP_MAP_CUSTOM3; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_MAP_CUSTOM3; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; result->src[2] = c; @@ -8665,64 +8663,64 @@ static struct ggml_tensor * ggml_map_custom3_impl( return result; } -struct ggml_tensor * ggml_map_custom3( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom3( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false); + return bark_ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false); } -struct ggml_tensor * ggml_map_custom3_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - const ggml_custom3_op_t fun, +struct bark_ggml_tensor * bark_ggml_map_custom3_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + const bark_ggml_custom3_op_t fun, int n_tasks, void * userdata) { - return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true); + return bark_ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true); } -// ggml_cross_entropy_loss +// bark_ggml_cross_entropy_loss -struct ggml_tensor * ggml_cross_entropy_loss( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_are_same_shape(a, b)); +struct bark_ggml_tensor * bark_ggml_cross_entropy_loss( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); bool is_node = false; if (a->grad || b->grad) { is_node = true; } - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1); + struct bark_ggml_tensor * result = bark_ggml_new_tensor_1d(ctx, a->type, 1); - result->op = GGML_OP_CROSS_ENTROPY_LOSS; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->op = BARK_GGML_OP_CROSS_ENTROPY_LOSS; + result->grad = is_node ? 
bark_ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; } -// ggml_cross_entropy_loss_back +// bark_ggml_cross_entropy_loss_back -struct ggml_tensor * ggml_cross_entropy_loss_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - GGML_ASSERT(ggml_is_scalar(c)); +struct bark_ggml_tensor * bark_ggml_cross_entropy_loss_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(a, b)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(c)); - struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + struct bark_ggml_tensor * result = bark_ggml_dup_tensor(ctx, a); - result->op = GGML_OP_CROSS_ENTROPY_LOSS_BACK; + result->op = BARK_GGML_OP_CROSS_ENTROPY_LOSS_BACK; result->grad = NULL; result->src[0] = a; result->src[1] = b; @@ -8733,26 +8731,26 @@ struct ggml_tensor * ggml_cross_entropy_loss_back( //////////////////////////////////////////////////////////////////////////////// -void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor) { +void bark_ggml_set_param( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * tensor) { tensor->is_param = true; - GGML_ASSERT(tensor->grad == NULL); - tensor->grad = ggml_dup_tensor(ctx, tensor); + BARK_GGML_ASSERT(tensor->grad == NULL); + tensor->grad = bark_ggml_dup_tensor(ctx, tensor); } -// ggml_compute_forward_dup +// bark_ggml_compute_forward_dup -static void ggml_compute_forward_dup_same_cont( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - GGML_ASSERT(src0->type == dst->type); +static void bark_ggml_compute_forward_dup_same_cont( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_nelements(dst) == bark_ggml_nelements(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst) && bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(src0->type == dst->type); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -8763,7 +8761,7 @@ static void ggml_compute_forward_dup_same_cont( const int nth = params->nth; // number of threads // parallelize by elements - const int ne = ggml_nelements(dst); + const int ne = bark_ggml_nelements(dst); const int dr = (ne + nth - 1) / nth; const int ie0 = dr * ith; const int ie1 = MIN(ie0 + dr, ne); @@ -8772,27 +8770,27 @@ static void ggml_compute_forward_dup_same_cont( memcpy( ((char *) dst->data + ie0*nb0), ((char *) src0->data + ie0*nb00), - (ie1 - ie0) * ggml_type_size(src0->type)); + (ie1 - ie0) * bark_ggml_type_size(src0->type)); } } -static void ggml_compute_forward_dup_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); +static void bark_ggml_compute_forward_dup_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_nelements(dst) == bark_ggml_nelements(src0)); - if (params->type == GGML_TASK_INIT || 
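// Illustrative aside (not part of the patch): the element-range split used by
// bark_ggml_compute_forward_dup_same_cont() above — ceil-divide the element count across
// nth threads and clamp the last range. The sizes are made up for the example.
#include <stdio.h>

#define TOY_MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    const int ne = 10, nth = 4;                 // 10 elements over 4 threads
    const int dr = (ne + nth - 1) / nth;        // 3 elements per thread (ceiling division)
    for (int ith = 0; ith < nth; ith++) {
        const int ie0 = dr * ith;
        const int ie1 = TOY_MIN(ie0 + dr, ne);
        printf("thread %d: [%d, %d)\n", ith, ie0, ie1); // last thread gets the short range [9, 10)
    }
    return 0;
}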
params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS const int ith = params->ith; // thread index const int nth = params->nth; // number of threads - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { - ggml_compute_forward_dup_same_cont(params, src0, dst); + if (bark_ggml_is_contiguous(src0) && bark_ggml_is_contiguous(dst) && src0->type == dst->type) { + bark_ggml_compute_forward_dup_same_cont(params, src0, dst); return; } @@ -8806,7 +8804,7 @@ static void ggml_compute_forward_dup_f16( if (src0->type == dst->type && ne00 == ne0 && - nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) { + nb00 == bark_ggml_type_size(src0->type) && nb0 == bark_ggml_type_size(dst->type)) { // copy by rows const size_t rs = ne00*nb00; for (int64_t i03 = 0; i03 < ne03; i03++) { @@ -8824,9 +8822,9 @@ static void ggml_compute_forward_dup_f16( // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy - if (ggml_is_contiguous(dst)) { - if (nb00 == sizeof(ggml_fp16_t)) { - if (dst->type == GGML_TYPE_F16) { + if (bark_ggml_is_contiguous(dst)) { + if (nb00 == sizeof(bark_ggml_fp16_t)) { + if (dst->type == BARK_GGML_TYPE_F16) { size_t id = 0; const size_t rs = ne00 * nb00; char * dst_ptr = (char *) dst->data; @@ -8842,7 +8840,7 @@ static void ggml_compute_forward_dup_f16( id += rs * (ne01 - ir1); } } - } else if (dst->type == GGML_TYPE_F32) { + } else if (dst->type == BARK_GGML_TYPE_F32) { size_t id = 0; float * dst_ptr = (float *) dst->data; @@ -8850,9 +8848,9 @@ static void ggml_compute_forward_dup_f16( for (int i02 = 0; i02 < ne02; i02++) { id += ne00 * ir0; for (int i01 = ir0; i01 < ir1; i01++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + const bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]); + dst_ptr[id] = BARK_GGML_FP16_TO_FP32(src0_ptr[i00]); id++; } } @@ -8860,21 +8858,21 @@ static void ggml_compute_forward_dup_f16( } } } else if (type_traits[dst->type].from_float) { - ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float; + bark_ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float; float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; size_t id = 0; - size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + size_t rs = nb0 * (ne00 / bark_ggml_blck_size(dst->type)); char * dst_ptr = (char *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += rs * ir0; for (int i01 = ir0; i01 < ir1; i01++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + const bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]); + src0_f32[i00] = BARK_GGML_FP16_TO_FP32(src0_ptr[i00]); } quantize_row_q(src0_f32, dst_ptr + id, ne00); @@ -8884,12 +8882,12 @@ static void ggml_compute_forward_dup_f16( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } else { //printf("%s: this is not optimal - fix me\n", __func__); - if (dst->type == 
GGML_TYPE_F32) { + if (dst->type == BARK_GGML_TYPE_F32) { size_t id = 0; float * dst_ptr = (float *) dst->data; @@ -8898,25 +8896,25 @@ static void ggml_compute_forward_dup_f16( id += ne00 * ir0; for (int i01 = ir0; i01 < ir1; i01++) { for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + const bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); + dst_ptr[id] = BARK_GGML_FP16_TO_FP32(*src0_ptr); id++; } } id += ne00 * (ne01 - ir1); } } - } else if (dst->type == GGML_TYPE_F16) { + } else if (dst->type == BARK_GGML_TYPE_F16) { size_t id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { id += ne00 * ir0; for (int i01 = ir0; i01 < ir1; i01++) { for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); + const bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); dst_ptr[id] = *src0_ptr; id++; @@ -8926,7 +8924,7 @@ static void ggml_compute_forward_dup_f16( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } return; @@ -8938,7 +8936,7 @@ static void ggml_compute_forward_dup_f16( int64_t i12 = 0; int64_t i13 = 0; - if (dst->type == GGML_TYPE_F16) { + if (dst->type == BARK_GGML_TYPE_F16) { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * ir0; @@ -8959,7 +8957,7 @@ static void ggml_compute_forward_dup_f16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t)); + memcpy(dst_ptr, src0_ptr, sizeof(bark_ggml_fp16_t)); if (++i10 == ne00) { i10 = 0; @@ -8990,7 +8988,7 @@ static void ggml_compute_forward_dup_f16( } } } - } else if (dst->type == GGML_TYPE_F32) { + } else if (dst->type == BARK_GGML_TYPE_F32) { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * ir0; @@ -9011,7 +9009,7 @@ static void ggml_compute_forward_dup_f16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); + *(float *) dst_ptr = BARK_GGML_FP16_TO_FP32(*(const bark_ggml_fp16_t *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -9043,27 +9041,27 @@ static void ggml_compute_forward_dup_f16( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } -static void ggml_compute_forward_dup_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); +static void bark_ggml_compute_forward_dup_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_nelements(dst) == bark_ggml_nelements(src0)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if 
(params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS const int ith = params->ith; // thread index const int nth = params->nth; // number of threads - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { - ggml_compute_forward_dup_same_cont(params, src0, dst); + if (bark_ggml_is_contiguous(src0) && bark_ggml_is_contiguous(dst) && src0->type == dst->type) { + bark_ggml_compute_forward_dup_same_cont(params, src0, dst); return; } @@ -9077,7 +9075,7 @@ static void ggml_compute_forward_dup_f32( if (src0->type == dst->type && ne00 == ne0 && - nb00 == ggml_type_size(src0->type) && nb0 == ggml_type_size(dst->type)) { + nb00 == bark_ggml_type_size(src0->type) && nb0 == bark_ggml_type_size(dst->type)) { // copy by rows const size_t rs = ne00*nb00; for (int64_t i03 = 0; i03 < ne03; i03++) { @@ -9093,10 +9091,10 @@ static void ggml_compute_forward_dup_f32( return; } - if (ggml_is_contiguous(dst)) { + if (bark_ggml_is_contiguous(dst)) { // TODO: simplify if (nb00 == sizeof(float)) { - if (dst->type == GGML_TYPE_F32) { + if (dst->type == BARK_GGML_TYPE_F32) { size_t id = 0; const size_t rs = ne00 * nb00; char * dst_ptr = (char *) dst->data; @@ -9113,10 +9111,10 @@ static void ggml_compute_forward_dup_f32( } } } else if (type_traits[dst->type].from_float) { - ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float; + bark_ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float; size_t id = 0; - size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + size_t rs = nb0 * (ne00 / bark_ggml_blck_size(dst->type)); char * dst_ptr = (char *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { @@ -9131,12 +9129,12 @@ static void ggml_compute_forward_dup_f32( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } else { //printf("%s: this is not optimal - fix me\n", __func__); - if (dst->type == GGML_TYPE_F32) { + if (dst->type == BARK_GGML_TYPE_F32) { size_t id = 0; float * dst_ptr = (float *) dst->data; @@ -9154,9 +9152,9 @@ static void ggml_compute_forward_dup_f32( id += ne00 * (ne01 - ir1); } } - } else if (dst->type == GGML_TYPE_F16) { + } else if (dst->type == BARK_GGML_TYPE_F16) { size_t id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) dst->data; for (int i03 = 0; i03 < ne03; i03++) { for (int i02 = 0; i02 < ne02; i02++) { @@ -9165,7 +9163,7 @@ static void ggml_compute_forward_dup_f32( for (int i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); + dst_ptr[id] = BARK_GGML_FP32_TO_FP16(*src0_ptr); id++; } } @@ -9173,7 +9171,7 @@ static void ggml_compute_forward_dup_f32( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } @@ -9187,7 +9185,7 @@ static void ggml_compute_forward_dup_f32( int64_t i12 = 0; int64_t i13 = 0; - if (dst->type == GGML_TYPE_F32) { + if (dst->type == BARK_GGML_TYPE_F32) { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * ir0; @@ -9239,7 +9237,7 @@ static void ggml_compute_forward_dup_f32( } } } - } else if (dst->type == GGML_TYPE_F16) { + } else if (dst->type == BARK_GGML_TYPE_F16) { for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { i10 += ne00 * 
ir0; @@ -9260,7 +9258,7 @@ static void ggml_compute_forward_dup_f32( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr); + *(bark_ggml_fp16_t *) dst_ptr = BARK_GGML_FP32_TO_FP16(*(const float *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -9292,56 +9290,56 @@ static void ggml_compute_forward_dup_f32( } } } else { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } } -static void ggml_compute_forward_dup( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { - ggml_compute_forward_dup_same_cont(params, src0, dst); +static void bark_ggml_compute_forward_dup( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + if (bark_ggml_is_contiguous(src0) && bark_ggml_is_contiguous(dst) && src0->type == dst->type) { + bark_ggml_compute_forward_dup_same_cont(params, src0, dst); return; } switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_dup_f16(params, src0, dst); + bark_ggml_compute_forward_dup_f16(params, src0, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_dup_f32(params, src0, dst); + bark_ggml_compute_forward_dup_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_add +// bark_ggml_compute_forward_add -static void ggml_compute_forward_add_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_add_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_can_repeat_rows(src1, src0) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9365,10 +9363,10 @@ static void ggml_compute_forward_add_f32( float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11); -#ifdef GGML_USE_ACCELERATE +#ifdef BARK_GGML_USE_ACCELERATE vDSP_vadd(src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00); #else - ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr); + bark_ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr); #endif } } else { @@ -9395,30 +9393,30 @@ static void ggml_compute_forward_add_f32( } } -static void ggml_compute_forward_add_f16_f32( - const struct ggml_compute_params * params, - 
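// Illustrative aside (not part of the patch): the row addressing used in
// bark_ggml_compute_forward_add_f32() above — rows are located with byte strides
// (nb1/nb2/nb3) rather than element indices, which is what the `nb0 == sizeof(float)`
// asserts protect. The shapes below are made up for the example.
#include <stdio.h>
#include <stddef.h>

int main(void) {
    enum { NE0 = 4, NE1 = 3, NE2 = 2 };               // row length, rows per plane, planes
    float data[NE2][NE1][NE0];
    for (int i = 0; i < NE2 * NE1 * NE0; i++) ((float *) data)[i] = (float) i;

    const size_t nb0 = sizeof(float);                  // contiguous innermost dim
    const size_t nb1 = nb0 * NE0;                      // bytes between rows
    const size_t nb2 = nb1 * NE1;                      // bytes between planes

    const int i1 = 2, i2 = 1;                          // row 2 of plane 1
    const float *row = (const float *) ((const char *) data + i2 * nb2 + i1 * nb1);
    printf("%g\n", row[0]);                            // 20 == (1*3 + 2) * 4
    return 0;
}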
const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_add_f16_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(dst->type == BARK_GGML_TYPE_F16); - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT( nb0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9434,45 +9432,45 @@ static void ggml_compute_forward_add_f16_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]); + dst_ptr[i] = BARK_GGML_FP32_TO_FP16(BARK_GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]); } } } else { // src1 is not contiguous - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } -static void ggml_compute_forward_add_f16_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_add_f16_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F16); + 
BARK_GGML_ASSERT(dst->type == BARK_GGML_TYPE_F16); - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT( nb0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9481,62 +9479,62 @@ static void ggml_compute_forward_add_f16_f16( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - if (nb10 == sizeof(ggml_fp16_t)) { + if (nb10 == sizeof(bark_ggml_fp16_t)) { for (int ir = ir0; ir < ir1; ++ir) { // src0, src1 and dst are same shape => same indices const int i3 = ir/(ne2*ne1); const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); - ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + bark_ggml_fp16_t * src1_ptr = (bark_ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i])); + dst_ptr[i] = BARK_GGML_FP32_TO_FP16(BARK_GGML_FP16_TO_FP32(src0_ptr[i]) + BARK_GGML_FP16_TO_FP32(src1_ptr[i])); } } } else { // src1 is not contiguous - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } -static void ggml_compute_forward_add_q_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_add_q_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; - const enum ggml_type type = src0->type; - const enum ggml_type dtype = dst->type; - ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; - ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float; + const enum bark_ggml_type type = src0->type; + const enum bark_ggml_type dtype = dst->type; + bark_ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; + bark_ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float; // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == bark_ggml_type_size(type)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - 
GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(src1->type == GGML_TYPE_F32); + BARK_GGML_ASSERT(bark_ggml_is_quantized(src0->type)); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9571,7 +9569,7 @@ static void ggml_compute_forward_add_q_f32( // unquantize row from src0 to temp buffer dequantize_row_q(src0_row, wdata, ne00); // add src1 - ggml_vec_acc_f32(ne00, wdata, src1_row); + bark_ggml_vec_acc_f32(ne00, wdata, src1_row); // quantize row to dst if (quantize_row_q != NULL) { quantize_row_q(wdata, dst_row, ne00); @@ -9581,71 +9579,71 @@ static void ggml_compute_forward_add_q_f32( } } -static void ggml_compute_forward_add( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_add( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_add_f32(params, src0, src1, dst); + bark_ggml_compute_forward_add_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add_f16_f16(params, src0, src1, dst); + if (src1->type == BARK_GGML_TYPE_F16) { + bark_ggml_compute_forward_add_f16_f16(params, src0, src1, dst); } - else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add_f16_f32(params, src0, src1, dst); + else if (src1->type == BARK_GGML_TYPE_F32) { + bark_ggml_compute_forward_add_f16_f32(params, src0, src1, dst); } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: { - ggml_compute_forward_add_q_f32(params, src0, src1, dst); + bark_ggml_compute_forward_add_q_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_add1 +// bark_ggml_compute_forward_add1 -static void ggml_compute_forward_add1_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); +static void bark_ggml_compute_forward_add1_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - 
GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9660,8 +9658,8 @@ static void ggml_compute_forward_add1_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); -#ifdef GGML_USE_ACCELERATE - UNUSED(ggml_vec_add1_f32); +#ifdef BARK_GGML_USE_ACCELERATE + UNUSED(bark_ggml_vec_add1_f32); vDSP_vadd( (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1, @@ -9669,7 +9667,7 @@ static void ggml_compute_forward_add1_f32( (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1, ne0); #else - ggml_vec_add1_f32(ne0, + bark_ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), *(float *) src1->data); @@ -9677,15 +9675,15 @@ static void ggml_compute_forward_add1_f32( } } -static void ggml_compute_forward_add1_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); +static void bark_ggml_compute_forward_add1_f16_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -9695,16 +9693,16 @@ static void ggml_compute_forward_add1_f16_f32( const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(dst->type == BARK_GGML_TYPE_F16); - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT( nb0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9719,42 +9717,42 @@ static void ggml_compute_forward_add1_f16_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); + bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = BARK_GGML_FP32_TO_FP16(BARK_GGML_FP16_TO_FP32(src0_ptr[i]) + v); } } } -static void ggml_compute_forward_add1_f16_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - 
GGML_ASSERT(ggml_is_scalar(src1)); +static void bark_ggml_compute_forward_add1_f16_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } // scalar to add - const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + const float v = BARK_GGML_FP16_TO_FP32(*(bark_ggml_fp16_t *) src1->data); const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(dst->type == BARK_GGML_TYPE_F16); - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT( nb0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9769,23 +9767,23 @@ static void ggml_compute_forward_add1_f16_f16( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + bark_ggml_fp16_t * dst_ptr = (bark_ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); + bark_ggml_fp16_t * src0_ptr = (bark_ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = BARK_GGML_FP32_TO_FP16(BARK_GGML_FP16_TO_FP32(src0_ptr[i]) + v); } } } -static void ggml_compute_forward_add1_q_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); +static void bark_ggml_compute_forward_add1_q_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -9795,25 +9793,25 @@ static void ggml_compute_forward_add1_q_f32( const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - const enum ggml_type type = src0->type; - ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; - ggml_from_float_t const quantize_row_q = type_traits[type].from_float; + const enum bark_ggml_type type = src0->type; + bark_ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; + bark_ggml_from_float_t const 
quantize_row_q = type_traits[type].from_float; // we don't support permuted src0 - GGML_ASSERT(nb00 == ggml_type_size(type)); + BARK_GGML_ASSERT(nb00 == bark_ggml_type_size(type)); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(dst->type == src0->type); - GGML_ASSERT(src1->type == GGML_TYPE_F32); + BARK_GGML_ASSERT(bark_ggml_is_quantized(src0->type)); + BARK_GGML_ASSERT(dst->type == src0->type); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9838,64 +9836,64 @@ static void ggml_compute_forward_add1_q_f32( // unquantize row from src0 to temp buffer dequantize_row_q(src0_row, wdata, ne0); // add src1 - ggml_vec_acc1_f32(ne0, wdata, v); + bark_ggml_vec_acc1_f32(ne0, wdata, v); // quantize row to dst quantize_row_q(wdata, dst_row, ne0); } } -static void ggml_compute_forward_add1( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_add1( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_add1_f32(params, src0, src1, dst); + bark_ggml_compute_forward_add1_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add1_f16_f16(params, src0, src1, dst); + if (src1->type == BARK_GGML_TYPE_F16) { + bark_ggml_compute_forward_add1_f16_f16(params, src0, src1, dst); } - else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_f16_f32(params, src0, src1, dst); + else if (src1->type == BARK_GGML_TYPE_F32) { + bark_ggml_compute_forward_add1_f16_f32(params, src0, src1, dst); } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: { - ggml_compute_forward_add1_q_f32(params, src0, src1, dst); + bark_ggml_compute_forward_add1_q_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_acc +// bark_ggml_compute_forward_acc -static void ggml_compute_forward_acc_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); +static void bark_ggml_compute_forward_acc_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + 
BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst) && bark_ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during acc // nb0 is implicitely element_size because src0 and dst are contiguous @@ -9905,40 +9903,40 @@ static void ggml_compute_forward_acc_f32( size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - if (!inplace && (params->type == GGML_TASK_INIT)) { + if (!inplace && (params->type == BARK_GGML_TASK_INIT)) { // memcpy needs to be synchronized across threads to avoid race conditions. // => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), - ggml_nbytes(dst)); + bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src1); + const int nr = bark_ggml_nrows(src1); const int nc = src1->ne[0]; - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) // src0 and dst as viewed during acc - const size_t nb0 = ggml_element_size(src0); + const size_t nb0 = bark_ggml_element_size(src0); const size_t nb00 = nb0; const size_t nb01 = nb1; const size_t nb02 = nb2; const size_t nb03 = nb3; - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst)); - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0)); + BARK_GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < bark_ggml_nbytes(dst)); + BARK_GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 
0 : ne13-1)*nb03 < bark_ggml_nbytes(src0)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -9954,13 +9952,13 @@ static void ggml_compute_forward_acc_f32( const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); -#ifdef GGML_USE_ACCELERATE +#ifdef BARK_GGML_USE_ACCELERATE vDSP_vadd( (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1, (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc); #else - ggml_vec_add_f32(nc, + bark_ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); @@ -9968,56 +9966,56 @@ static void ggml_compute_forward_acc_f32( } } -static void ggml_compute_forward_acc( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_acc( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_acc_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_F32: + { + bark_ggml_compute_forward_acc_f32(params, src0, src1, dst); + } break; + case BARK_GGML_TYPE_F16: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sub +// bark_ggml_compute_forward_sub -static void ggml_compute_forward_sub_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sub_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); if (nb10 == sizeof(float)) { for (int ir = 0; ir < nr; ++ir) { @@ -10026,14 +10024,14 @@ static void ggml_compute_forward_sub_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; 
const int i1 = (ir - i3*ne2*ne1 - i2*ne1); -#ifdef GGML_USE_ACCELERATE +#ifdef BARK_GGML_USE_ACCELERATE vDSP_vsub( (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1, ne0); #else - ggml_vec_sub_f32(ne0, + bark_ggml_vec_sub_f32(ne0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); @@ -10060,54 +10058,54 @@ static void ggml_compute_forward_sub_f32( } } -static void ggml_compute_forward_sub( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sub( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sub_f32(params, src0, src1, dst); + bark_ggml_compute_forward_sub_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_mul +// bark_ggml_compute_forward_mul -static void ggml_compute_forward_mul_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_can_repeat_rows(src1, src0) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_mul_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_can_repeat_rows(src1, src0) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; -#ifdef GGML_USE_CLBLAST - if (src1->backend == GGML_BACKEND_GPU) { +#ifdef BARK_GGML_USE_CLBLAST + if (src1->backend == BARK_GGML_BACKEND_GPU) { if (ith == 0) { - ggml_cl_mul(src0, src1, dst); + bark_ggml_cl_mul(src0, src1, dst); } return; } #endif - const int64_t nr = ggml_nrows(src0); + const int64_t nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(ne00 == ne10); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(ne00 == ne10); if (nb10 == sizeof(float)) { for (int64_t ir = ith; ir < nr; ir += nth) { @@ -10124,12 +10122,12 @@ static void ggml_compute_forward_mul_f32( float * src0_ptr = (float *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); float * src1_ptr = (float *) ((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11); -#ifdef GGML_USE_ACCELERATE - UNUSED(ggml_vec_mul_f32); +#ifdef BARK_GGML_USE_ACCELERATE + UNUSED(bark_ggml_vec_mul_f32); vDSP_vmul( src0_ptr, 1, src1_ptr, 1, dst_ptr, 1, ne00); #else - ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr); + bark_ggml_vec_mul_f32(ne00, dst_ptr, src0_ptr, src1_ptr); #endif // } // } @@ -10159,45 +10157,45 @@ static void ggml_compute_forward_mul_f32( } } -static void ggml_compute_forward_mul( - const 
struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now"); +static void bark_ggml_compute_forward_mul( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32 && "only f32 src1 supported for now"); switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_mul_f32(params, src0, src1, dst); + bark_ggml_compute_forward_mul_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_div +// bark_ggml_compute_forward_div -static void ggml_compute_forward_div_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_div_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); if (nb10 == sizeof(float)) { for (int ir = 0; ir < nr; ++ir) { @@ -10206,8 +10204,8 @@ static void ggml_compute_forward_div_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); -#ifdef GGML_USE_ACCELERATE - UNUSED(ggml_vec_div_f32); +#ifdef BARK_GGML_USE_ACCELERATE + UNUSED(bark_ggml_vec_div_f32); vDSP_vdiv( (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, @@ -10215,7 +10213,7 @@ static void ggml_compute_forward_div_f32( (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1, ne0); #else - ggml_vec_div_f32(ne0, + bark_ggml_vec_div_f32(ne0, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); @@ -10242,175 +10240,175 @@ static void ggml_compute_forward_div_f32( } } -static void ggml_compute_forward_div( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_div( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_div_f32(params, src0, src1, dst); + bark_ggml_compute_forward_div_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sqr +// bark_ggml_compute_forward_sqr -static void 
ggml_compute_forward_sqr_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sqr_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sqr_f32(nc, + bark_ggml_vec_sqr_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sqr( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sqr( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sqr_f32(params, src0, dst); + bark_ggml_compute_forward_sqr_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sqrt +// bark_ggml_compute_forward_sqrt -static void ggml_compute_forward_sqrt_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sqrt_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sqrt_f32(nc, + bark_ggml_vec_sqrt_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sqrt( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sqrt( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sqrt_f32(params, src0, dst); + bark_ggml_compute_forward_sqrt_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_log +// bark_ggml_compute_forward_log -static void ggml_compute_forward_log_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_log_f32( + const struct 
bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; - GGML_ASSERT( dst->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT( dst->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_log_f32(nc, + bark_ggml_vec_log_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_log( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_log( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_log_f32(params, src0, dst); + bark_ggml_compute_forward_log_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sum +// bark_ggml_compute_forward_sum -static void ggml_compute_forward_sum_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sum_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_is_scalar(dst)); + assert(bark_ggml_is_scalar(dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - assert(ggml_is_scalar(dst)); + assert(bark_ggml_is_scalar(dst)); assert(src0->nb[0] == sizeof(float)); - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - ggml_float sum = 0; - ggml_float row_sum = 0; + bark_ggml_float sum = 0; + bark_ggml_float row_sum = 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32_ggf(ne00, + bark_ggml_vec_sum_f32_ggf(ne00, &row_sum, (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); sum += row_sum; @@ -10420,21 +10418,21 @@ static void ggml_compute_forward_sum_f32( ((float *) dst->data)[0] = sum; } -static void ggml_compute_forward_sum_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sum_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_is_scalar(dst)); + assert(bark_ggml_is_scalar(dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - assert(src0->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == 
sizeof(bark_ggml_fp16_t)); - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) float sum = 0; float row_sum = 0; @@ -10442,57 +10440,57 @@ static void ggml_compute_forward_sum_f16( for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f16_ggf(ne00, + bark_ggml_vec_sum_f16_ggf(ne00, &row_sum, - (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + (bark_ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); sum += row_sum; } } } - ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum); + ((bark_ggml_fp16_t *) dst->data)[0] = BARK_GGML_FP32_TO_FP16(sum); } -static void ggml_compute_forward_sum( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sum( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sum_f32(params, src0, dst); + bark_ggml_compute_forward_sum_f32(params, src0, dst); } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_sum_f16(params, src0, dst); + bark_ggml_compute_forward_sum_f16(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sum_rows +// bark_ggml_compute_forward_sum_rows -static void ggml_compute_forward_sum_rows_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); +static void bark_ggml_compute_forward_sum_rows_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(dst->nb[0] == sizeof(float)); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(ne0 == 1); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); + BARK_GGML_ASSERT(ne0 == 1); + BARK_GGML_ASSERT(ne1 == ne01); + BARK_GGML_ASSERT(ne2 == ne02); + BARK_GGML_ASSERT(ne3 == ne03); for (int64_t i3 = 0; i3 < ne03; i3++) { for (int64_t i2 = 0; i2 < ne02; i2++) { @@ -10500,44 +10498,44 @@ static void ggml_compute_forward_sum_rows_f32( float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03); float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3); float row_sum = 0; - ggml_vec_sum_f32(ne00, &row_sum, src_row); + bark_ggml_vec_sum_f32(ne00, &row_sum, src_row); dst_row[0] = row_sum; } } } } -static void ggml_compute_forward_sum_rows( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sum_rows( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case 
GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sum_rows_f32(params, src0, dst); + bark_ggml_compute_forward_sum_rows_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_mean +// bark_ggml_compute_forward_mean -static void ggml_compute_forward_mean_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_mean_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } assert(src0->nb[0] == sizeof(float)); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS assert(ne0 == 1); assert(ne1 == ne01); @@ -10552,7 +10550,7 @@ static void ggml_compute_forward_mean_f32( for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, + bark_ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); @@ -10562,31 +10560,31 @@ static void ggml_compute_forward_mean_f32( } } -static void ggml_compute_forward_mean( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_mean( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_mean_f32(params, src0, dst); + bark_ggml_compute_forward_mean_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_argmax +// bark_ggml_compute_forward_argmax -static void ggml_compute_forward_argmax_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_argmax_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -10603,51 +10601,51 @@ static void ggml_compute_forward_argmax_f32( float * src = (float *) ((char *) src0->data + i1*nb01); int32_t * dst_ = (int32_t *) ((char *) dst->data + i1*nb0); int v = 0; - ggml_vec_argmax_f32(ne00, &v, src); + bark_ggml_vec_argmax_f32(ne00, &v, src); dst_[0] = v; } } -static void ggml_compute_forward_argmax( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_argmax( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_argmax_f32(params, src0, dst); + bark_ggml_compute_forward_argmax_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_repeat +// 
bark_ggml_compute_forward_repeat -static void ggml_compute_forward_repeat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_can_repeat(src0, dst)); +static void bark_ggml_compute_forward_repeat_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_can_repeat(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - // guaranteed to be an integer due to the check in ggml_can_repeat + // guaranteed to be an integer due to the check in bark_ggml_can_repeat const int nr0 = (int)(ne0/ne00); const int nr1 = (int)(ne1/ne01); const int nr2 = (int)(ne2/ne02); const int nr3 = (int)(ne3/ne03); // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); // TODO: maybe this is not optimal? for (int i3 = 0; i3 < nr3; i3++) { @@ -10657,7 +10655,7 @@ static void ggml_compute_forward_repeat_f32( for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne01; k1++) { for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_cpy_f32(ne00, + bark_ggml_vec_cpy_f32(ne00, (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0), (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01)); } @@ -10669,28 +10667,28 @@ static void ggml_compute_forward_repeat_f32( } } -static void ggml_compute_forward_repeat_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_can_repeat(src0, dst)); +static void bark_ggml_compute_forward_repeat_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_can_repeat(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_UNARY_OP_LOCALS; + BARK_GGML_TENSOR_UNARY_OP_LOCALS; - // guaranteed to be an integer due to the check in ggml_can_repeat + // guaranteed to be an integer due to the check in bark_ggml_can_repeat const int nr0 = (int)(ne0/ne00); const int nr1 = (int)(ne1/ne01); const int nr2 = (int)(ne2/ne02); const int nr3 = (int)(ne3/ne03); // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT(nb0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); // TODO: maybe this is not optimal? 
for (int i3 = 0; i3 < nr3; i3++) { @@ -10700,9 +10698,9 @@ static void ggml_compute_forward_repeat_f16( for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne01; k1++) { for (int i0 = 0; i0 < nr0; i0++) { - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0); - ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01); - // ggml_vec_cpy_f16(ne00, y, x) + bark_ggml_fp16_t * y = (bark_ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0); + bark_ggml_fp16_t * x = (bark_ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01); + // bark_ggml_vec_cpy_f16(ne00, y, x) for (int i = 0; i < ne00; ++i) { y[i] = x[i]; } @@ -10715,58 +10713,58 @@ static void ggml_compute_forward_repeat_f16( } } -static void ggml_compute_forward_repeat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_repeat( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_repeat_f16(params, src0, dst); + bark_ggml_compute_forward_repeat_f16(params, src0, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_repeat_f32(params, src0, dst); + bark_ggml_compute_forward_repeat_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_repeat_back +// bark_ggml_compute_forward_repeat_back -static void ggml_compute_forward_repeat_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_can_repeat(dst, src0)); +static void bark_ggml_compute_forward_repeat_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_can_repeat(dst, src0)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - // guaranteed to be an integer due to the check in ggml_can_repeat + // guaranteed to be an integer due to the check in bark_ggml_can_repeat const int nr0 = (int)(ne00/ne0); const int nr1 = (int)(ne01/ne1); const int nr2 = (int)(ne02/ne2); const int nr3 = (int)(ne03/ne3); // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); - if (ggml_is_contiguous(dst)) { - ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); + if (bark_ggml_is_contiguous(dst)) { + bark_ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); } else { for (int k3 = 0; k3 < ne3; k3++) { for (int k2 = 0; k2 < ne2; k2++) { for (int k1 = 0; k1 < ne1; k1++) { - ggml_vec_set_f32(ne0, + bark_ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1*nb1 + k2*nb2 + k3*nb3), 0); } @@ -10782,7 +10780,7 @@ static void ggml_compute_forward_repeat_back_f32( for (int i1 = 0; i1 < nr1; i1++) { for (int k1 = 0; k1 < ne1; k1++) { for (int i0 = 0; i0 < 
nr0; i0++) { - ggml_vec_acc_f32(ne0, + bark_ggml_vec_acc_f32(ne0, (float *) ((char *) dst->data + ( k3)*nb3 + ( k2)*nb2 + ( k1)*nb1), (float *) ((char *) src0->data + (i3*ne3 + k3)*nb03 + (i2*ne2 + k2)*nb02 + (i1*ne1 + k1)*nb01 + (i0*ne0)*nb00)); } @@ -10794,44 +10792,44 @@ static void ggml_compute_forward_repeat_back_f32( } } -static void ggml_compute_forward_repeat_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_repeat_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_repeat_back_f32(params, src0, dst); + bark_ggml_compute_forward_repeat_back_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_concat +// bark_ggml_compute_forward_concat -static void ggml_compute_forward_concat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_concat_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = ith; i2 < ne2; i2++) { @@ -10859,328 +10857,328 @@ static void ggml_compute_forward_concat_f32( } } -static void ggml_compute_forward_concat( - const struct ggml_compute_params* params, - const struct ggml_tensor* src0, - const struct ggml_tensor* src1, - struct ggml_tensor* dst) { +static void bark_ggml_compute_forward_concat( + const struct bark_ggml_compute_params* params, + const struct bark_ggml_tensor* src0, + const struct bark_ggml_tensor* src1, + struct bark_ggml_tensor* dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_concat_f32(params, src0, src1, dst); + bark_ggml_compute_forward_concat_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_abs +// bark_ggml_compute_forward_abs -static void ggml_compute_forward_abs_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_abs_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == 
BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_abs_f32(nc, + bark_ggml_vec_abs_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_abs( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_abs( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_abs_f32(params, src0, dst); + bark_ggml_compute_forward_abs_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_sgn +// bark_ggml_compute_forward_sgn -static void ggml_compute_forward_sgn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sgn_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_sgn_f32(nc, + bark_ggml_vec_sgn_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_sgn( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_sgn( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_sgn_f32(params, src0, dst); + bark_ggml_compute_forward_sgn_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_neg +// bark_ggml_compute_forward_neg -static void ggml_compute_forward_neg_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_neg_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_neg_f32(nc, + bark_ggml_vec_neg_f32(nc, (float *) 
((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_neg( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_neg( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_neg_f32(params, src0, dst); + bark_ggml_compute_forward_neg_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_step +// bark_ggml_compute_forward_step -static void ggml_compute_forward_step_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_step_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_step_f32(nc, + bark_ggml_vec_step_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_step( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_step( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_step_f32(params, src0, dst); + bark_ggml_compute_forward_step_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_tanh +// bark_ggml_compute_forward_tanh -static void ggml_compute_forward_tanh_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_tanh_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_tanh_f32(nc, + bark_ggml_vec_tanh_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_tanh( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_tanh( + 
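The unary kernels renamed in this stretch (abs, sgn, neg, step, tanh, and the elu/relu variants that follow) all share one single-threaded skeleton: check shapes, return early for the INIT/FINALIZE phases, then apply a vector primitive to each of ggml_nrows(src0) rows. Below is a minimal standalone C version of that row loop, with a plain function pointer standing in for the ggml_vec_*_f32 primitives; the flat rows/cols layout and names are assumptions for illustration, not patch content.

    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for ggml_vec_neg_f32 / ggml_vec_tanh_f32 etc.: element-wise f over one row. */
    typedef float (*unary_fn)(float);

    static float op_neg(float x) { return -x; }

    static void apply_unary_rows(unary_fn f, const float *src, float *dst,
                                 size_t rows, size_t cols) {
        for (size_t i = 0; i < rows; i++) {        /* one row per iteration, like ggml_nrows(src0) */
            for (size_t j = 0; j < cols; j++) {
                dst[i * cols + j] = f(src[i * cols + j]);
            }
        }
    }

    int main(void) {
        float x[6] = { -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f };
        float y[6];
        apply_unary_rows(op_neg, x, y, 2, 3);      /* 2 rows of 3 values, negated */
        apply_unary_rows(tanhf,  x, y, 2, 3);      /* tanhf already matches the unary_fn signature */
        printf("%f %f\n", (double)y[0], (double)y[5]);
        return 0;
    }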
const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_tanh_f32(params, src0, dst); + bark_ggml_compute_forward_tanh_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_elu +// bark_ggml_compute_forward_elu -static void ggml_compute_forward_elu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_elu_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_elu_f32(nc, + bark_ggml_vec_elu_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_elu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_elu( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_elu_f32(params, src0, dst); + bark_ggml_compute_forward_elu_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_relu +// bark_ggml_compute_forward_relu -static void ggml_compute_forward_relu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_relu_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert(dst->nb[0] == sizeof(float)); assert(src0->nb[0] == sizeof(float)); for (int i = 0; i < n; i++) { - ggml_vec_relu_f32(nc, + bark_ggml_vec_relu_f32(nc, (float *) ((char *) dst->data + i*( dst->nb[1])), (float *) ((char *) src0->data + i*(src0->nb[1]))); } } -static void ggml_compute_forward_relu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_relu( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_relu_f32(params, src0, dst); + bark_ggml_compute_forward_relu_f32(params, src0, 
dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_gelu +// bark_ggml_compute_forward_gelu -static void ggml_compute_forward_gelu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0)); - GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_gelu_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -11188,7 +11186,7 @@ static void ggml_compute_forward_gelu_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11198,7 +11196,7 @@ static void ggml_compute_forward_gelu_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, + bark_ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); @@ -11213,33 +11211,33 @@ static void ggml_compute_forward_gelu_f32( } } -static void ggml_compute_forward_gelu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_gelu( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_gelu_f32(params, src0, dst); + bark_ggml_compute_forward_gelu_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_gelu_quick +// bark_ggml_compute_forward_gelu_quick -static void ggml_compute_forward_gelu_quick_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0)); - GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_gelu_quick_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -11247,7 +11245,7 @@ static void ggml_compute_forward_gelu_quick_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11257,7 +11255,7 @@ static void ggml_compute_forward_gelu_quick_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - 
ggml_vec_gelu_quick_f32(nc, + bark_ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); @@ -11272,33 +11270,33 @@ static void ggml_compute_forward_gelu_quick_f32( } } -static void ggml_compute_forward_gelu_quick( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_gelu_quick( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_gelu_quick_f32(params, src0, dst); + bark_ggml_compute_forward_gelu_quick_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_silu +// bark_ggml_compute_forward_silu -static void ggml_compute_forward_silu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0)); - GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_silu_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -11306,7 +11304,7 @@ static void ggml_compute_forward_silu_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11316,7 +11314,7 @@ static void ggml_compute_forward_silu_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f32(nc, + bark_ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1]))); @@ -11331,36 +11329,36 @@ static void ggml_compute_forward_silu_f32( } } -static void ggml_compute_forward_silu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_silu( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_silu_f32(params, src0, dst); + bark_ggml_compute_forward_silu_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_silu_back +// bark_ggml_compute_forward_silu_back -static void ggml_compute_forward_silu_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * grad, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad)); - GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0)); - GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_are_same_shape(src0, grad)); +static void bark_ggml_compute_forward_silu_back_f32( + const 
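Unlike the simpler unary ops, the gelu, gelu_quick and silu kernels in this hunk are multi-threaded: each worker gets a contiguous slice of rows via dr = (nr + nth - 1)/nth and the ir0/ir1 bounds. A small C sketch of just that partitioning logic, written against plain nr/nth/ith arguments rather than the ggml params struct:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Split nr rows across nth workers; worker ith handles the half-open range [ir0, ir1). */
    static void row_range(int nr, int nth, int ith, int *ir0, int *ir1) {
        const int dr = (nr + nth - 1) / nth;  /* rows per thread, rounded up */
        *ir0 = dr * ith;
        *ir1 = MIN(*ir0 + dr, nr);            /* the last worker may get a short slice */
    }

    int main(void) {
        for (int ith = 0; ith < 4; ith++) {
            int ir0, ir1;
            row_range(10, 4, ith, &ir0, &ir1);
            printf("worker %d: rows [%d, %d)\n", ith, ir0, ir1);
        }
        return 0;
    }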
struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * grad, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(grad)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous_except_dim_1(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, grad)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -11368,7 +11366,7 @@ static void ggml_compute_forward_silu_back_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11378,7 +11376,7 @@ static void ggml_compute_forward_silu_back_f32( const int ir1 = MIN(ir0 + dr, nr); for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f32(nc, + bark_ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1*( dst->nb[1])), (float *) ((char *) src0->data + i1*(src0->nb[1])), (float *) ((char *) grad->data + i1*(grad->nb[1]))); @@ -11394,41 +11392,41 @@ static void ggml_compute_forward_silu_back_f32( } } -static void ggml_compute_forward_silu_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * grad, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_silu_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * grad, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_silu_back_f32(params, src0, grad, dst); + bark_ggml_compute_forward_silu_back_f32(params, src0, grad, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_norm +// bark_ggml_compute_forward_norm -static void ggml_compute_forward_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_norm_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -11439,65 +11437,65 @@ static void ggml_compute_forward_norm_f32( for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float)x[i00]; + sum += (bark_ggml_float)x[i00]; } float mean = sum/ne00; float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); - ggml_float sum2 = 0.0; + bark_ggml_float sum2 = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { float v = 
x[i00] - mean; y[i00] = v; - sum2 += (ggml_float)(v*v); + sum2 += (bark_ggml_float)(v*v); } float variance = sum2/ne00; const float scale = 1.0f/sqrtf(variance + eps); - ggml_vec_scale_f32(ne00, y, scale); + bark_ggml_vec_scale_f32(ne00, y, scale); } } } } -static void ggml_compute_forward_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_norm( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_norm_f32(params, src0, dst); + bark_ggml_compute_forward_norm_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_group_rms_norm +// bark_ggml_compute_forward_group_rms_norm -static void ggml_compute_forward_rms_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_rms_norm_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -11508,9 +11506,9 @@ static void ggml_compute_forward_rms_norm_f32( for (int64_t i01 = ith; i01 < ne01; i01 += nth) { const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float)(x[i00] * x[i00]); + sum += (bark_ggml_float)(x[i00] * x[i00]); } const float mean = sum/ne00; @@ -11524,45 +11522,45 @@ static void ggml_compute_forward_rms_norm_f32( const float scale = 1.0f/sqrtf(mean + eps); - ggml_vec_scale_f32(ne00, y, scale); + bark_ggml_vec_scale_f32(ne00, y, scale); } } } } -static void ggml_compute_forward_rms_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rms_norm( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_rms_norm_f32(params, src0, dst); + bark_ggml_compute_forward_rms_norm_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_rms_norm_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); +static void bark_ggml_compute_forward_rms_norm_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + 
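bark_ggml_compute_forward_rms_norm_f32 above is RMSNorm: unlike the norm kernel just before it, it does not subtract the mean, it only rescales each row by 1/sqrt(mean(x^2) + eps). A minimal single-row C version, with eps passed as an argument instead of being read from dst->op_params:

    #include <math.h>
    #include <stddef.h>

    /* In-place RMS normalization of one row of n floats: x[i] <- x[i] / sqrt(mean(x^2) + eps). */
    static void rms_norm_row(float *x, size_t n, float eps) {
        double sum = 0.0;                         /* accumulate in double, like ggml_float */
        for (size_t i = 0; i < n; i++) {
            sum += (double)x[i] * (double)x[i];
        }
        const float mean  = (float)(sum / (double)n);
        const float scale = 1.0f / sqrtf(mean + eps);
        for (size_t i = 0; i < n; i++) {
            x[i] *= scale;                        /* mirrors ggml_vec_scale_f32(ne00, y, scale) */
        }
    }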
BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst) && bark_ggml_are_same_shape(src0, src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS float eps; memcpy(&eps, dst->op_params, sizeof(float)); @@ -11579,12 +11577,12 @@ static void ggml_compute_forward_rms_norm_back_f32( const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13); - ggml_float sum_xx = 0.0; - ggml_float sum_xdz = 0.0; + bark_ggml_float sum_xx = 0.0; + bark_ggml_float sum_xdz = 0.0; for (int64_t i00 = 0; i00 < ne00; i00++) { - sum_xx += (ggml_float)(x[i00] * x[i00]); - sum_xdz += (ggml_float)(x[i00] * dz[i00]); + sum_xx += (bark_ggml_float)(x[i00] * x[i00]); + sum_xdz += (bark_ggml_float)(x[i00] * dz[i00]); } //const float mean = (float)(sum_xx)/ne00; @@ -11592,7 +11590,7 @@ static void ggml_compute_forward_rms_norm_back_f32( const float sum_eps = (float)(sum_xx) + eps*ne00; //const float mean_xdz = (float)(sum_xdz)/ne00; // we could cache rms from forward pass to improve performance. - // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. + // to do this implement bark_ggml_rms and compose bark_ggml_rms_norm using bark_ggml_rms. //const float rms = sqrtf(mean_eps); const float rrms = 1.0f / sqrtf(mean_eps); //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) @@ -11693,51 +11691,51 @@ static void ggml_compute_forward_rms_norm_back_f32( // dx := scale(dx, rrms) float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); - ggml_vec_cpy_f32 (ne00, dx, x); - // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); - ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps); - ggml_vec_acc_f32 (ne00, dx, dz); - ggml_vec_scale_f32(ne00, dx, rrms); + bark_ggml_vec_cpy_f32 (ne00, dx, x); + // bark_ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); + bark_ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps); + bark_ggml_vec_acc_f32 (ne00, dx, dz); + bark_ggml_vec_scale_f32(ne00, dx, rrms); } } } } -static void ggml_compute_forward_rms_norm_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rms_norm_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst); + bark_ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_group_norm +// bark_ggml_compute_forward_group_norm -static void ggml_compute_forward_group_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_group_norm_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor 
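The rms_norm backward pass above reduces to two row sums, sum_xx = sum of x_i^2 and sum_xdz = sum of x_i*dz_i, followed by dx = rrms * (dz - x * sum_xdz / (sum_xx + eps*n)) with rrms = 1/sqrt(mean(x^2) + eps). The compact single-row C restatement below follows the cpy/scale/acc/scale sequence visible in the kernel; the mean_eps definition is partly elided in this hunk, so treat it as a best-effort sketch rather than a verbatim extract.

    #include <math.h>
    #include <stddef.h>

    /* Backward of RMSNorm for one row: given input x and upstream gradient dz,
     * write the input gradient into dx (all length n). */
    static void rms_norm_back_row(float *dx, const float *x, const float *dz,
                                  size_t n, float eps) {
        double sum_xx = 0.0, sum_xdz = 0.0;
        for (size_t i = 0; i < n; i++) {
            sum_xx  += (double)x[i] * (double)x[i];
            sum_xdz += (double)x[i] * (double)dz[i];
        }
        const float mean_eps = (float)(sum_xx / (double)n) + eps;   /* rms^2 + eps */
        const float sum_eps  = (float)sum_xx + eps * (float)n;
        const float rrms     = 1.0f / sqrtf(mean_eps);

        for (size_t i = 0; i < n; i++) {
            /* dx = (x * (-sum_xdz)/sum_eps + dz) * rrms, matching the vec ops in the kernel */
            dx[i] = (x[i] * (float)(-sum_xdz) / sum_eps + dz[i]) * rrms;
        }
    }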
* dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; const int nth = params->nth; - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS const float eps = 1e-6f; // TODO: make this a parameter @@ -11755,18 +11753,18 @@ static void ggml_compute_forward_group_norm_f32( int step = end - start; for (int64_t i03 = 0; i03 < ne03; i03++) { - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { const float * x = (float *)((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float)x[i00]; + sum += (bark_ggml_float)x[i00]; } } } float mean = sum / (ne00 * ne01 * step); - ggml_float sum2 = 0.0; + bark_ggml_float sum2 = 0.0; for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { @@ -11777,7 +11775,7 @@ static void ggml_compute_forward_group_norm_f32( for (int64_t i00 = 0; i00 < ne00; i00++) { float v = x[i00] - mean; y[i00] = v; - sum2 += (ggml_float)(v * v); + sum2 += (bark_ggml_float)(v * v); } } } @@ -11787,38 +11785,38 @@ static void ggml_compute_forward_group_norm_f32( for (int64_t i02 = start; i02 < end; i02++) { for (int64_t i01 = 0; i01 < ne01; i01++) { float * y = (float *)((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - ggml_vec_scale_f32(ne00, y, scale); + bark_ggml_vec_scale_f32(ne00, y, scale); } } } } } -static void ggml_compute_forward_group_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_group_norm( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_group_norm_f32(params, src0, dst); + bark_ggml_compute_forward_group_norm_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_mul_mat +// bark_ggml_compute_forward_mul_mat -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) +#if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) // helper function to determine if it is better to use BLAS or not // for large matrices, BLAS is faster -static bool ggml_compute_forward_mul_mat_use_blas( - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static bool bark_ggml_compute_forward_mul_mat_use_blas( + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { //const int64_t ne00 = src0->ne[0]; //const int64_t ne01 = src0->ne[1]; @@ -11828,8 +11826,8 @@ static bool ggml_compute_forward_mul_mat_use_blas( const int64_t ne1 = dst->ne[1]; // TODO: find the optimal values for these - if (ggml_is_contiguous(src0) && - ggml_is_contiguous(src1) && + if (bark_ggml_is_contiguous(src0) && + bark_ggml_is_contiguous(src1) && (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) { /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/ @@ -11840,41 +11838,41 @@ static bool ggml_compute_forward_mul_mat_use_blas( } #endif -static 
void ggml_compute_forward_mul_mat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_mul_mat( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; - const enum ggml_type type = src0->type; + const enum bark_ggml_type type = src0->type; - const bool src1_cont = ggml_is_contiguous(src1); + const bool src1_cont = bark_ggml_is_contiguous(src1); - ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; - enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; - ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float; + bark_ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; + enum bark_ggml_type const vec_dot_type = type_traits[type].vec_dot_type; + bark_ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float; - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); + BARK_GGML_ASSERT(ne0 == ne01); + BARK_GGML_ASSERT(ne1 == ne11); + BARK_GGML_ASSERT(ne2 == ne12); + BARK_GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == bark_ggml_type_size(type)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); // broadcast factors const int64_t r2 = ne12/ne02; @@ -11883,26 +11881,26 @@ static void ggml_compute_forward_mul_mat( // nb01 >= nb00 - src0 is not transposed // compute by src0 rows -#if defined(GGML_USE_CLBLAST) - if (ggml_cl_can_mul_mat(src0, src1, dst)) { - if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) { - ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize); +#if defined(BARK_GGML_USE_CLBLAST) + if (bark_ggml_cl_can_mul_mat(src0, src1, dst)) { + if (params->ith == 0 && params->type == BARK_GGML_TASK_COMPUTE) { + bark_ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize); } return; } #endif -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { +#if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) + if (bark_ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { if (params->ith != 0) { return; } - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -11917,9 +11915,9 @@ static void ggml_compute_forward_mul_mat( float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - if (type != GGML_TYPE_F32) { + if (type != BARK_GGML_TYPE_F32) { float * const wdata = params->wdata; - ggml_to_float_t const to_float = type_traits[type].to_float; + bark_ggml_to_float_t const to_float = type_traits[type].to_float; size_t id = 0; for (int64_t i01 = 
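bark_ggml_compute_forward_mul_mat_use_blas, renamed just above, is a plain size heuristic: hand the product to Accelerate/OpenBLAS only when both operands are contiguous and every relevant dimension is at least 32, on the stated premise that BLAS only pays off for large matrices. Restated as a standalone predicate, with the tensor fields passed in directly since the ggml struct is not reproduced here:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of the "use BLAS?" decision: contiguous operands and all of
     * ne0 (dst rows), ne1 (dst cols) and ne10 (shared dimension) >= 32. */
    static bool mul_mat_use_blas(bool src0_contiguous, bool src1_contiguous,
                                 int64_t ne0, int64_t ne1, int64_t ne10) {
        return src0_contiguous &&
               src1_contiguous &&
               ne0 >= 32 && ne1 >= 32 && ne10 >= 32;
    }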
0; i01 < ne01; ++i01) { @@ -11939,16 +11937,16 @@ static void ggml_compute_forward_mul_mat( } } - //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); + //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (bark_ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); return; } #endif - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { if (src1->type != vec_dot_type) { char * wdata = params->wdata; - const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type); + const size_t row_size = ne10*bark_ggml_type_size(vec_dot_type)/bark_ggml_blck_size(vec_dot_type); for (int64_t i13 = 0; i13 < ne13; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { @@ -11963,12 +11961,12 @@ static void ggml_compute_forward_mul_mat( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; - const size_t row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type); + const size_t row_size = ne10*bark_ggml_type_size(vec_dot_type)/bark_ggml_blck_size(vec_dot_type); const int64_t nr0 = ne01; // src0 rows const int64_t nr1 = ne11*ne12*ne13; // src1 rows @@ -12051,52 +12049,52 @@ static void ggml_compute_forward_mul_mat( } } -// ggml_compute_forward_out_prod +// bark_ggml_compute_forward_out_prod -static void ggml_compute_forward_out_prod_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - // int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_out_prod_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + // int64_t t0 = bark_ggml_perf_time_us(); // UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); + BARK_GGML_ASSERT(ne02 == ne12); + BARK_GGML_ASSERT(ne03 == ne13); + BARK_GGML_ASSERT(ne2 == ne12); + BARK_GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + // BARK_GGML_ASSERT(nb0 <= nb1); + // BARK_GGML_ASSERT(nb1 <= nb2); + // BARK_GGML_ASSERT(nb2 <= nb3); - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); + BARK_GGML_ASSERT(ne0 == ne00); + BARK_GGML_ASSERT(ne1 == ne10); + BARK_GGML_ASSERT(ne2 == ne02); + BARK_GGML_ASSERT(ne3 == ne03); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows - // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod - // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST) + // TODO: #if defined(BARK_GGML_USE_CUBLAS) bark_ggml_cuda_out_prod + // TODO: #if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) || defined(BARK_GGML_USE_CLBLAST) - if (params->type == GGML_TASK_INIT) { - ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); + if (params->type == BARK_GGML_TASK_INIT) { + 
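During the INIT phase of mul_mat, src1 is converted row by row into the dot-product type of src0, and the scratch stride per converted row is ne10*type_size/block_size bytes. The small check below works that arithmetic through for a hypothetical q8_0 vec_dot_type; the 32-value / 34-byte block layout is the usual ggml convention and is an assumption here, not something this patch defines.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Assumed q8_0 layout: blocks of 32 values, 34 bytes each (2-byte scale + 32 int8). */
        const int64_t ne10      = 4096;  /* hypothetical row length of src1 */
        const int64_t type_size = 34;
        const int64_t blck_size = 32;

        /* row_size = ne10*ggml_type_size(vec_dot_type)/ggml_blck_size(vec_dot_type) */
        const int64_t row_size = ne10 * type_size / blck_size;
        printf("quantized row stride: %lld bytes\n", (long long)row_size);  /* prints 4352 */
        return 0;
    }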
bark_ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -12120,7 +12118,7 @@ static void ggml_compute_forward_out_prod_f32( const int64_t ir1 = MIN(ir0 + dr, nr); // block-tiling attempt - const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); + const int64_t blck_0 = MAX(BARK_GGML_VEC_MAD_UNROLL, 32); const int64_t blck_1 = 16; for (int64_t bir = ir0; bir < ir1; bir += blck_1) { @@ -12140,16 +12138,16 @@ static void ggml_compute_forward_out_prod_f32( const int64_t i12 = i2; const int64_t i13 = i3; -#if GGML_VEC_MAD_UNROLL > 2 - const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); - for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { +#if BARK_GGML_VEC_MAD_UNROLL > 2 + const int64_t bne01_unroll = bne01 - (bne01 % BARK_GGML_VEC_MAD_UNROLL); + for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += BARK_GGML_VEC_MAD_UNROLL) { const int64_t i11 = i01; float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); - ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); + bark_ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); } for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { const int64_t i11 = i01; @@ -12158,7 +12156,7 @@ static void ggml_compute_forward_out_prod_f32( float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); - ggml_vec_mad_f32(ne0, d, s0, *s1); + bark_ggml_vec_mad_f32(ne0, d, s0, *s1); } #else for (int64_t i01 = bi01; i01 < bne01; ++i01) { @@ -12168,14 +12166,14 @@ static void ggml_compute_forward_out_prod_f32( float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); - ggml_vec_mad_f32(ne0, d, s0, *s1); + bark_ggml_vec_mad_f32(ne0, d, s0, *s1); } #endif } } } - //int64_t t1 = ggml_perf_time_us(); + //int64_t t1 = bark_ggml_perf_time_us(); //static int64_t acc = 0; //acc += t1 - t0; //if (t1 - t0 > 10) { @@ -12189,53 +12187,53 @@ static void ggml_compute_forward_out_prod_f32( //} } -static void ggml_compute_forward_out_prod_q_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - // int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_out_prod_q_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + // int64_t t0 = bark_ggml_perf_time_us(); // UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS; + BARK_GGML_TENSOR_BINARY_OP_LOCALS; const int ith = params->ith; const int nth = params->nth; - const enum ggml_type type = src0->type; - ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; + const enum bark_ggml_type type = src0->type; + bark_ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); + BARK_GGML_ASSERT(ne02 == ne12); + BARK_GGML_ASSERT(ne03 == ne13); + BARK_GGML_ASSERT(ne2 == ne12); + BARK_GGML_ASSERT(ne3 == ne13); // we don't support permuted src0 dim0 - 
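The out_prod kernels in this hunk accumulate the result as repeated axpy operations: for every scalar of src1 they do d[:] += s0[:] * s1, optionally unrolled by BARK_GGML_VEC_MAD_UNROLL and block-tiled for cache reuse. A plain C equivalent of that inner primitive plus a tiny outer-product driver (the names here are illustrative):

    #include <stddef.h>

    /* y[i] += x[i] * v  -- the scalar "mad" primitive the out_prod loops are built on. */
    static void vec_mad_f32(size_t n, float *y, const float *x, float v) {
        for (size_t i = 0; i < n; i++) {
            y[i] += x[i] * v;
        }
    }

    /* Outer-product accumulation: dst (m x n, row-major) += col (m) * row (n)^T,
     * realized as one axpy per destination row. */
    static void out_prod_accumulate(size_t m, size_t n, float *dst,
                                    const float *col, const float *row) {
        for (size_t i = 0; i < m; i++) {
            vec_mad_f32(n, dst + i * n, row, col[i]);
        }
    }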
GGML_ASSERT(nb00 == ggml_type_size(type)); + BARK_GGML_ASSERT(nb00 == bark_ggml_type_size(type)); // dst dim0 cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + // BARK_GGML_ASSERT(nb0 <= nb1); + // BARK_GGML_ASSERT(nb1 <= nb2); + // BARK_GGML_ASSERT(nb2 <= nb3); - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); + BARK_GGML_ASSERT(ne0 == ne00); + BARK_GGML_ASSERT(ne1 == ne10); + BARK_GGML_ASSERT(ne2 == ne02); + BARK_GGML_ASSERT(ne3 == ne03); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows - // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod - // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST) + // TODO: #if defined(BARK_GGML_USE_CUBLAS) bark_ggml_cuda_out_prod + // TODO: #if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) || defined(BARK_GGML_USE_CLBLAST) - if (params->type == GGML_TASK_INIT) { - ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); + if (params->type == BARK_GGML_TASK_INIT) { + bark_ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0); return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -12281,11 +12279,11 @@ static void ggml_compute_forward_out_prod_q_f32( float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); dequantize_row_q(s0, wdata, ne0); - ggml_vec_mad_f32(ne0, d, wdata, *s1); + bark_ggml_vec_mad_f32(ne0, d, wdata, *s1); } } - //int64_t t1 = ggml_perf_time_us(); + //int64_t t1 = bark_ggml_perf_time_us(); //static int64_t acc = 0; //acc += t1 - t0; //if (t1 - t0 > 10) { @@ -12299,54 +12297,54 @@ static void ggml_compute_forward_out_prod_q_f32( //} } -static void ggml_compute_forward_out_prod( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_out_prod( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: { - ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst); + bark_ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - GGML_ASSERT(false); // todo - // ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst); + BARK_GGML_ASSERT(false); // todo + // bark_ggml_compute_forward_out_prod_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_out_prod_f32(params, src0, src1, dst); + bark_ggml_compute_forward_out_prod_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_scale +// 
bark_ggml_compute_forward_scale -static void ggml_compute_forward_scale_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); +static void bark_ggml_compute_forward_scale_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(src1)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -12357,7 +12355,7 @@ static void ggml_compute_forward_scale_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -12375,36 +12373,36 @@ static void ggml_compute_forward_scale_f32( // src0 is same shape as dst => same indices memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float)); } - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v); + bark_ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v); } } -static void ggml_compute_forward_scale( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_scale( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_scale_f32(params, src0, src1, dst); + bark_ggml_compute_forward_scale_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_set +// bark_ggml_compute_forward_set -static void ggml_compute_forward_set_i32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); +static void bark_ggml_compute_forward_set_i32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst) && bark_ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during set // nb0 is implicitely element_size because src0 and dst are contiguous @@ -12414,39 +12412,39 @@ static void ggml_compute_forward_set_i32( size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - if (!inplace && (params->type == GGML_TASK_INIT)) { + if (!inplace && (params->type == BARK_GGML_TASK_INIT)) { // memcpy needs to be synchronized across threads to avoid race conditions. 
// => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), - ggml_nbytes(dst)); + bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src1); + const int nr = bark_ggml_nrows(src1); const int nc = src1->ne[0]; - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); + BARK_GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); + BARK_GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); + const size_t nb0 = bark_ggml_element_size(src0); const int im0 = (ne10 == 0 ? 0 : ne10-1); const int im1 = (ne11 == 0 ? 0 : ne11-1); const int im2 = (ne12 == 0 ? 0 : ne12-1); const int im3 = (ne13 == 0 ? 0 : ne13-1); - GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst)); + BARK_GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= bark_ggml_nbytes(dst)); - GGML_ASSERT(nb10 == sizeof(int32_t)); + BARK_GGML_ASSERT(nb10 == sizeof(int32_t)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -12462,19 +12460,19 @@ static void ggml_compute_forward_set_i32( const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); - ggml_vec_cpy_i32(nc, + bark_ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (int32_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); } } -static void ggml_compute_forward_set_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); +static void bark_ggml_compute_forward_set_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst) && bark_ggml_is_contiguous(src0)); // view src0 and dst with these strides and data offset inbytes during set // nb0 is implicitely element_size because src0 and dst are contiguous @@ -12484,39 +12482,39 @@ static void ggml_compute_forward_set_f32( size_t offset = ((int32_t *) dst->op_params)[3]; bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - if (!inplace && (params->type == GGML_TASK_INIT)) { + if (!inplace && (params->type == BARK_GGML_TASK_INIT)) { // memcpy needs to be synchronized across threads to avoid race conditions. 
// => do it in INIT phase memcpy( ((char *) dst->data), ((char *) src0->data), - ggml_nbytes(dst)); + bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(src1); + const int nr = bark_ggml_nrows(src1); const int nc = src1->ne[0]; - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); + const size_t nb0 = bark_ggml_element_size(src0); const int im0 = (ne10 == 0 ? 0 : ne10-1); const int im1 = (ne11 == 0 ? 0 : ne11-1); const int im2 = (ne12 == 0 ? 0 : ne12-1); const int im3 = (ne13 == 0 ? 0 : ne13-1); - GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst)); + BARK_GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= bark_ggml_nbytes(dst)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -12532,127 +12530,127 @@ static void ggml_compute_forward_set_f32( const int i2 = (ir - i3*ne12*ne11)/ne11; const int i1 = (ir - i3*ne12*ne11 - i2*ne11); - ggml_vec_cpy_f32(nc, + bark_ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11)); } } -static void ggml_compute_forward_set( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_set( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_set_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_I32: - { - ggml_compute_forward_set_i32(params, src0, src1, dst); - } break; - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_F32: + { + bark_ggml_compute_forward_set_f32(params, src0, src1, dst); + } break; + case BARK_GGML_TYPE_I32: + { + bark_ggml_compute_forward_set_i32(params, src0, src1, dst); + } break; + case BARK_GGML_TYPE_F16: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_cpy +// bark_ggml_compute_forward_cpy -static void ggml_compute_forward_cpy( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - ggml_compute_forward_dup(params, src0, dst); +static void bark_ggml_compute_forward_cpy( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + 
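The set kernels just above do their full-tensor memcpy in the single-threaded INIT phase (hence the synchronization comment) and then have the workers write src1 into a byte-strided view of dst described by the offset and nb1..nb3 values in op_params. Below is a reduced, single-threaded C sketch of that second step for a 2-D destination; nb1 and offset are illustrative stand-ins for the op_params values, not the real parameter plumbing.

    #include <stddef.h>
    #include <string.h>

    /* Write a rows x cols block of src into dst, where dst is addressed in bytes
     * through a row stride nb1 and a starting byte offset -- the same "strided view"
     * idea the set kernels use, minus threading and the higher dimensions. */
    static void set_block_f32(char *dst_bytes, size_t nb1, size_t offset,
                              const float *src, size_t rows, size_t cols) {
        for (size_t i = 0; i < rows; i++) {
            float *dst_row = (float *)(dst_bytes + offset + i * nb1);
            memcpy(dst_row, src + i * cols, cols * sizeof(float));
        }
    }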
bark_ggml_compute_forward_dup(params, src0, dst); } -// ggml_compute_forward_cont +// bark_ggml_compute_forward_cont -static void ggml_compute_forward_cont( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - ggml_compute_forward_dup(params, src0, dst); +static void bark_ggml_compute_forward_cont( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + bark_ggml_compute_forward_dup(params, src0, dst); } -// ggml_compute_forward_reshape +// bark_ggml_compute_forward_reshape -static void ggml_compute_forward_reshape( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_reshape( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { // NOP UNUSED(params); UNUSED(src0); UNUSED(dst); } -// ggml_compute_forward_view +// bark_ggml_compute_forward_view -static void ggml_compute_forward_view( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void bark_ggml_compute_forward_view( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// ggml_compute_forward_permute +// bark_ggml_compute_forward_permute -static void ggml_compute_forward_permute( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void bark_ggml_compute_forward_permute( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// ggml_compute_forward_transpose +// bark_ggml_compute_forward_transpose -static void ggml_compute_forward_transpose( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { +static void bark_ggml_compute_forward_transpose( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0) { // NOP UNUSED(params); UNUSED(src0); } -// ggml_compute_forward_get_rows +// bark_ggml_compute_forward_get_rows -static void ggml_compute_forward_get_rows_q( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rows_q( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - const enum ggml_type type = src0->type; - ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; + const int nr = bark_ggml_nelements(src1); + const enum bark_ggml_type type = src0->type; + bark_ggml_to_float_t const dequantize_row_q = type_traits[type].to_float; assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); - assert(src0->nb[0] == ggml_type_size(type)); + assert(src0->nb[0] == bark_ggml_type_size(type)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; @@ -12663,47 +12661,47 @@ static void ggml_compute_forward_get_rows_q( } } -static void ggml_compute_forward_get_rows_f16( - const struct ggml_compute_params * params, - const struct 
ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rows_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = bark_ggml_nelements(src1); assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == sizeof(bark_ggml_fp16_t)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v); + bark_ggml_fp16_t v = ((bark_ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j]; + ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = BARK_GGML_FP16_TO_FP32(v); } } } -static void ggml_compute_forward_get_rows_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rows_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = bark_ggml_nelements(src1); assert( dst->ne[0] == nc); assert( dst->ne[1] == nr); @@ -12712,43 +12710,43 @@ static void ggml_compute_forward_get_rows_f32( for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; - ggml_vec_cpy_f32(nc, + bark_ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i*dst->nb[1]), (float *) ((char *) src0->data + r*src0->nb[1])); } } -static void ggml_compute_forward_get_rows( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rows( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: { - ggml_compute_forward_get_rows_q(params, src0, src1, dst); + bark_ggml_compute_forward_get_rows_q(params, src0, src1, dst); } break; - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_get_rows_f16(params, src0, src1, dst); + 
bark_ggml_compute_forward_get_rows_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_get_rows_f32(params, src0, src1, dst); + bark_ggml_compute_forward_get_rows_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } @@ -12771,93 +12769,93 @@ static void ggml_compute_forward_get_rows( //} } -// ggml_compute_forward_get_rows_back +// bark_ggml_compute_forward_get_rows_back -static void ggml_compute_forward_get_rows_back_f32_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_is_contiguous(dst)); +static void bark_ggml_compute_forward_get_rows_back_f32_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); - // ggml_compute_forward_dup_same_cont(params, opt0, dst); + // bark_ggml_compute_forward_dup_same_cont(params, opt0, dst); - if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); + if (params->type == BARK_GGML_TASK_INIT) { + memset(dst->data, 0, bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = bark_ggml_nelements(src1); - GGML_ASSERT( dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT( dst->ne[0] == nc); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(bark_ggml_fp16_t)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v); + bark_ggml_fp16_t v = ((bark_ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j]; + ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += BARK_GGML_FP16_TO_FP32(v); } } } -static void ggml_compute_forward_get_rows_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); - GGML_ASSERT(ggml_is_contiguous(dst)); +static void bark_ggml_compute_forward_get_rows_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); - // ggml_compute_forward_dup_same_cont(params, opt0, dst); + // bark_ggml_compute_forward_dup_same_cont(params, opt0, dst); - if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); + if (params->type == BARK_GGML_TASK_INIT) { + memset(dst->data, 0, bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); + const int nr = bark_ggml_nelements(src1); - GGML_ASSERT( dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(float)); + 
BARK_GGML_ASSERT( dst->ne[0] == nc); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); for (int i = 0; i < nr; ++i) { const int r = ((int32_t *) src1->data)[i]; - ggml_vec_add_f32(nc, + bark_ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r*dst->nb[1]), (float *) ((char *) dst->data + r*dst->nb[1]), (float *) ((char *) src0->data + i*src0->nb[1])); } } -static void ggml_compute_forward_get_rows_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rows_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst); + bark_ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst); + bark_ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } @@ -12880,30 +12878,30 @@ static void ggml_compute_forward_get_rows_back( //} } -// ggml_compute_forward_diag +// bark_ggml_compute_forward_diag -static void ggml_compute_forward_diag_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(params->ith == 0); +static void bark_ggml_compute_forward_diag_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } // TODO: handle transposed/permuted matrices - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(ne00 == ne0); - GGML_ASSERT(ne00 == ne1); - GGML_ASSERT(ne01 == 1); - GGML_ASSERT(ne02 == ne2); - GGML_ASSERT(ne03 == ne3); + BARK_GGML_ASSERT(ne00 == ne0); + BARK_GGML_ASSERT(ne00 == ne1); + BARK_GGML_ASSERT(ne01 == 1); + BARK_GGML_ASSERT(ne02 == ne2); + BARK_GGML_ASSERT(ne03 == ne3); - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); for (int i3 = 0; i3 < ne3; i3++) { for (int i2 = 0; i2 < ne2; i2++) { @@ -12922,28 +12920,28 @@ static void ggml_compute_forward_diag_f32( } } -static void ggml_compute_forward_diag( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_diag( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_diag_f32(params, src0, dst); + bark_ggml_compute_forward_diag_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_diag_mask_inf +// bark_ggml_compute_forward_diag_mask_inf -static void ggml_compute_forward_diag_mask_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst, +static void 
bark_ggml_compute_forward_diag_mask_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst, const float value) { const int ith = params->ith; @@ -12952,32 +12950,32 @@ static void ggml_compute_forward_diag_mask_f32( const int n_past = ((int32_t *) dst->op_params)[0]; const bool inplace = src0->data == dst->data; - GGML_ASSERT(n_past >= 0); + BARK_GGML_ASSERT(n_past >= 0); - if (!inplace && (params->type == GGML_TASK_INIT)) { + if (!inplace && (params->type == BARK_GGML_TASK_INIT)) { // memcpy needs to be synchronized across threads to avoid race conditions. // => do it in INIT phase - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_nelements(dst) == bark_ggml_nelements(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst) && bark_ggml_is_contiguous(src0)); memcpy( ((char *) dst->data), ((char *) src0->data), - ggml_nbytes(dst)); + bark_ggml_nbytes(dst)); } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } // TODO: handle transposed/permuted matrices - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; const int nr = src0->ne[1]; const int nz = n/nr; - GGML_ASSERT( dst->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT( dst->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); for (int k = 0; k < nz; k++) { for (int j = ith; j < nr; j += nth) { @@ -12990,49 +12988,49 @@ static void ggml_compute_forward_diag_mask_f32( } } -static void ggml_compute_forward_diag_mask_inf( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_diag_mask_inf( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY); + bark_ggml_compute_forward_diag_mask_f32(params, src0, dst, -INFINITY); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_diag_mask_zero( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_diag_mask_zero( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_diag_mask_f32(params, src0, dst, 0); + bark_ggml_compute_forward_diag_mask_f32(params, src0, dst, 0); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_soft_max +// bark_ggml_compute_forward_soft_max -static void ggml_compute_forward_soft_max_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_soft_max_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + 
BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13042,7 +13040,7 @@ static void ggml_compute_forward_soft_max_f32( const int nth = params->nth; const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13063,9 +13061,9 @@ static void ggml_compute_forward_soft_max_f32( #endif float max = -INFINITY; - ggml_vec_max_f32(nc, &max, sp); + bark_ggml_vec_max_f32(nc, &max, sp); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; uint16_t scvt; for (int i = 0; i < nc; i++) { @@ -13073,10 +13071,10 @@ static void ggml_compute_forward_soft_max_f32( dp[i] = 0.0f; } else { // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max); - ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(sp[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); - sum += (ggml_float)val; + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt]); + sum += (bark_ggml_float)val; dp[i] = val; } } @@ -13084,7 +13082,7 @@ static void ggml_compute_forward_soft_max_f32( assert(sum > 0.0); sum = 1.0/sum; - ggml_vec_scale_f32(nc, dp, sum); + bark_ggml_vec_scale_f32(nc, dp, sum); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { @@ -13095,36 +13093,36 @@ static void ggml_compute_forward_soft_max_f32( } } -static void ggml_compute_forward_soft_max( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_soft_max( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_soft_max_f32(params, src0, dst); + bark_ggml_compute_forward_soft_max_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_soft_max_back +// bark_ggml_compute_forward_soft_max_back -static void ggml_compute_forward_soft_max_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_are_same_shape(src1, dst)); +static void bark_ggml_compute_forward_soft_max_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src1)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src1, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13134,7 +13132,7 @@ static void ggml_compute_forward_soft_max_back_f32( const int nth = params->nth; const int nc = 
src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13176,10 +13174,10 @@ static void ggml_compute_forward_soft_max_back_f32( // linear runtime, no additional memory float dot_y_dy = 0; - ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy); - ggml_vec_cpy_f32 (nc, dx, dy); - ggml_vec_acc1_f32(nc, dx, -dot_y_dy); - ggml_vec_mul_f32 (nc, dx, dx, y); + bark_ggml_vec_dot_f32 (nc, &dot_y_dy, y, dy); + bark_ggml_vec_cpy_f32 (nc, dx, dy); + bark_ggml_vec_acc1_f32(nc, dx, -dot_y_dy); + bark_ggml_vec_mul_f32 (nc, dx, dx, y); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { @@ -13190,32 +13188,32 @@ static void ggml_compute_forward_soft_max_back_f32( } } -static void ggml_compute_forward_soft_max_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_soft_max_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst); + bark_ggml_compute_forward_soft_max_back_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_alibi +// bark_ggml_compute_forward_alibi -static void ggml_compute_forward_alibi_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_alibi_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13231,7 +13229,7 @@ static void ggml_compute_forward_alibi_f32( const int ne2 = src0->ne[2]; // n_head -> this is k //const int ne3 = src0->ne[3]; // 1 -> bsz - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int ne2_ne3 = n/ne1; // ne2*ne3 const int nb0 = src0->nb[0]; @@ -13239,8 +13237,8 @@ static void ggml_compute_forward_alibi_f32( const int nb2 = src0->nb[2]; //const int nb3 = src0->nb[3]; - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(n_head == ne2); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(n_head == ne2); // add alibi to src0 (KQ_scaled) const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); @@ -13271,13 +13269,13 @@ static void ggml_compute_forward_alibi_f32( } } -static void ggml_compute_forward_alibi_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_alibi_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13291,7 +13289,7 @@ static void ggml_compute_forward_alibi_f16( const int ne2 = src0->ne[2]; // n_head -> this is k //const int ne3 = src0->ne[3]; // 1 -> bsz - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); 
const int ne2_ne3 = n/ne1; // ne2*ne3 const int nb0 = src0->nb[0]; @@ -13299,9 +13297,9 @@ static void ggml_compute_forward_alibi_f16( const int nb2 = src0->nb[2]; //const int nb3 = src0->nb[3]; - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past; - GGML_ASSERT(n_head == ne2); + BARK_GGML_ASSERT(nb0 == sizeof(bark_ggml_fp16_t)); + //BARK_GGML_ASSERT(ne1 + n_past == ne0); (void) n_past; + BARK_GGML_ASSERT(n_head == ne2); // add alibi to src0 (KQ_scaled) const int n_heads_log2_floor = 1 << (int) floor(log2(n_head)); @@ -13312,7 +13310,7 @@ static void ggml_compute_forward_alibi_f16( for (int i = 0; i < ne0; i++) { for (int j = 0; j < ne1; j++) { for (int k = 0; k < ne2_ne3; k++) { - ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2); + bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2); float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2); // TODO: k*nb2 or k*nb3 @@ -13326,56 +13324,56 @@ static void ggml_compute_forward_alibi_f16( } // we return F32 - pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]); + pdst[0] = i * m_k + BARK_GGML_FP16_TO_FP32(src[0]); } } } } -static void ggml_compute_forward_alibi( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_alibi( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_alibi_f16(params, src0, dst); + bark_ggml_compute_forward_alibi_f16(params, src0, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_alibi_f32(params, src0, dst); + bark_ggml_compute_forward_alibi_f32(params, src0, dst); } break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q8_K: + case BARK_GGML_TYPE_I8: + case BARK_GGML_TYPE_I16: + case BARK_GGML_TYPE_I32: + case BARK_GGML_TYPE_COUNT: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_clamp +// bark_ggml_compute_forward_clamp -static void ggml_compute_forward_clamp_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_clamp_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13387,7 +13385,7 @@ static void ggml_compute_forward_clamp_f32( const int ith = params->ith; const int nth = params->nth; - const int n = ggml_nrows(src0); + const int n = 
bark_ggml_nrows(src0); const int nc = src0->ne[0]; const size_t nb00 = src0->nb[0]; @@ -13396,8 +13394,8 @@ static void ggml_compute_forward_clamp_f32( const size_t nb0 = dst->nb[0]; const size_t nb1 = dst->nb[1]; - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT( nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); for (int j = ith; j < n; j += nth) { float * dst_ptr = (float *) ((char *) dst->data + j*nb1); @@ -13409,46 +13407,46 @@ static void ggml_compute_forward_clamp_f32( } } -static void ggml_compute_forward_clamp( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_clamp( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_clamp_f32(params, src0, dst); + bark_ggml_compute_forward_clamp_f32(params, src0, dst); } break; - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_COUNT: + case BARK_GGML_TYPE_F16: + case BARK_GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_1: + case BARK_GGML_TYPE_Q2_K: + case BARK_GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q8_K: + case BARK_GGML_TYPE_I8: + case BARK_GGML_TYPE_I16: + case BARK_GGML_TYPE_I32: + case BARK_GGML_TYPE_COUNT: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_rope +// bark_ggml_compute_forward_rope -static void ggml_compute_forward_rope_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_rope_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13468,20 +13466,20 @@ static void ggml_compute_forward_rope_f32( memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool)); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(dst); + const int nr = bark_ggml_nrows(dst); - GGML_ASSERT(n_dims <= ne0); - GGML_ASSERT(n_dims % 2 == 0); + BARK_GGML_ASSERT(n_dims <= ne0); + BARK_GGML_ASSERT(n_dims % 2 == 0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13581,12 +13579,12 @@ static void ggml_compute_forward_rope_f32( } } -static void ggml_compute_forward_rope_f16( - const struct 
ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_rope_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13600,20 +13598,20 @@ static void ggml_compute_forward_rope_f16( memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT(nb0 == sizeof(bark_ggml_fp16_t)); const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(dst); + const int nr = bark_ggml_nrows(dst); - GGML_ASSERT(n_dims <= ne0); - GGML_ASSERT(n_dims % 2 == 0); + BARK_GGML_ASSERT(n_dims <= ne0); + BARK_GGML_ASSERT(n_dims % 2 == 0); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13653,18 +13651,18 @@ static void ggml_compute_forward_rope_f16( theta *= theta_scale; block_theta *= theta_scale; - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + const bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + bark_ggml_fp16_t * dst_data = (bark_ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]); - const float x2 = GGML_FP16_TO_FP32(src[n_dims]); - const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]); + const float x0 = BARK_GGML_FP16_TO_FP32(src[0]); + const float x1 = BARK_GGML_FP16_TO_FP32(src[n_dims/2]); + const float x2 = BARK_GGML_FP16_TO_FP32(src[n_dims]); + const float x3 = BARK_GGML_FP16_TO_FP32(src[n_dims/2*3]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta); - dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta); + dst_data[0] = BARK_GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims/2] = BARK_GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[n_dims] = BARK_GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta); + dst_data[n_dims/2*3] = BARK_GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta); } } if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { @@ -13673,14 +13671,14 @@ static void ggml_compute_forward_rope_f16( theta *= theta_scale; - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + const bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + bark_ggml_fp16_t * dst_data = (bark_ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float x0 = 
GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[1]); + const float x0 = BARK_GGML_FP16_TO_FP32(src[0]); + const float x1 = BARK_GGML_FP16_TO_FP32(src[1]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = BARK_GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[1] = BARK_GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } else { // TODO: this might be wrong for ne0 != n_dims - need double check @@ -13694,14 +13692,14 @@ static void ggml_compute_forward_rope_f16( const int64_t i0 = ib*n_dims + ic/2; - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + const bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + bark_ggml_fp16_t * dst_data = (bark_ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]); + const float x0 = BARK_GGML_FP16_TO_FP32(src[0]); + const float x1 = BARK_GGML_FP16_TO_FP32(src[n_dims/2]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = BARK_GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims/2] = BARK_GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } } @@ -13710,36 +13708,36 @@ static void ggml_compute_forward_rope_f16( } } -static void ggml_compute_forward_rope( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rope( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_rope_f16(params, src0, src1, dst); + bark_ggml_compute_forward_rope_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_rope_f32(params, src0, src1, dst); + bark_ggml_compute_forward_rope_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_rope_back +// bark_ggml_compute_forward_rope_back -static void ggml_compute_forward_rope_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rope_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13763,7 +13761,7 @@ static void ggml_compute_forward_rope_back_f32( memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float)); memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool)); - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); //printf("n_past = %d, ne2 = %d\n", n_past, ne2); @@ -13773,7 
+13771,7 @@ static void ggml_compute_forward_rope_back_f32( const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(dst); + const int nr = bark_ggml_nrows(dst); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13845,13 +13843,13 @@ static void ggml_compute_forward_rope_back_f32( } } -static void ggml_compute_forward_rope_back_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rope_back_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -13863,17 +13861,17 @@ static void ggml_compute_forward_rope_back_f16( const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - assert(nb0 == sizeof(ggml_fp16_t)); + assert(nb0 == sizeof(bark_ggml_fp16_t)); const int ith = params->ith; const int nth = params->nth; - const int nr = ggml_nrows(dst); + const int nr = bark_ggml_nrows(dst); // rows per thread const int dr = (nr + nth - 1)/nth; @@ -13907,14 +13905,14 @@ static void ggml_compute_forward_rope_back_f16( theta *= theta_scale; - const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + const bark_ggml_fp16_t * const dy = (bark_ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + bark_ggml_fp16_t * dx = (bark_ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float dy0 = GGML_FP16_TO_FP32(dy[0]); - const float dy1 = GGML_FP16_TO_FP32(dy[1]); + const float dy0 = BARK_GGML_FP16_TO_FP32(dy[0]); + const float dy1 = BARK_GGML_FP16_TO_FP32(dy[1]); - dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); - dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); + dx[0] = BARK_GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); + dx[1] = BARK_GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); } } else { for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { @@ -13926,14 +13924,14 @@ static void ggml_compute_forward_rope_back_f16( const int64_t i0 = ib*n_dims + ic/2; - const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + const bark_ggml_fp16_t * const dy = (bark_ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + bark_ggml_fp16_t * dx = (bark_ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float dy0 = GGML_FP16_TO_FP32(dy[0]); - const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]); + const float dy0 = BARK_GGML_FP16_TO_FP32(dy[0]); + const float dy1 = BARK_GGML_FP16_TO_FP32(dy[n_dims/2]); - dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); - dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); + dx[0] = BARK_GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); + dx[n_dims/2] = 
BARK_GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); } } } @@ -13942,42 +13940,42 @@ static void ggml_compute_forward_rope_back_f16( } } -static void ggml_compute_forward_rope_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_rope_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_rope_back_f16(params, src0, src1, dst); + bark_ggml_compute_forward_rope_back_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_rope_back_f32(params, src0, src1, dst); + bark_ggml_compute_forward_rope_back_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_conv_1d +// bark_ggml_compute_forward_conv_1d -static void ggml_compute_forward_conv_1d_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_1d_f16_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; @@ -13991,24 +13989,24 @@ static void ggml_compute_forward_conv_1d_f16_f32( const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; const int32_t d0 = ((const int32_t*)(dst->op_params))[2]; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; + bark_ggml_fp16_t * dst_data = wdata; for (int64_t i0 = 0; i0 < ne0; i0++) { for (int64_t ik = 0; ik < nk; ik++) { const int idx0 = i0*s0 + ik*d0 - p0; if(!(idx0 < 0 || idx0 >= ne10)) { - dst_data[i0*ew0 + i11*nk + ik] = GGML_FP32_TO_FP16(src[idx0]); + dst_data[i0*ew0 + i11*nk + ik] = BARK_GGML_FP32_TO_FP16(src[idx0]); } } } @@ -14017,7 +14015,7 @@ static void ggml_compute_forward_conv_1d_f16_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -14031,34 +14029,34 @@ static void ggml_compute_forward_conv_1d_f16_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = 
(bark_ggml_fp16_t *) params->wdata + 0; for (int i2 = 0; i2 < ne2; i2++) { for (int i1 = ir0; i1 < ir1; i1++) { float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1); for (int i0 = 0; i0 < ne0; i0++) { - ggml_vec_dot_f16(ew0, dst_data + i0, - (ggml_fp16_t *) ((char *) src0->data + i1*nb02), - (ggml_fp16_t *) wdata + i2*nb2 + i0*ew0); + bark_ggml_vec_dot_f16(ew0, dst_data + i0, + (bark_ggml_fp16_t *) ((char *) src0->data + i1*nb02), + (bark_ggml_fp16_t *) wdata + i2*nb2 + i0*ew0); } } } } -static void ggml_compute_forward_conv_1d_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_1d_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; @@ -14071,10 +14069,10 @@ static void ggml_compute_forward_conv_1d_f32( const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; const int32_t d0 = ((const int32_t*)(dst->op_params))[2]; - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); float * const wdata = (float *) params->wdata + 0; @@ -14097,7 +14095,7 @@ static void ggml_compute_forward_conv_1d_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -14118,7 +14116,7 @@ static void ggml_compute_forward_conv_1d_f32( float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1); for (int i0 = 0; i0 < ne0; i0++) { - ggml_vec_dot_f32(ew0, dst_data + i0, + bark_ggml_vec_dot_f32(ew0, dst_data + i0, (float *) ((char *) src0->data + i1*nb02), (float *) wdata + i2*nb2 + i0*ew0); } @@ -14126,10 +14124,10 @@ static void ggml_compute_forward_conv_1d_f32( } } -// TODO: reuse ggml_mul_mat or implement ggml_im2col and remove stage_0 and stage_1 +// TODO: reuse bark_ggml_mul_mat or implement bark_ggml_im2col and remove stage_0 and stage_1 static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, - ggml_fp16_t * A, - ggml_fp16_t * B, + bark_ggml_fp16_t * A, + bark_ggml_fp16_t * B, float * C, const int ith, const int nth) { // does not seem to make a difference @@ -14168,7 +14166,7 @@ static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, int64_t blck_m = 16; // int64_t CACHE_SIZE = 2 * 1024 * 1024; // 2MB - // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(ggml_fp16_t) * K); + // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(bark_ggml_fp16_t) * K); // if (blck_size > 0) { // blck_0 = 4; // blck_1 = blck_size / blck_0; @@ -14185,7 +14183,7 @@ static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, // printf("i j k => %d %d %d\n", i, j, K); for (int ii = i; ii < i + blck_m && ii < m1; ii++) { for (int jj = j; 
jj < j + blck_n && jj < n1; jj++) { - ggml_vec_dot_f16(k, + bark_ggml_vec_dot_f16(k, C + ii*n + jj, A + ii * k, B + jj * k); @@ -14198,19 +14196,19 @@ static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, // src0: kernel [OC, IC, K] // src1: signal [N, IC, IL] // dst: result [N, OL, IC*K] -static void ggml_compute_forward_conv_1d_stage_0_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F16); - - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_conv_1d_stage_0_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F16); + + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS; + BARK_GGML_TENSOR_BINARY_OP_LOCALS; const int64_t N = ne12; const int64_t IC = ne11; @@ -14227,35 +14225,35 @@ static void ggml_compute_forward_conv_1d_stage_0_f32( const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; const int32_t d0 = ((const int32_t*)(dst->op_params))[2]; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); + if (params->type == BARK_GGML_TASK_INIT) { + memset(dst->data, 0, bark_ggml_nbytes(dst)); return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } // im2col: [N, IC, IL] => [N, OL, IC*K] { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t iol = 0; iol < OL; iol++) { for (int64_t iic = ith; iic < IC; iic+=nth) { // micro kernel - ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K] + bark_ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K] const float * const src_data = (float *)((char *) src1->data + in*nb12 + iic*nb11); // [IL] for (int64_t ik = 0; ik < K; ik++) { const int64_t iil = iol*s0 + ik*d0 - p0; if (!(iil < 0 || iil >= IL)) { - dst_data[iic*K + ik] = GGML_FP32_TO_FP16(src_data[iil]); + dst_data[iic*K + ik] = BARK_GGML_FP32_TO_FP16(src_data[iil]); } } } @@ -14268,31 +14266,31 @@ static void ggml_compute_forward_conv_1d_stage_0_f32( // src0: [OC, IC, K] // src1: [N, OL, IC * K] // result: [N, OC, OL] -static void ggml_compute_forward_conv_1d_stage_1_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_conv_1d_stage_1_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT( dst->type == 
BARK_GGML_TYPE_F32); + + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_BINARY_OP_LOCALS; + BARK_GGML_TENSOR_BINARY_OP_LOCALS; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); const int N = ne12; const int OL = ne11; @@ -14310,109 +14308,109 @@ static void ggml_compute_forward_conv_1d_stage_1_f16( // [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K] for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k] + bark_ggml_fp16_t * A = (bark_ggml_fp16_t *)src0->data; // [m, k] + bark_ggml_fp16_t * B = (bark_ggml_fp16_t *)src1->data + i * m * k; // [n, k] float * C = (float *)dst->data + i * m * n; // [m, n] gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); } } -static void ggml_compute_forward_conv_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_1d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch(src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_conv_1d_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_1d_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_conv_1d_stage_0( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_1d_stage_0( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch(src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_conv_1d_stage_1( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_1d_stage_1( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch(src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst); + bark_ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_conv_transpose_1d +// 
bark_ggml_compute_forward_conv_transpose_1d -static void ggml_compute_forward_conv_transpose_1d_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_transpose_1d_f16_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); const int64_t K = ne00; const int64_t OC = ne01; const int64_t IC = ne02; const int64_t L = ne10; - GGML_ASSERT(IC == ne11); + BARK_GGML_ASSERT(IC == ne11); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from [K, OC, IC] to [IC, K, OC] { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; for (int64_t ic = 0; ic < IC; ic++) { for (int64_t oc = 0; oc < OC; oc++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + ic*nb02 + oc*nb01); - ggml_fp16_t * dst_data = wdata + oc*K*IC; + const bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + ic*nb02 + oc*nb01); + bark_ggml_fp16_t * dst_data = wdata + oc*K*IC; for (int64_t k = 0; k < K; k++) { dst_data[k*IC + ic] = src[k]; } @@ -14422,24 +14420,24 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( // permute source data (src1) from [L, IC] to [IC, L] { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + K*OC*IC; - ggml_fp16_t * dst_data = wdata; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + K*OC*IC; + bark_ggml_fp16_t * dst_data = wdata; for (int64_t ic = 0; ic < IC; ic++) { const float * const src = (float *)((char *) src1->data + ic*nb11); for (int64_t l = 0; l < L; l++) { - dst_data[l*IC + ic] = GGML_FP32_TO_FP16(src[l]); + dst_data[l*IC + ic] = BARK_GGML_FP32_TO_FP16(src[l]); } } } // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); + memset(dst->data, 0, bark_ggml_nbytes(dst)); return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -14455,14 +14453,14 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; for (int oc = ir0; oc < ir1; oc++) { float * dst_data = (float *)((char *) dst->data + oc*nb1); for (int l = 0; l < L; l++) { for (int k = 0; k < K; k++) { float v = 0; - ggml_vec_dot_f16(IC, &v, + bark_ggml_vec_dot_f16(IC, &v, wdata + oc*K*IC + k*IC, wdata + IC*K*OC + l*IC); dst_data[l*s0 + k] += v; @@ 
-14471,34 +14469,34 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( } } -static void ggml_compute_forward_conv_transpose_1d_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_transpose_1d_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(float)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); const int64_t K = ne00; const int64_t OC = ne01; const int64_t IC = ne02; const int64_t L = ne10; - GGML_ASSERT(IC == ne11); + BARK_GGML_ASSERT(IC == ne11); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); // reshape kernel data (src0) from [K, OC, IC] to [IC, K, OC] @@ -14532,7 +14530,7 @@ static void ggml_compute_forward_conv_transpose_1d_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -14555,7 +14553,7 @@ static void ggml_compute_forward_conv_transpose_1d_f32( for (int l = 0; l < L; l++) { for (int k = 0; k < K; k++) { float v = 0; - ggml_vec_dot_f32(IC, &v, + bark_ggml_vec_dot_f32(IC, &v, wdata + oc*K*IC + k*IC, wdata + IC*K*OC + l*IC); dst_data[l*s0 + k] += v; @@ -14564,43 +14562,43 @@ static void ggml_compute_forward_conv_transpose_1d_f32( } } -static void ggml_compute_forward_conv_transpose_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_transpose_1d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_pad_reflec_1d +// bark_ggml_compute_forward_pad_reflec_1d -static void ggml_compute_forward_pad_reflec_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_pad_reflec_1d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + 
BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); const int32_t * opts = (const int32_t *) dst->op_params; const int p0 = opts[0]; const int p1 = opts[1]; - GGML_ASSERT(p0 >= 0); - GGML_ASSERT(p1 >= 0); + BARK_GGML_ASSERT(p0 >= 0); + BARK_GGML_ASSERT(p1 >= 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -14618,31 +14616,31 @@ static void ggml_compute_forward_pad_reflec_1d( float * left = (float *) ((char *) dst->data + i1*nb1 + p0*nb0); float * right = (float *) ((char *) dst->data + i1*nb1 + (ne0-p1-1)*nb0); - ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i1*nb01)); + bark_ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i1*nb01)); for (int i0 = 1; i0 <= p0; i0++) { left[-i0] = left[i0]; } for (int i0 = 1; i0 <= p1; i0++) { right[i0] = right[-i0]; } } } -// ggml_compute_forward_conv_2d +// bark_ggml_compute_forward_conv_2d // src0: kernel [OC, IC, KH, KW] // src1: image [N, IC, IH, IW] // dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_conv_2d_stage_0_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F16); - - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_conv_2d_stage_0_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F16); + + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS; + BARK_GGML_TENSOR_BINARY_OP_LOCALS; const int64_t N = ne13; const int64_t IC = ne12; @@ -14667,21 +14665,21 @@ static void ggml_compute_forward_conv_2d_stage_0_f32( const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); + if (params->type == BARK_GGML_TASK_INIT) { + memset(dst->data, 0, bark_ggml_nbytes(dst)); return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) dst->data; for (int64_t in = 0; in < N; in++) { for (int64_t ioh = 0; ioh < OH; ioh++) { @@ -14689,7 +14687,7 @@ static void ggml_compute_forward_conv_2d_stage_0_f32( for (int64_t iic = ith; iic < IC; iic+=nth) { // micro kernel - ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] + bark_ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] for (int64_t ikh = 0; ikh < KH; ikh++) { @@ -14698,7 +14696,7 @@ static void ggml_compute_forward_conv_2d_stage_0_f32( const 
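/*
 * The reflection padding above, restated for one float row: copy the n source
 * samples into the middle of the output, then mirror the first p0 and last p1
 * samples around the endpoints of the copied region (assumes p0, p1 < n, as
 * the kernel's indexing does). pad_reflect_1d_ref is a hypothetical helper
 * used only to illustrate the left[-i] = left[i] / right[i] = right[-i] loops.
 */
static void pad_reflect_1d_ref(const float *src, int n, int p0, int p1,
                               float *dst) {             /* length n + p0 + p1 */
    float *left  = dst + p0;           // first copied sample
    float *right = dst + p0 + n - 1;   // last copied sample
    for (int i = 0; i < n;  ++i) left[i]   = src[i];
    for (int i = 1; i <= p0; ++i) left[-i] = left[i];    // mirror to the left
    for (int i = 1; i <= p1; ++i) right[i] = right[-i];  // mirror to the right
}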
int64_t iih = ioh*s1 + ikh*d1 - p1; if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { - dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); + dst_data[iic*(KH*KW) + ikh*KW + ikw] = BARK_GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } @@ -14713,31 +14711,31 @@ static void ggml_compute_forward_conv_2d_stage_0_f32( // src0: [OC, IC, KH, KW] // src1: [N, OH, OW, IC * KH * KW] // result: [N, OC, OH, OW] -static void ggml_compute_forward_conv_2d_stage_1_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_conv_2d_stage_1_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); + + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_BINARY_OP_LOCALS; + BARK_GGML_TENSOR_BINARY_OP_LOCALS; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb0 == sizeof(float)); const int N = ne13; const int OH = ne12; @@ -14757,27 +14755,27 @@ static void ggml_compute_forward_conv_2d_stage_1_f16( // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k] + bark_ggml_fp16_t * A = (bark_ggml_fp16_t *)src0->data; // [m, k] + bark_ggml_fp16_t * B = (bark_ggml_fp16_t *)src1->data + i * m * k; // [n, k] float * C = (float *)dst->data + i * m * n; // [m, n] gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); } } -static void ggml_compute_forward_conv_2d_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_2d_f16_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS // src1: image [N, IC, IH, IW] // src0: kernel [OC, IC, KH, KW] @@ -14819,17 +14817,17 @@ static void ggml_compute_forward_conv_2d_f16_f32( const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - 
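/*
 * The stage-0 kernel above is an im2col pass: each output position (oh, ow)
 * receives a flattened KH*KW patch so that the following stage-1 call can run
 * the convolution as a plain GEMM. im2col_2d_ref sketches the index math for
 * a single image and a single input channel in f32 (the patched kernel works
 * in f16 across IC channels); the helper name is illustrative, not the API.
 */
static void im2col_2d_ref(const float *img, int IH, int IW,
                          int KH, int KW, int s0, int s1,
                          int p0, int p1, int d0, int d1,
                          int OH, int OW,
                          float *cols) {   /* [OH*OW][KH*KW] */
    for (int oh = 0; oh < OH; ++oh) {
        for (int ow = 0; ow < OW; ++ow) {
            float *patch = cols + (oh * OW + ow) * (KH * KW);
            for (int kh = 0; kh < KH; ++kh) {
                for (int kw = 0; kw < KW; ++kw) {
                    const int ih = oh * s1 + kh * d1 - p1;
                    const int iw = ow * s0 + kw * d0 - p0;
                    // out-of-bounds taps stay zero, matching the kernel's
                    // up-front memset of the destination tensor
                    patch[kh * KW + kw] =
                        (ih >= 0 && ih < IH && iw >= 0 && iw < IW)
                            ? img[ih * IW + iw] : 0.0f;
                }
            }
        }
    }
}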
GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); // prepare source data (src1) // im2col: [N, IC, IH, IW] => [N*OH*OW, IC*KH*KW] { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; for (int in = 0; in < N; in++) { for (int iic = 0; iic < IC; iic++) { @@ -14837,7 +14835,7 @@ static void ggml_compute_forward_conv_2d_f16_f32( for (int iow = 0; iow < OW; iow++) { // micro kernel - ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] + bark_ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] for (int ikh = 0; ikh < KH; ikh++) { @@ -14846,7 +14844,7 @@ static void ggml_compute_forward_conv_2d_f16_f32( const int iih = ioh*s1 + ikh*d1 - p1; if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { - dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); + dst_data[iic*(KH*KW) + ikh*KW + ikw] = BARK_GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } @@ -14859,11 +14857,11 @@ static void ggml_compute_forward_conv_2d_f16_f32( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; // wdata: [N*OH*OW, IC*KH*KW] // dst: result [N, OC, OH, OW] // src0: kernel [OC, IC, KH, KW] @@ -14874,113 +14872,113 @@ static void ggml_compute_forward_conv_2d_f16_f32( // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)wdata + i * m * k; // [n, k] + bark_ggml_fp16_t * A = (bark_ggml_fp16_t *)src0->data; // [m, k] + bark_ggml_fp16_t * B = (bark_ggml_fp16_t *)wdata + i * m * k; // [n, k] float * C = (float *)dst->data + i * m * n; // [m * k] gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); } } -static void ggml_compute_forward_conv_2d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_2d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst); - GGML_ASSERT(false); + //bark_ggml_compute_forward_conv_2d_f32(params, src0, src1, dst); + BARK_GGML_ASSERT(false); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_conv_2d_stage_0( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_2d_stage_0( + const struct bark_ggml_compute_params * params, + const struct 
bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_2d_stage_0_f32(params, src0, src1, dst); + bark_ggml_compute_forward_conv_2d_stage_0_f32(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -static void ggml_compute_forward_conv_2d_stage_1( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_conv_2d_stage_1( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_conv_2d_stage_1_f16(params, src0, src1, dst); + bark_ggml_compute_forward_conv_2d_stage_1_f16(params, src0, src1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_conv_transpose_2d +// bark_ggml_compute_forward_conv_transpose_2d -static void ggml_compute_forward_conv_transpose_2d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); +static void bark_ggml_compute_forward_conv_transpose_2d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(src0->type == BARK_GGML_TYPE_F16); + BARK_GGML_ASSERT(src1->type == BARK_GGML_TYPE_F32); + BARK_GGML_ASSERT( dst->type == BARK_GGML_TYPE_F32); - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_BINARY_OP_LOCALS + BARK_GGML_TENSOR_BINARY_OP_LOCALS const int ith = params->ith; const int nth = params->nth; const int nk = ne00*ne01*ne02*ne03; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); + BARK_GGML_ASSERT(nb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nb10 == sizeof(float)); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02); - ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03; + const bark_ggml_fp16_t * const src = (bark_ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02); + bark_ggml_fp16_t * dst_data = wdata + i02*ne01*ne00*ne03; for (int64_t i01 = 0; i01 < ne01; i01++) { for (int64_t i00 = 0; i00 < ne00; i00++) { dst_data[i01*ne00*ne03 + i00*ne03 + i03] = src[i01 * ne00 + i00]; @@ -14992,13 +14990,13 @@ static void ggml_compute_forward_conv_transpose_2d( // 
permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + nk; for (int i12 = 0; i12 < ne12; i12++) { for (int i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11); - ggml_fp16_t * dst_data = wdata + i11*ne10*ne12; + bark_ggml_fp16_t * dst_data = wdata + i11*ne10*ne12; for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]); + dst_data[i10*ne12 + i12] = BARK_GGML_FP32_TO_FP16(src[i10]); } } } @@ -15007,11 +15005,11 @@ static void ggml_compute_forward_conv_transpose_2d( return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int32_t stride = ggml_get_op_params_i32(dst, 0); + const int32_t stride = bark_ggml_get_op_params_i32(dst, 0); // total patches in dst const int np = ne2; @@ -15023,19 +15021,19 @@ static void ggml_compute_forward_conv_transpose_2d( const int ip0 = dp*ith; const int ip1 = MIN(ip0 + dp, np); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; + bark_ggml_fp16_t * const wdata = (bark_ggml_fp16_t *) params->wdata + 0; + bark_ggml_fp16_t * const wdata_src = wdata + nk; for (int i2 = ip0; i2 < ip1; i2++) { // Cout float * dst_data = (float *)((char *) dst->data + i2*nb2); - ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03; + bark_ggml_fp16_t * wdata_kernel = wdata + i2*ne01*ne00*ne03; for (int i11 = 0; i11 < ne11; i11++) { for (int i10 = 0; i10 < ne10; i10++) { const int i1n = i11*ne10*ne12 + i10*ne12; for (int i01 = 0; i01 < ne01; i01++) { for (int i00 = 0; i00 < ne00; i00++) { float v = 0; - ggml_vec_dot_f16(ne03, &v, + bark_ggml_vec_dot_f16(ne03, &v, wdata_src + i1n, wdata_kernel + i01*ne00*ne03 + i00*ne03); dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v; @@ -15046,23 +15044,23 @@ static void ggml_compute_forward_conv_transpose_2d( } } -// ggml_compute_forward_pool_1d_sk_p0 +// bark_ggml_compute_forward_pool_1d_sk_p0 -static void ggml_compute_forward_pool_1d_sk_p0( - const struct ggml_compute_params * params, - const enum ggml_op_pool op, - const struct ggml_tensor * src, +static void bark_ggml_compute_forward_pool_1d_sk_p0( + const struct bark_ggml_compute_params * params, + const enum bark_ggml_op_pool op, + const struct bark_ggml_tensor * src, const int k, - struct ggml_tensor * dst) { - assert(src->type == GGML_TYPE_F32); + struct bark_ggml_tensor * dst) { + assert(src->type == BARK_GGML_TYPE_F32); assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const char * cdata = (const char *)src->data; - const char * const data_end = cdata + ggml_nbytes(src); + const char * const data_end = cdata + bark_ggml_nbytes(src); float * drow = (float *)dst->data; const int64_t rs = dst->ne[0]; @@ -15074,22 +15072,22 @@ static void ggml_compute_forward_pool_1d_sk_p0( for (int64_t i = 0; i < rs; ++i) { switch (op) { - case GGML_OP_POOL_AVG: drow[i] = 0; break; - case GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + case BARK_GGML_OP_POOL_AVG: drow[i] = 0; break; + case BARK_GGML_OP_POOL_MAX: drow[i] = -FLT_MAX; break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } for (int ki = 0; ki < k; 
++ki) { switch (op) { - case GGML_OP_POOL_AVG: drow[i] += srow[j]; break; - case GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + case BARK_GGML_OP_POOL_AVG: drow[i] += srow[j]; break; + case BARK_GGML_OP_POOL_MAX: if (srow[j] > drow[i]) drow[i] = srow[j]; break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } ++j; } switch (op) { - case GGML_OP_POOL_AVG: drow[i] /= k; break; - case GGML_OP_POOL_MAX: break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + case BARK_GGML_OP_POOL_AVG: drow[i] /= k; break; + case BARK_GGML_OP_POOL_MAX: break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } } @@ -15098,42 +15096,42 @@ static void ggml_compute_forward_pool_1d_sk_p0( } } -// ggml_compute_forward_pool_1d +// bark_ggml_compute_forward_pool_1d -static void ggml_compute_forward_pool_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_pool_1d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { const int32_t * opts = (const int32_t *)dst->op_params; - enum ggml_op_pool op = opts[0]; + enum bark_ggml_op_pool op = opts[0]; const int k0 = opts[1]; const int s0 = opts[2]; const int p0 = opts[3]; - GGML_ASSERT(p0 == 0); // padding not supported - GGML_ASSERT(k0 == s0); // only s = k supported + BARK_GGML_ASSERT(p0 == 0); // padding not supported + BARK_GGML_ASSERT(k0 == s0); // only s = k supported - ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst); + bark_ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst); } -// ggml_compute_forward_pool_2d_sk_p0 +// bark_ggml_compute_forward_pool_2d_sk_p0 -static void ggml_compute_forward_pool_2d_sk_p0( - const struct ggml_compute_params * params, - const enum ggml_op_pool op, - const struct ggml_tensor * src, +static void bark_ggml_compute_forward_pool_2d_sk_p0( + const struct bark_ggml_compute_params * params, + const enum bark_ggml_op_pool op, + const struct bark_ggml_tensor * src, const int k0, const int k1, - struct ggml_tensor * dst) { - assert(src->type == GGML_TYPE_F32); + struct bark_ggml_tensor * dst) { + assert(src->type == BARK_GGML_TYPE_F32); assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } const char * cdata = (const char*)src->data; - const char * const data_end = cdata + ggml_nbytes(src); + const char * const data_end = cdata + bark_ggml_nbytes(src); const int64_t px = dst->ne[0]; const int64_t py = dst->ne[1]; @@ -15149,9 +15147,9 @@ static void ggml_compute_forward_pool_2d_sk_p0( for (int ox = 0; ox < px; ++ox) { float * const out = drow + ox; switch (op) { - case GGML_OP_POOL_AVG: *out = 0; break; - case GGML_OP_POOL_MAX: *out = -FLT_MAX; break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + case BARK_GGML_OP_POOL_AVG: *out = 0; break; + case BARK_GGML_OP_POOL_MAX: *out = -FLT_MAX; break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } const int ix = ox * k0; @@ -15162,16 +15160,16 @@ static void ggml_compute_forward_pool_2d_sk_p0( for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; switch (op) { - case GGML_OP_POOL_AVG: *out += srow[j]; break; - case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + 
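/*
 * With the asserted restrictions p == 0 and stride == kernel size, the 1-D
 * pooling above degenerates to non-overlapping windows: each output element
 * covers exactly k consecutive inputs, AVG sums and divides by k, MAX keeps
 * the largest tap. pool_1d_sk_p0_ref and pool_op_ref are hypothetical names
 * for this sketch; the patched kernel walks the rows of a tensor instead.
 */
#include <float.h>

enum pool_op_ref { POOL_REF_AVG, POOL_REF_MAX };

static void pool_1d_sk_p0_ref(enum pool_op_ref op, const float *src,
                              int k, int n_out, float *dst) {
    for (int i = 0; i < n_out; ++i) {
        float acc = (op == POOL_REF_AVG) ? 0.0f : -FLT_MAX;
        for (int j = 0; j < k; ++j) {
            const float v = src[i * k + j];
            acc = (op == POOL_REF_AVG) ? acc + v : (v > acc ? v : acc);
        }
        dst[i] = (op == POOL_REF_AVG) ? acc / (float) k : acc;
    }
}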
case BARK_GGML_OP_POOL_AVG: *out += srow[j]; break; + case BARK_GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } } } switch (op) { - case GGML_OP_POOL_AVG: *out /= ka; break; - case GGML_OP_POOL_MAX: break; - case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; + case BARK_GGML_OP_POOL_AVG: *out /= ka; break; + case BARK_GGML_OP_POOL_MAX: break; + case BARK_GGML_OP_POOL_COUNT: BARK_GGML_ASSERT(false); break; } } } @@ -15181,45 +15179,45 @@ static void ggml_compute_forward_pool_2d_sk_p0( } } -// ggml_compute_forward_pool_2d +// bark_ggml_compute_forward_pool_2d -static void ggml_compute_forward_pool_2d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_pool_2d( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { const int32_t * opts = (const int32_t *)dst->op_params; - enum ggml_op_pool op = opts[0]; + enum bark_ggml_op_pool op = opts[0]; const int k0 = opts[1]; const int k1 = opts[2]; const int s0 = opts[3]; const int s1 = opts[4]; const int p0 = opts[5]; const int p1 = opts[6]; - GGML_ASSERT(p0 == 0); - GGML_ASSERT(p1 == 0); // padding not supported - GGML_ASSERT(k0 == s0); - GGML_ASSERT(k1 == s1); // only s = k supported + BARK_GGML_ASSERT(p0 == 0); + BARK_GGML_ASSERT(p1 == 0); // padding not supported + BARK_GGML_ASSERT(k0 == s0); + BARK_GGML_ASSERT(k1 == s1); // only s = k supported - ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst); + bark_ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst); } -// ggml_compute_forward_upscale +// bark_ggml_compute_forward_upscale -static void ggml_compute_forward_upscale_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_upscale_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + BARK_GGML_ASSERT(src0->nb[0] == sizeof(float)); const int ith = params->ith; - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS const int scale_factor = dst->op_params[0]; @@ -15243,42 +15241,42 @@ static void ggml_compute_forward_upscale_f32( } } -static void ggml_compute_forward_upscale( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_upscale( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_upscale_f32(params, src0, dst); + bark_ggml_compute_forward_upscale_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_flash_attn +// bark_ggml_compute_forward_flash_attn -static void ggml_compute_forward_flash_attn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void bark_ggml_compute_forward_flash_attn_f32( + const struct bark_ggml_compute_params * 
params, + const struct bark_ggml_tensor * q, + const struct bark_ggml_tensor * k, + const struct bark_ggml_tensor * v, const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); + struct bark_ggml_tensor * dst) { + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int ith = params->ith; const int nth = params->nth; @@ -15288,39 +15286,39 @@ static void ggml_compute_forward_flash_attn_f32( const int64_t P = nek1 - N; const int64_t M = P + N; - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int Mup = bark_ggml_up(M, BARK_GGML_SOFT_MAX_UNROLL); - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); + BARK_GGML_ASSERT(ne0 == D); + BARK_GGML_ASSERT(ne1 == N); + BARK_GGML_ASSERT(P >= 0); - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); + BARK_GGML_ASSERT(nbq0 == sizeof(float)); + BARK_GGML_ASSERT(nbk0 == sizeof(float)); + BARK_GGML_ASSERT(nbv0 == sizeof(float)); - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(neq0 == D); + BARK_GGML_ASSERT(nek0 == D); + BARK_GGML_ASSERT(nev1 == D); - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(neq1 == N); + BARK_GGML_ASSERT(nek1 == N + P); + BARK_GGML_ASSERT(nev1 == D); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - // parallelize by q rows using ggml_vec_dot_f32 + // parallelize by q rows using bark_ggml_vec_dot_f32 // total rows in q const int nr = neq1*neq2*neq3; @@ -15358,14 +15356,14 @@ static void ggml_compute_forward_flash_attn_f32( // S indices const int i1 = ik1; - ggml_vec_dot_f32(neq0, + bark_ggml_vec_dot_f32(neq0, S + i1, (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } // scale - ggml_vec_scale_f32(masked_begin, S, scale); + bark_ggml_vec_scale_f32(masked_begin, S, scale); for (int64_t i = masked_begin; i < M; i++) { S[i] = -INFINITY; @@ -15376,45 +15374,45 @@ static void ggml_compute_forward_flash_attn_f32( // dont forget to set their SW values to zero { float max = -INFINITY; - ggml_vec_max_f32(masked_begin, &max, S); + bark_ggml_vec_max_f32(masked_begin, &max, S); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; { -#ifdef GGML_SOFT_MAX_ACCELERATE +#ifdef BARK_GGML_SOFT_MAX_ACCELERATE max = -max; 
vDSP_vsadd(S, 1, &max, S, 1, Mup); vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); + bark_ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; + uint16_t scvt[BARK_GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); + bark_ggml_float sump[BARK_GGML_SOFT_MAX_UNROLL] = { 0.0 }; - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { + for (int i = 0; i < Mup; i += BARK_GGML_SOFT_MAX_UNROLL) { if (i >= masked_begin) { break; } float * SS = S + i; - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { + for (int j = 0; j < BARK_GGML_SOFT_MAX_UNROLL; ++j) { if (i + j >= masked_begin) { break; } else if (SS[j] == -INFINITY) { SS[j] = 0.0f; } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 +#ifndef BARK_GGML_FLASH_ATTN_EXP_FP16 const float val = expf(SS[j] - max); #else - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); #endif - sump[j] += (ggml_float)val; + sump[j] += (bark_ggml_float)val; SS[j] = val; } } } - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { + for (int i = 0; i < BARK_GGML_SOFT_MAX_UNROLL; i++) { sum += sump[i]; } #endif @@ -15423,7 +15421,7 @@ static void ggml_compute_forward_flash_attn_f32( assert(sum > 0.0); sum = 1.0/sum; - ggml_vec_scale_f32(masked_begin, S, sum); + bark_ggml_vec_scale_f32(masked_begin, S, sum); #ifndef NDEBUG for (int i = 0; i < masked_begin; ++i) { @@ -15443,7 +15441,7 @@ static void ggml_compute_forward_flash_attn_f32( const int iv2 = iq2 % nev2; const int iv3 = iq3; - ggml_vec_dot_f32(masked_begin, + bark_ggml_vec_dot_f32(masked_begin, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), S); @@ -15451,24 +15449,24 @@ static void ggml_compute_forward_flash_attn_f32( } } -static void ggml_compute_forward_flash_attn_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void bark_ggml_compute_forward_flash_attn_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * q, + const struct bark_ggml_tensor * k, + const struct bark_ggml_tensor * v, const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); + struct bark_ggml_tensor * dst) { + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int ith = params->ith; const int nth = params->nth; @@ -15478,39 +15476,39 @@ static void ggml_compute_forward_flash_attn_f16( const int64_t P = nek1 - N; const int64_t M = P + N; - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int 
Mup = bark_ggml_up(M, BARK_GGML_SOFT_MAX_UNROLL); - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); + BARK_GGML_ASSERT(ne0 == D); + BARK_GGML_ASSERT(ne1 == N); + BARK_GGML_ASSERT(P >= 0); - GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t)); + BARK_GGML_ASSERT(nbq0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nbk0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nbv0 == sizeof(bark_ggml_fp16_t)); - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(neq0 == D); + BARK_GGML_ASSERT(nek0 == D); + BARK_GGML_ASSERT(nev1 == D); - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(neq1 == N); + BARK_GGML_ASSERT(nek1 == N + P); + BARK_GGML_ASSERT(nev1 == D); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - // parallelize by q rows using ggml_vec_dot_f32 + // parallelize by q rows using bark_ggml_vec_dot_f32 // total rows in q const int nr = neq1*neq2*neq3; @@ -15538,7 +15536,7 @@ static void ggml_compute_forward_flash_attn_f16( S[i] = -INFINITY; } - if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) { + if (BARK_GGML_VEC_DOT_UNROLL > 2 || nek1 % BARK_GGML_VEC_DOT_UNROLL != 0) { for (int64_t ic = 0; ic < nek1; ++ic) { // k indices const int ik3 = iq3; @@ -15548,13 +15546,13 @@ static void ggml_compute_forward_flash_attn_f16( // S indices const int i1 = ik1; - ggml_vec_dot_f16(neq0, + bark_ggml_vec_dot_f16(neq0, S + i1, - (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); + (bark_ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), + (bark_ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } } else { - for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) { + for (int64_t ic = 0; ic < nek1; ic += BARK_GGML_VEC_DOT_UNROLL) { // k indices const int ik3 = iq3; const int ik2 = iq2 % nek2; @@ -15563,15 +15561,15 @@ static void ggml_compute_forward_flash_attn_f16( // S indices const int i1 = ik1; - ggml_vec_dot_f16_unroll(neq0, nbk1, + bark_ggml_vec_dot_f16_unroll(neq0, nbk1, S + i1, ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); + (bark_ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } } // scale - ggml_vec_scale_f32(nek1, S, scale); + bark_ggml_vec_scale_f32(nek1, S, scale); if (masked) { for (int64_t i = P; i < M; i++) { @@ -15586,36 +15584,36 @@ static void ggml_compute_forward_flash_attn_f16( // dont forget to set their S values to zero { float max = -INFINITY; - ggml_vec_max_f32(M, &max, S); + bark_ggml_vec_max_f32(M, &max, S); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; { -#ifdef GGML_SOFT_MAX_ACCELERATE +#ifdef BARK_GGML_SOFT_MAX_ACCELERATE max = -max; vDSP_vsadd(S, 1, &max, S, 1, Mup); vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); + bark_ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t 
scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; + uint16_t scvt[BARK_GGML_SOFT_MAX_UNROLL]; + bark_ggml_float sump[BARK_GGML_SOFT_MAX_UNROLL] = { 0.0 }; - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { + for (int i = 0; i < Mup; i += BARK_GGML_SOFT_MAX_UNROLL) { float * SS = S + i; - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { + for (int j = 0; j < BARK_GGML_SOFT_MAX_UNROLL; ++j) { if (SS[j] == -INFINITY) { SS[j] = 0.0f; } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); - sump[j] += (ggml_float)val; + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + sump[j] += (bark_ggml_float)val; SS[j] = val; } } } - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { + for (int i = 0; i < BARK_GGML_SOFT_MAX_UNROLL; i++) { sum += sump[i]; } #endif @@ -15624,7 +15622,7 @@ static void ggml_compute_forward_flash_attn_f16( assert(sum > 0.0); sum = 1.0/sum; - ggml_vec_scale_f32(M, S, sum); + bark_ggml_vec_scale_f32(M, S, sum); #ifndef NDEBUG for (int i = 0; i < M; ++i) { @@ -15634,14 +15632,14 @@ static void ggml_compute_forward_flash_attn_f16( #endif } - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); + bark_ggml_fp16_t * S16 = (bark_ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); for (int64_t i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); + S16[i] = BARK_GGML_FP32_TO_FP16(S[i]); } // todo: exclude known zero S[..] values from dot (reducing nev0 and increasing begin of v and S16). - if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) { + if (BARK_GGML_VEC_DOT_UNROLL == 1 || (nev1 % BARK_GGML_VEC_DOT_UNROLL != 0)) { for (int64_t ic = 0; ic < nev1; ++ic) { // dst indices const int i1 = iq1; @@ -15652,13 +15650,13 @@ static void ggml_compute_forward_flash_attn_f16( const int iv2 = iq2 % nev2; const int iv3 = iq3; - ggml_vec_dot_f16(nev0, + bark_ggml_vec_dot_f16(nev0, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), + (bark_ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), S16); } } else { - for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) { + for (int64_t ic = 0; ic < nev1; ic += BARK_GGML_VEC_DOT_UNROLL) { // dst indices const int i1 = iq1; const int i2 = iq2; @@ -15668,7 +15666,7 @@ static void ggml_compute_forward_flash_attn_f16( const int iv2 = iq2 % nev2; const int iv3 = iq3; - ggml_vec_dot_f16_unroll(nev0, nbv1, + bark_ggml_vec_dot_f16_unroll(nev0, nbv1, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), S16); @@ -15677,54 +15675,54 @@ static void ggml_compute_forward_flash_attn_f16( } } -static void ggml_compute_forward_flash_attn( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, +static void bark_ggml_compute_forward_flash_attn( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * q, + const struct bark_ggml_tensor * k, + const struct bark_ggml_tensor * v, const bool masked, - struct ggml_tensor * dst) { + struct bark_ggml_tensor * dst) { switch (q->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - 
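/*
 * The soft-max step shared by both flash-attention kernels above, stripped of
 * the f16 lookup table, the SOFT_MAX_UNROLL batching and the Accelerate path:
 * subtract the row maximum for numerical stability, map masked (-INFINITY)
 * scores to zero, then normalise by the sum (the kernels assert sum > 0, i.e.
 * at least one unmasked score). softmax_row_ref is an illustrative helper only.
 */
#include <math.h>

static void softmax_row_ref(float *S, int M) {
    float max = -INFINITY;
    for (int i = 0; i < M; ++i) if (S[i] > max) max = S[i];
    double sum = 0.0;
    for (int i = 0; i < M; ++i) {
        if (S[i] == -INFINITY) {
            S[i] = 0.0f;                  // masked position contributes nothing
        } else {
            S[i] = expf(S[i] - max);
            sum += (double) S[i];
        }
    }
    const float inv = (float) (1.0 / sum);
    for (int i = 0; i < M; ++i) S[i] *= inv;
}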
ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst); + bark_ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); + bark_ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_flash_ff +// bark_ggml_compute_forward_flash_ff -static void ggml_compute_forward_flash_ff_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, // F16 - const struct ggml_tensor * b0, // F16 fc_w - const struct ggml_tensor * b1, // F32 fc_b - const struct ggml_tensor * c0, // F16 proj_w - const struct ggml_tensor * c1, // F32 proj_b - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); +static void bark_ggml_compute_forward_flash_ff_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, // F16 + const struct bark_ggml_tensor * b0, // F16 fc_w + const struct bark_ggml_tensor * b1, // F32 fc_b + const struct bark_ggml_tensor * c0, // F16 proj_w + const struct bark_ggml_tensor * c1, // F32 proj_b + struct bark_ggml_tensor * dst) { + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_LOCALS(int64_t, nea, a, ne) - GGML_TENSOR_LOCALS(size_t, nba, a, nb) - GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne) - GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb) - GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne) - GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb) - GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne) - GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb) - GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne) - GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nea, a, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nba, a, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int ith = params->ith; const int nth = params->nth; @@ -15733,41 +15731,41 @@ static void ggml_compute_forward_flash_ff_f16( //const int64_t N = nea1; const int64_t M = neb01; - GGML_ASSERT(ne0 == nea0); - GGML_ASSERT(ne1 == nea1); - GGML_ASSERT(ne2 == nea2); + BARK_GGML_ASSERT(ne0 == nea0); + BARK_GGML_ASSERT(ne1 == nea1); + BARK_GGML_ASSERT(ne2 == nea2); - GGML_ASSERT(nba0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb10 == sizeof(float)); - GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbc10 == sizeof(float)); + BARK_GGML_ASSERT(nba0 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nbb00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nbb10 == sizeof(float)); + BARK_GGML_ASSERT(nbc00 == sizeof(bark_ggml_fp16_t)); + BARK_GGML_ASSERT(nbc10 == sizeof(float)); - GGML_ASSERT(neb00 == D); - GGML_ASSERT(neb01 == M); - GGML_ASSERT(neb10 == M); - GGML_ASSERT(neb11 == 1); + BARK_GGML_ASSERT(neb00 == D); + BARK_GGML_ASSERT(neb01 == M); + BARK_GGML_ASSERT(neb10 == M); + BARK_GGML_ASSERT(neb11 == 1); - GGML_ASSERT(nec00 == M); - GGML_ASSERT(nec01 == D); - GGML_ASSERT(nec10 == 
D); - GGML_ASSERT(nec11 == 1); + BARK_GGML_ASSERT(nec00 == M); + BARK_GGML_ASSERT(nec01 == D); + BARK_GGML_ASSERT(nec10 == D); + BARK_GGML_ASSERT(nec11 == 1); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - // parallelize by a rows using ggml_vec_dot_f32 + // parallelize by a rows using bark_ggml_vec_dot_f32 // total rows in a const int nr = nea1*nea2*nea3; @@ -15796,22 +15794,22 @@ static void ggml_compute_forward_flash_ff_f16( // S indices const int i1 = ib01; - ggml_vec_dot_f16(nea0, + bark_ggml_vec_dot_f16(nea0, S + i1, - (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), - (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3))); + (bark_ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), + (bark_ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3))); } - ggml_vec_add_f32(neb01, S, S, (float *) b1->data); - //ggml_vec_gelu_f32(neb01, S, S); + bark_ggml_vec_add_f32(neb01, S, S, (float *) b1->data); + //bark_ggml_vec_gelu_f32(neb01, S, S); - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); + bark_ggml_fp16_t * S16 = (bark_ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); for (int64_t i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); + S16[i] = BARK_GGML_FP32_TO_FP16(S[i]); } - ggml_vec_gelu_f16(neb01, S16, S16); + bark_ggml_vec_gelu_f16(neb01, S16, S16); { // dst indices @@ -15821,13 +15819,13 @@ static void ggml_compute_forward_flash_ff_f16( for (int64_t ic = 0; ic < nec01; ++ic) { - ggml_vec_dot_f16(neb01, + bark_ggml_vec_dot_f16(neb01, (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), + (bark_ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), S16); } - ggml_vec_add_f32(nec01, + bark_ggml_vec_add_f32(nec01, (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), (float *) c1->data); @@ -15835,53 +15833,53 @@ static void ggml_compute_forward_flash_ff_f16( } } -static void ggml_compute_forward_flash_ff( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b0, - const struct ggml_tensor * b1, - const struct ggml_tensor * c0, - const struct ggml_tensor * c1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_flash_ff( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + const struct bark_ggml_tensor * b0, + const struct bark_ggml_tensor * b1, + const struct bark_ggml_tensor * c0, + const struct bark_ggml_tensor * c1, + struct bark_ggml_tensor * dst) { switch (b0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst); + bark_ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst); } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - GGML_ASSERT(false); // TODO + BARK_GGML_ASSERT(false); // TODO } break; 
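/*
 * What the feed-forward kernel above computes for one input row a of length D
 * with hidden width M: y = c0 * gelu(b0 * a + b1) + c1, i.e. a fully-connected
 * layer, a GELU, and a projection back to D. flash_ff_ref is a hypothetical
 * f32, single-threaded helper (the patched kernel keeps a, b0 and c0 in f16
 * and applies GELU via bark_ggml_vec_gelu_f16); the caller supplies an
 * M-element scratch buffer h.
 */
#include <math.h>

static float gelu_ref(float x) {                     /* exact erf form of GELU */
    return 0.5f * x * (1.0f + erff(x * 0.70710678f));
}

static void flash_ff_ref(const float *a,  int D,
                         const float *b0, const float *b1,   /* [M][D], [M] */
                         const float *c0, const float *c1,   /* [D][M], [D] */
                         int M, float *h, float *y) {        /* [M],    [D] */
    for (int i = 0; i < M; ++i) {                    // hidden = gelu(b0*a + b1)
        float s = b1[i];
        for (int j = 0; j < D; ++j) s += b0[i * D + j] * a[j];
        h[i] = gelu_ref(s);
    }
    for (int i = 0; i < D; ++i) {                    // y = c0*hidden + c1
        float s = c1[i];
        for (int j = 0; j < M; ++j) s += c0[i * M + j] * h[j];
        y[i] = s;
    }
}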
default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_flash_attn_back +// bark_ggml_compute_forward_flash_attn_back -static void ggml_compute_forward_flash_attn_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, - const struct ggml_tensor * d, +static void bark_ggml_compute_forward_flash_attn_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * q, + const struct bark_ggml_tensor * k, + const struct bark_ggml_tensor * v, + const struct bark_ggml_tensor * d, const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); + struct bark_ggml_tensor * dst) { + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ned, d, ne) - GGML_TENSOR_LOCALS(size_t, nbd, d, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ned, d, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nbd, d, nb) + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(size_t, nb, dst, nb) const int ith = params->ith; const int nth = params->nth; @@ -15891,54 +15889,54 @@ static void ggml_compute_forward_flash_attn_back_f32( const int64_t P = nek1 - N; const int64_t M = P + N; - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int Mup = bark_ggml_up(M, BARK_GGML_SOFT_MAX_UNROLL); const int mxDM = MAX(D, Mup); - // GGML_ASSERT(ne0 == D); - // GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); + // BARK_GGML_ASSERT(ne0 == D); + // BARK_GGML_ASSERT(ne1 == N); + BARK_GGML_ASSERT(P >= 0); - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); + BARK_GGML_ASSERT(nbq0 == sizeof(float)); + BARK_GGML_ASSERT(nbk0 == sizeof(float)); + BARK_GGML_ASSERT(nbv0 == sizeof(float)); - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned0 == D); + BARK_GGML_ASSERT(neq0 == D); + BARK_GGML_ASSERT(nek0 == D); + BARK_GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(ned0 == D); - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned1 == N); + BARK_GGML_ASSERT(neq1 == N); + BARK_GGML_ASSERT(nek1 == N + P); + BARK_GGML_ASSERT(nev1 == D); + BARK_GGML_ASSERT(ned1 == N); // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + BARK_GGML_ASSERT(nb0 == sizeof(float)); + BARK_GGML_ASSERT(nb0 <= nb1); + BARK_GGML_ASSERT(nb1 <= nb2); + BARK_GGML_ASSERT(nb2 <= nb3); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { if (ith == 0) { memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3); } return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int64_t elem_q = ggml_nelements(q); - const int64_t elem_k = 
ggml_nelements(k); + const int64_t elem_q = bark_ggml_nelements(q); + const int64_t elem_k = bark_ggml_nelements(k); - enum ggml_type result_type = dst->type; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); + enum bark_ggml_type result_type = dst->type; + BARK_GGML_ASSERT(bark_ggml_blck_size(result_type) == 1); + const size_t tsize = bark_ggml_type_size(result_type); const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); + const size_t offs_k = offs_q + BARK_GGML_PAD(elem_q * tsize, BARK_GGML_MEM_ALIGN); + const size_t offs_v = offs_k + BARK_GGML_PAD(elem_k * tsize, BARK_GGML_MEM_ALIGN); void * grad_q = (char *) dst->data; void * grad_k = (char *) dst->data + offs_k; @@ -15956,7 +15954,7 @@ static void ggml_compute_forward_flash_attn_back_f32( const size_t nbgv2 = nb0*nev0*nev1; const size_t nbgv3 = nb0*nev0*nev1*neq2; - // parallelize by k rows using ggml_vec_dot_f32 + // parallelize by k rows using bark_ggml_vec_dot_f32 // total rows in k const int nr = nek2*nek3; @@ -16010,14 +16008,14 @@ static void ggml_compute_forward_flash_attn_back_f32( // S indices const int i1 = ik1; - ggml_vec_dot_f32(neq0, + bark_ggml_vec_dot_f32(neq0, S + i1, (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); } // scale - ggml_vec_scale_f32(masked_begin, S, scale); + bark_ggml_vec_scale_f32(masked_begin, S, scale); for (int64_t i = masked_begin; i < M; i++) { S[i] = -INFINITY; @@ -16028,46 +16026,46 @@ static void ggml_compute_forward_flash_attn_back_f32( // dont forget to set their SM values to zero { float max = -INFINITY; - ggml_vec_max_f32(masked_begin, &max, S); + bark_ggml_vec_max_f32(masked_begin, &max, S); - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; { -#ifdef GGML_SOFT_MAX_ACCELERATE +#ifdef BARK_GGML_SOFT_MAX_ACCELERATE max = -max; vDSP_vsadd(SM, 1, &max, SM, 1, Mup); vvexpf(SM, SM, &Mup); - ggml_vec_sum_f32(Mup, &sum, SM); + bark_ggml_vec_sum_f32(Mup, &sum, SM); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; + uint16_t scvt[BARK_GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); + bark_ggml_float sump[BARK_GGML_SOFT_MAX_UNROLL] = { 0.0 }; - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { + for (int i = 0; i < Mup; i += BARK_GGML_SOFT_MAX_UNROLL) { if (i >= masked_begin) { break; } float * SR = S + i; float * SW = SM + i; - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { + for (int j = 0; j < BARK_GGML_SOFT_MAX_UNROLL; ++j) { if (i + j >= masked_begin) { break; } else if (SR[j] == -INFINITY) { SW[j] = 0.0f; } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 +#ifndef BARK_GGML_FLASH_ATTN_EXP_FP16 const float val = expf(SR[j] - max); #else - ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(SR[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); #endif - sump[j] += (ggml_float)val; + sump[j] += (bark_ggml_float)val; SW[j] = val; } } } - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { + for (int i = 0; i < BARK_GGML_SOFT_MAX_UNROLL; i++) { sum += sump[i]; } #endif @@ -16076,7 +16074,7 @@ static void ggml_compute_forward_flash_attn_back_f32( assert(sum > 0.0); sum = 1.0/sum; - ggml_vec_scale_f32(masked_begin, SM, sum); + 
bark_ggml_vec_scale_f32(masked_begin, SM, sum); } @@ -16148,9 +16146,9 @@ static void ggml_compute_forward_flash_attn_back_f32( // for ic: // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] // exclude known future zero S[..] values from operation - ggml_vec_set_f32(masked_begin, S, 0); + bark_ggml_vec_set_f32(masked_begin, S, 0); for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32(masked_begin, + bark_ggml_vec_mad_f32(masked_begin, S, (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)), *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3))); @@ -16158,15 +16156,15 @@ static void ggml_compute_forward_flash_attn_back_f32( // S = SM * (S - dot(SM, S)) float dot_SM_gradSM = 0; - ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S); - ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); - ggml_vec_mul_f32 (masked_begin, S, S, SM); + bark_ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S); + bark_ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); + bark_ggml_vec_mul_f32 (masked_begin, S, S, SM); // S = diag_mask_zero(S, P) * scale - // already done by above ggml_vec_set_f32 + // already done by above bark_ggml_vec_set_f32 // exclude known zero S[..] values from operation - ggml_vec_scale_f32(masked_begin, S, scale); + bark_ggml_vec_scale_f32(masked_begin, S, scale); // S shape [M,1] // SM shape [M,1] @@ -16180,7 +16178,7 @@ static void ggml_compute_forward_flash_attn_back_f32( // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] // exclude known zero S[..] values from loop for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, + bark_ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)), (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)), S[ic]); @@ -16192,7 +16190,7 @@ static void ggml_compute_forward_flash_attn_back_f32( // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] // exclude known zero S[..] values from loop for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, + bark_ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)), (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)), S[ic]); @@ -16204,7 +16202,7 @@ static void ggml_compute_forward_flash_attn_back_f32( // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] // exclude known zero SM[..] 
values from mad for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32(masked_begin, + bark_ggml_vec_mad_f32(masked_begin, (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)), SM, *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3))); @@ -16214,38 +16212,38 @@ static void ggml_compute_forward_flash_attn_back_f32( } } -static void ggml_compute_forward_flash_attn_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, - const struct ggml_tensor * d, +static void bark_ggml_compute_forward_flash_attn_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * q, + const struct bark_ggml_tensor * k, + const struct bark_ggml_tensor * v, + const struct bark_ggml_tensor * d, const bool masked, - struct ggml_tensor * dst) { + struct bark_ggml_tensor * dst) { switch (q->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst); + bark_ggml_compute_forward_flash_attn_back_f32(params, q, k, v, d, masked, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_win_part +// bark_ggml_compute_forward_win_part -static void ggml_compute_forward_win_part_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_win_part_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) const int32_t nep0 = ((const int32_t *)(dst->op_params))[0]; const int32_t nep1 = ((const int32_t *)(dst->op_params))[1]; @@ -16280,34 +16278,34 @@ static void ggml_compute_forward_win_part_f32( } } -static void ggml_compute_forward_win_part( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_win_part( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_win_part_f32(params, src0, dst); + bark_ggml_compute_forward_win_part_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_win_unpart +// bark_ggml_compute_forward_win_unpart -static void ggml_compute_forward_win_unpart_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_win_unpart_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + BARK_GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + 
BARK_GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) const int32_t w = ((const int32_t *)(dst->op_params))[0]; @@ -16340,96 +16338,96 @@ static void ggml_compute_forward_win_unpart_f32( } } -static void ggml_compute_forward_win_unpart( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_win_unpart( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_win_unpart_f32(params, src0, dst); + bark_ggml_compute_forward_win_unpart_f32(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } //gmml_compute_forward_unary -static void ggml_compute_forward_unary( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - const enum ggml_unary_op op = ggml_get_unary_op(dst); +static void bark_ggml_compute_forward_unary( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { + const enum bark_ggml_unary_op op = bark_ggml_get_unary_op(dst); switch (op) { - case GGML_UNARY_OP_ABS: + case BARK_GGML_UNARY_OP_ABS: { - ggml_compute_forward_abs(params, src0, dst); + bark_ggml_compute_forward_abs(params, src0, dst); } break; - case GGML_UNARY_OP_SGN: + case BARK_GGML_UNARY_OP_SGN: { - ggml_compute_forward_sgn(params, src0, dst); + bark_ggml_compute_forward_sgn(params, src0, dst); } break; - case GGML_UNARY_OP_NEG: + case BARK_GGML_UNARY_OP_NEG: { - ggml_compute_forward_neg(params, src0, dst); + bark_ggml_compute_forward_neg(params, src0, dst); } break; - case GGML_UNARY_OP_STEP: + case BARK_GGML_UNARY_OP_STEP: { - ggml_compute_forward_step(params, src0, dst); + bark_ggml_compute_forward_step(params, src0, dst); } break; - case GGML_UNARY_OP_TANH: + case BARK_GGML_UNARY_OP_TANH: { - ggml_compute_forward_tanh(params, src0, dst); + bark_ggml_compute_forward_tanh(params, src0, dst); } break; - case GGML_UNARY_OP_ELU: + case BARK_GGML_UNARY_OP_ELU: { - ggml_compute_forward_elu(params, src0, dst); + bark_ggml_compute_forward_elu(params, src0, dst); } break; - case GGML_UNARY_OP_RELU: + case BARK_GGML_UNARY_OP_RELU: { - ggml_compute_forward_relu(params, src0, dst); + bark_ggml_compute_forward_relu(params, src0, dst); } break; - case GGML_UNARY_OP_GELU: + case BARK_GGML_UNARY_OP_GELU: { - ggml_compute_forward_gelu(params, src0, dst); + bark_ggml_compute_forward_gelu(params, src0, dst); } break; - case GGML_UNARY_OP_GELU_QUICK: + case BARK_GGML_UNARY_OP_GELU_QUICK: { - ggml_compute_forward_gelu_quick(params, src0, dst); + bark_ggml_compute_forward_gelu_quick(params, src0, dst); } break; - case GGML_UNARY_OP_SILU: + case BARK_GGML_UNARY_OP_SILU: { - ggml_compute_forward_silu(params, src0, dst); + bark_ggml_compute_forward_silu(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_get_rel_pos +// bark_ggml_compute_forward_get_rel_pos -static void ggml_compute_forward_get_rel_pos_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_get_rel_pos_f16( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct 
bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 - GGML_TENSOR_UNARY_OP_LOCALS + BARK_GGML_TENSOR_UNARY_OP_LOCALS const int64_t w = ne1; - ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; - ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; + bark_ggml_fp16_t * src0_data = (bark_ggml_fp16_t *) src0->data; + bark_ggml_fp16_t * dst_data = (bark_ggml_fp16_t *) dst->data; for (int64_t i2 = 0; i2 < ne2; ++i2) { for (int64_t i1 = 0; i1 < ne1; ++i1) { @@ -16441,41 +16439,41 @@ static void ggml_compute_forward_get_rel_pos_f16( } } -static void ggml_compute_forward_get_rel_pos( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_get_rel_pos( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - ggml_compute_forward_get_rel_pos_f16(params, src0, dst); + bark_ggml_compute_forward_get_rel_pos_f16(params, src0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_add_rel_pos +// bark_ggml_compute_forward_add_rel_pos -static void ggml_compute_forward_add_rel_pos_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - const struct ggml_tensor * src2, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_add_rel_pos_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + const struct bark_ggml_tensor * src2, + struct bark_ggml_tensor * dst) { const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; - if (!inplace && params->type == GGML_TASK_INIT) { - memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); + if (!inplace && params->type == BARK_GGML_TASK_INIT) { + memcpy((char *) dst->data, (char *) src0->data, bark_ggml_nbytes(dst)); return; } - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - int64_t t0 = ggml_perf_time_us(); + int64_t t0 = bark_ggml_perf_time_us(); UNUSED(t0); // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 @@ -16524,38 +16522,38 @@ static void ggml_compute_forward_add_rel_pos_f32( } } -static void ggml_compute_forward_add_rel_pos( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - const struct ggml_tensor * src2, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_add_rel_pos( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + const struct bark_ggml_tensor * src2, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst); + bark_ggml_compute_forward_add_rel_pos_f32(params, src0, src1, src2, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_map_unary +// 
bark_ggml_compute_forward_map_unary -static void ggml_compute_forward_map_unary_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst, - const ggml_unary_op_f32_t fun) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_map_unary_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst, + const bark_ggml_unary_op_f32_t fun) { + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); @@ -16568,39 +16566,39 @@ static void ggml_compute_forward_map_unary_f32( } } -static void ggml_compute_forward_map_unary( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst, - const ggml_unary_op_f32_t fun) { +static void bark_ggml_compute_forward_map_unary( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + struct bark_ggml_tensor * dst, + const bark_ggml_unary_op_f32_t fun) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_map_unary_f32(params, src0, dst, fun); + bark_ggml_compute_forward_map_unary_f32(params, src0, dst, fun); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_map_binary +// bark_ggml_compute_forward_map_binary -static void ggml_compute_forward_map_binary_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst, - const ggml_binary_op_f32_t fun) { +static void bark_ggml_compute_forward_map_binary_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst, + const bark_ggml_binary_op_f32_t fun) { assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + assert(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - const int n = ggml_nrows(src0); + const int n = bark_ggml_nrows(src0); const int nc = src0->ne[0]; assert( dst->nb[0] == sizeof(float)); @@ -16615,134 +16613,134 @@ static void ggml_compute_forward_map_binary_f32( } } -static void ggml_compute_forward_map_binary( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst, - const ggml_binary_op_f32_t fun) { +static void bark_ggml_compute_forward_map_binary( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst, + const bark_ggml_binary_op_f32_t fun) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun); + bark_ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } 
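The map_unary/map_binary paths above hand each row of an F32 tensor to a caller-supplied kernel. A sketch of such a kernel with the (n, dst, src) shape implied by the bark_ggml_unary_op_f32_t parameter; the clamp operation itself is only an example, and the exact signature is assumed from the upstream ggml typedef:

    // Element-wise kernel in the shape the map_unary path feeds one row at a time.
    static void clamp01_row(const int n, float * dst, const float * src) {
        for (int i = 0; i < n; ++i) {
            dst[i] = src[i] < 0.0f ? 0.0f : (src[i] > 1.0f ? 1.0f : src[i]);
        }
    }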
} -// ggml_compute_forward_map_custom1 +// bark_ggml_compute_forward_map_custom1 -static void ggml_compute_forward_map_custom1_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - struct ggml_tensor * dst, - const ggml_custom1_op_f32_t fun) { +static void bark_ggml_compute_forward_map_custom1_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + struct bark_ggml_tensor * dst, + const bark_ggml_custom1_op_f32_t fun) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } fun(dst, a); } -// ggml_compute_forward_map_custom2 +// bark_ggml_compute_forward_map_custom2 -static void ggml_compute_forward_map_custom2_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b, - struct ggml_tensor * dst, - const ggml_custom2_op_f32_t fun) { +static void bark_ggml_compute_forward_map_custom2_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + const struct bark_ggml_tensor * b, + struct bark_ggml_tensor * dst, + const bark_ggml_custom2_op_f32_t fun) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } fun(dst, a, b); } -// ggml_compute_forward_map_custom3 +// bark_ggml_compute_forward_map_custom3 -static void ggml_compute_forward_map_custom3_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b, - const struct ggml_tensor * c, - struct ggml_tensor * dst, - const ggml_custom3_op_f32_t fun) { +static void bark_ggml_compute_forward_map_custom3_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + const struct bark_ggml_tensor * b, + const struct bark_ggml_tensor * c, + struct bark_ggml_tensor * dst, + const bark_ggml_custom3_op_f32_t fun) { assert(params->ith == 0); - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } fun(dst, a, b, c); } -// ggml_compute_forward_map_custom1 +// bark_ggml_compute_forward_map_custom1 -static void ggml_compute_forward_map_custom1( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_map_custom1( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params; + struct bark_ggml_map_custom1_op_params * p = (struct bark_ggml_map_custom1_op_params *) dst->op_params; p->fun(dst, a, params->ith, params->nth, p->userdata); } -// ggml_compute_forward_map_custom2 +// bark_ggml_compute_forward_map_custom2 -static void ggml_compute_forward_map_custom2( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void 
bark_ggml_compute_forward_map_custom2( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + const struct bark_ggml_tensor * b, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params; + struct bark_ggml_map_custom2_op_params * p = (struct bark_ggml_map_custom2_op_params *) dst->op_params; p->fun(dst, a, b, params->ith, params->nth, p->userdata); } -// ggml_compute_forward_map_custom3 +// bark_ggml_compute_forward_map_custom3 -static void ggml_compute_forward_map_custom3( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b, - const struct ggml_tensor * c, - struct ggml_tensor * dst) { - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { +static void bark_ggml_compute_forward_map_custom3( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * a, + const struct bark_ggml_tensor * b, + const struct bark_ggml_tensor * c, + struct bark_ggml_tensor * dst) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } - struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params; + struct bark_ggml_map_custom3_op_params * p = (struct bark_ggml_map_custom3_op_params *) dst->op_params; p->fun(dst, a, b, c, params->ith, params->nth, p->userdata); } -// ggml_compute_forward_cross_entropy_loss +// bark_ggml_compute_forward_cross_entropy_loss -static void ggml_compute_forward_cross_entropy_loss_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); +static void bark_ggml_compute_forward_cross_entropy_loss_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src1)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(dst)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, src1)); const int ith = params->ith; const int nth = params->nth; @@ -16751,21 +16749,21 @@ static void ggml_compute_forward_cross_entropy_loss_f32( // TODO: handle transposed/permuted matrices const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); + const int nr = bark_ggml_nrows(src0); - GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); + BARK_GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); - if (params->type == GGML_TASK_INIT) { + if (params->type == BARK_GGML_TASK_INIT) { if (ith == 0) { memset(sums, 0, sizeof(float) * (nth + nth * nc)); } return; } - if (params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_FINALIZE) { if (ith == 0) { float * dp = (float *) dst->data; - ggml_vec_sum_f32(nth, dp, sums); + bark_ggml_vec_sum_f32(nth, dp, sums); dp[0] *= -1.0f / (float) nr; } return; @@ -16793,25 +16791,25 @@ static void ggml_compute_forward_cross_entropy_loss_f32( } #endif // soft_max - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; { float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); + bark_ggml_vec_max_f32(nc, &max, s0); uint16_t 
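The map_custom dispatchers above pull a callback plus userdata out of dst->op_params and invoke p->fun(dst, a, params->ith, params->nth, p->userdata), so the callback is responsible for its own thread split. A sketch of such a callback; the include path and the assumption of contiguous F32 data are mine, while bark_ggml_nrows and the tensor fields used here do appear in the diff:

    #include "ggml.h"  // assumed: the header in cpp/ that declares bark_ggml_tensor

    static void negate_custom_op(struct bark_ggml_tensor * dst, const struct bark_ggml_tensor * a,
                                 int ith, int nth, void * userdata) {
        (void) userdata;                               // unused in this example
        const int64_t nr  = bark_ggml_nrows(a);
        const int64_t nc  = a->ne[0];
        const int64_t dr  = (nr + nth - 1) / nth;      // rows per thread
        const int64_t ir0 = dr * ith;
        const int64_t ir1 = ir0 + dr < nr ? ir0 + dr : nr;
        for (int64_t ir = ir0; ir < ir1; ++ir) {       // assumes both tensors hold F32 data
            const float * src = (const float *) ((const char *) a->data + ir * a->nb[1]);
            float       * out = (float *)       ((char *) dst->data + ir * dst->nb[1]);
            for (int64_t ic = 0; ic < nc; ++ic) {
                out[ic] = -src[ic];
            }
        }
    }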
scvt; UNUSED(scvt); for (int i = 0; i < nc; i++) { if (s0[i] == -INFINITY) { st[i] = 0.0f; } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 +#ifndef BARK_GGML_CROSS_ENTROPY_EXP_FP16 const float s = s0[i] - max; const float val = expf(s); #else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(s0[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt]); #endif - sum += (ggml_float)val; + sum += (bark_ggml_float)val; st[i] = val; } } @@ -16821,13 +16819,13 @@ static void ggml_compute_forward_cross_entropy_loss_f32( } // avoid log(0) by rescaling from [0..1] to [eps..1] sum = (1.0 - eps) / sum; - ggml_vec_scale_f32(nc, st, sum); - ggml_vec_add1_f32(nc, st, st, eps); - ggml_vec_log_f32(nc, st, st); - ggml_vec_mul_f32(nc, st, st, s1); + bark_ggml_vec_scale_f32(nc, st, sum); + bark_ggml_vec_add1_f32(nc, st, st, eps); + bark_ggml_vec_log_f32(nc, st, st); + bark_ggml_vec_mul_f32(nc, st, st, s1); float st_sum = 0; - ggml_vec_sum_f32(nc, &st_sum, st); + bark_ggml_vec_sum_f32(nc, &st_sum, st); sums[ith] += st_sum; #ifndef NDEBUG @@ -16840,41 +16838,41 @@ static void ggml_compute_forward_cross_entropy_loss_f32( } -static void ggml_compute_forward_cross_entropy_loss( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_cross_entropy_loss( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst); + bark_ggml_compute_forward_cross_entropy_loss_f32(params, src0, src1, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } -// ggml_compute_forward_cross_entropy_loss_back +// bark_ggml_compute_forward_cross_entropy_loss_back -static void ggml_compute_forward_cross_entropy_loss_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - const struct ggml_tensor * opt0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_contiguous(opt0)); - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); +static void bark_ggml_compute_forward_cross_entropy_loss_back_f32( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + const struct bark_ggml_tensor * opt0, + struct bark_ggml_tensor * dst) { + BARK_GGML_ASSERT(bark_ggml_is_contiguous(dst)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src1)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(opt0)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(src0, src1) && bark_ggml_are_same_shape(src0, dst)); const int64_t ith = params->ith; const int64_t nth = params->nth; - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + if (params->type == BARK_GGML_TASK_INIT || params->type == BARK_GGML_TASK_FINALIZE) { return; } @@ -16882,7 +16880,7 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( // TODO: handle transposed/permuted matrices const int64_t nc 
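The cross-entropy forward above subtracts the row maximum before exponentiating and then rescales the softmax from [0,1] into [eps,1], so the following log can never see zero. A compact per-row restatement (single-threaded and illustrative; the real code accumulates per-thread partial sums and applies the -1/nr factor in its FINALIZE phase):

    #include <math.h>

    static float cross_entropy_row_sketch(int nc, const float * s0, const float * s1, float eps) {
        float max = -INFINITY;
        for (int i = 0; i < nc; ++i) {
            if (s0[i] > max) max = s0[i];                     // numerical stability
        }
        float sum = 0.0f;
        for (int i = 0; i < nc; ++i) {
            sum += expf(s0[i] - max);
        }
        const float scale = (1.0f - eps) / sum;
        float row = 0.0f;
        for (int i = 0; i < nc; ++i) {
            const float st = expf(s0[i] - max) * scale + eps; // softmax rescaled into [eps,1]
            row += logf(st) * s1[i];
        }
        return row;  // the caller sums rows and multiplies by -1/nr, as in the hunk above
    }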
= src0->ne[0]; - const int64_t nr = ggml_nrows(src0); + const int64_t nr = bark_ggml_nrows(src0); // rows per thread const int64_t dr = (nr + nth - 1)/nth; @@ -16907,25 +16905,25 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( #endif // soft_max - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; { float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); + bark_ggml_vec_max_f32(nc, &max, s0); uint16_t scvt; UNUSED(scvt); for (int i = 0; i < nc; i++) { if (s0[i] == -INFINITY) { ds0[i] = 0.0f; } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 +#ifndef BARK_GGML_CROSS_ENTROPY_EXP_FP16 const float s = s0[i] - max; const float val = expf(s); #else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); + bark_ggml_fp16_t s = BARK_GGML_FP32_TO_FP16(s0[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = BARK_GGML_FP16_TO_FP32(table_exp_f16[scvt]); #endif - sum += (ggml_float)val; + sum += (bark_ggml_float)val; ds0[i] = val; } } @@ -16935,10 +16933,10 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( } // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr - ggml_vec_scale_f32(nc, ds0, sum); - ggml_vec_add1_f32(nc, ds0, ds0, eps); - ggml_vec_sub_f32(nc, ds0, ds0, s1); - ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr); + bark_ggml_vec_scale_f32(nc, ds0, sum); + bark_ggml_vec_add1_f32(nc, ds0, ds0, eps); + bark_ggml_vec_sub_f32(nc, ds0, ds0, s1); + bark_ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr); #ifndef NDEBUG for (int i = 0; i < nc; ++i) { @@ -16949,374 +16947,374 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( } } -static void ggml_compute_forward_cross_entropy_loss_back( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - const struct ggml_tensor * opt0, - struct ggml_tensor * dst) { +static void bark_ggml_compute_forward_cross_entropy_loss_back( + const struct bark_ggml_compute_params * params, + const struct bark_ggml_tensor * src0, + const struct bark_ggml_tensor * src1, + const struct bark_ggml_tensor * opt0, + struct bark_ggml_tensor * dst) { switch (src0->type) { - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { - ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst); + bark_ggml_compute_forward_cross_entropy_loss_back_f32(params, src0, src1, opt0, dst); } break; default: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } ///////////////////////////////// -static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { - GGML_ASSERT(params); +static void bark_ggml_compute_forward(struct bark_ggml_compute_params * params, struct bark_ggml_tensor * tensor) { + BARK_GGML_ASSERT(params); -#ifdef GGML_USE_CUBLAS - bool skip_cpu = ggml_cuda_compute_forward(params, tensor); +#ifdef BARK_GGML_USE_CUBLAS + bool skip_cpu = bark_ggml_cuda_compute_forward(params, tensor); if (skip_cpu) { return; } - GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU); - GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU); -#endif // GGML_USE_CUBLAS + BARK_GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == BARK_GGML_BACKEND_CPU); + BARK_GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == BARK_GGML_BACKEND_CPU); +#endif // BARK_GGML_USE_CUBLAS switch (tensor->op) { - case GGML_OP_DUP: + case BARK_GGML_OP_DUP: { - ggml_compute_forward_dup(params, 
tensor->src[0], tensor); + bark_ggml_compute_forward_dup(params, tensor->src[0], tensor); } break; - case GGML_OP_ADD: + case BARK_GGML_OP_ADD: { - ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_add(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_ADD1: + case BARK_GGML_OP_ADD1: { - ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_add1(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_ACC: + case BARK_GGML_OP_ACC: { - ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_acc(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_SUB: + case BARK_GGML_OP_SUB: { - ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_sub(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_MUL: + case BARK_GGML_OP_MUL: { - ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_mul(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_DIV: + case BARK_GGML_OP_DIV: { - ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_div(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_SQR: + case BARK_GGML_OP_SQR: { - ggml_compute_forward_sqr(params, tensor->src[0], tensor); + bark_ggml_compute_forward_sqr(params, tensor->src[0], tensor); } break; - case GGML_OP_SQRT: + case BARK_GGML_OP_SQRT: { - ggml_compute_forward_sqrt(params, tensor->src[0], tensor); + bark_ggml_compute_forward_sqrt(params, tensor->src[0], tensor); } break; - case GGML_OP_LOG: + case BARK_GGML_OP_LOG: { - ggml_compute_forward_log(params, tensor->src[0], tensor); + bark_ggml_compute_forward_log(params, tensor->src[0], tensor); } break; - case GGML_OP_SUM: + case BARK_GGML_OP_SUM: { - ggml_compute_forward_sum(params, tensor->src[0], tensor); + bark_ggml_compute_forward_sum(params, tensor->src[0], tensor); } break; - case GGML_OP_SUM_ROWS: + case BARK_GGML_OP_SUM_ROWS: { - ggml_compute_forward_sum_rows(params, tensor->src[0], tensor); + bark_ggml_compute_forward_sum_rows(params, tensor->src[0], tensor); } break; - case GGML_OP_MEAN: + case BARK_GGML_OP_MEAN: { - ggml_compute_forward_mean(params, tensor->src[0], tensor); + bark_ggml_compute_forward_mean(params, tensor->src[0], tensor); } break; - case GGML_OP_ARGMAX: + case BARK_GGML_OP_ARGMAX: { - ggml_compute_forward_argmax(params, tensor->src[0], tensor); + bark_ggml_compute_forward_argmax(params, tensor->src[0], tensor); } break; - case GGML_OP_REPEAT: + case BARK_GGML_OP_REPEAT: { - ggml_compute_forward_repeat(params, tensor->src[0], tensor); + bark_ggml_compute_forward_repeat(params, tensor->src[0], tensor); } break; - case GGML_OP_REPEAT_BACK: + case BARK_GGML_OP_REPEAT_BACK: { - ggml_compute_forward_repeat_back(params, tensor->src[0], tensor); + bark_ggml_compute_forward_repeat_back(params, tensor->src[0], tensor); } break; - case GGML_OP_CONCAT: + case BARK_GGML_OP_CONCAT: { - ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_concat(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_SILU_BACK: + case BARK_GGML_OP_SILU_BACK: { - ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_silu_back(params, tensor->src[0], tensor->src[1], tensor); } 
break; - case GGML_OP_NORM: + case BARK_GGML_OP_NORM: { - ggml_compute_forward_norm(params, tensor->src[0], tensor); + bark_ggml_compute_forward_norm(params, tensor->src[0], tensor); } break; - case GGML_OP_RMS_NORM: + case BARK_GGML_OP_RMS_NORM: { - ggml_compute_forward_rms_norm(params, tensor->src[0], tensor); + bark_ggml_compute_forward_rms_norm(params, tensor->src[0], tensor); } break; - case GGML_OP_RMS_NORM_BACK: + case BARK_GGML_OP_RMS_NORM_BACK: { - ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_rms_norm_back(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_GROUP_NORM: + case BARK_GGML_OP_GROUP_NORM: { - ggml_compute_forward_group_norm(params, tensor->src[0], tensor); + bark_ggml_compute_forward_group_norm(params, tensor->src[0], tensor); } break; - case GGML_OP_MUL_MAT: + case BARK_GGML_OP_MUL_MAT: { - ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_mul_mat(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_OUT_PROD: + case BARK_GGML_OP_OUT_PROD: { - ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_out_prod(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_SCALE: + case BARK_GGML_OP_SCALE: { - ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_scale(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_SET: + case BARK_GGML_OP_SET: { - ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_set(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CPY: + case BARK_GGML_OP_CPY: { - ggml_compute_forward_cpy(params, tensor->src[0], tensor); + bark_ggml_compute_forward_cpy(params, tensor->src[0], tensor); } break; - case GGML_OP_CONT: + case BARK_GGML_OP_CONT: { - ggml_compute_forward_cont(params, tensor->src[0], tensor); + bark_ggml_compute_forward_cont(params, tensor->src[0], tensor); } break; - case GGML_OP_RESHAPE: + case BARK_GGML_OP_RESHAPE: { - ggml_compute_forward_reshape(params, tensor->src[0], tensor); + bark_ggml_compute_forward_reshape(params, tensor->src[0], tensor); } break; - case GGML_OP_VIEW: + case BARK_GGML_OP_VIEW: { - ggml_compute_forward_view(params, tensor->src[0]); + bark_ggml_compute_forward_view(params, tensor->src[0]); } break; - case GGML_OP_PERMUTE: + case BARK_GGML_OP_PERMUTE: { - ggml_compute_forward_permute(params, tensor->src[0]); + bark_ggml_compute_forward_permute(params, tensor->src[0]); } break; - case GGML_OP_TRANSPOSE: + case BARK_GGML_OP_TRANSPOSE: { - ggml_compute_forward_transpose(params, tensor->src[0]); + bark_ggml_compute_forward_transpose(params, tensor->src[0]); } break; - case GGML_OP_GET_ROWS: + case BARK_GGML_OP_GET_ROWS: { - ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_get_rows(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_GET_ROWS_BACK: + case BARK_GGML_OP_GET_ROWS_BACK: { - ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_DIAG: + case BARK_GGML_OP_DIAG: { - ggml_compute_forward_diag(params, tensor->src[0], tensor); + bark_ggml_compute_forward_diag(params, tensor->src[0], tensor); } break; - case GGML_OP_DIAG_MASK_INF: 
+ case BARK_GGML_OP_DIAG_MASK_INF: { - ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor); + bark_ggml_compute_forward_diag_mask_inf(params, tensor->src[0], tensor); } break; - case GGML_OP_DIAG_MASK_ZERO: + case BARK_GGML_OP_DIAG_MASK_ZERO: { - ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor); + bark_ggml_compute_forward_diag_mask_zero(params, tensor->src[0], tensor); } break; - case GGML_OP_SOFT_MAX: + case BARK_GGML_OP_SOFT_MAX: { - ggml_compute_forward_soft_max(params, tensor->src[0], tensor); + bark_ggml_compute_forward_soft_max(params, tensor->src[0], tensor); } break; - case GGML_OP_SOFT_MAX_BACK: + case BARK_GGML_OP_SOFT_MAX_BACK: { - ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_soft_max_back(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_ROPE: + case BARK_GGML_OP_ROPE: { - ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_ROPE_BACK: + case BARK_GGML_OP_ROPE_BACK: { - ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_ALIBI: + case BARK_GGML_OP_ALIBI: { - ggml_compute_forward_alibi(params, tensor->src[0], tensor); + bark_ggml_compute_forward_alibi(params, tensor->src[0], tensor); } break; - case GGML_OP_CLAMP: + case BARK_GGML_OP_CLAMP: { - ggml_compute_forward_clamp(params, tensor->src[0], tensor); + bark_ggml_compute_forward_clamp(params, tensor->src[0], tensor); } break; - case GGML_OP_CONV_1D: + case BARK_GGML_OP_CONV_1D: { - ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_1D_STAGE_0: + case BARK_GGML_OP_CONV_1D_STAGE_0: { - ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_1D_STAGE_1: + case BARK_GGML_OP_CONV_1D_STAGE_1: { - ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_TRANSPOSE_1D: + case BARK_GGML_OP_CONV_TRANSPOSE_1D: { - ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_PAD_REFLEC_1D: + case BARK_GGML_OP_PAD_REFLEC_1D: { - ggml_compute_forward_pad_reflec_1d(params, tensor->src[0], tensor); + bark_ggml_compute_forward_pad_reflec_1d(params, tensor->src[0], tensor); } break; - case GGML_OP_CONV_2D: + case BARK_GGML_OP_CONV_2D: { - ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_2D_STAGE_0: + case BARK_GGML_OP_CONV_2D_STAGE_0: { - ggml_compute_forward_conv_2d_stage_0(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_2d_stage_0(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_2D_STAGE_1: + case BARK_GGML_OP_CONV_2D_STAGE_1: { - ggml_compute_forward_conv_2d_stage_1(params, 
tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_2d_stage_1(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_TRANSPOSE_2D: + case BARK_GGML_OP_CONV_TRANSPOSE_2D: { - ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_POOL_1D: + case BARK_GGML_OP_POOL_1D: { - ggml_compute_forward_pool_1d(params, tensor->src[0], tensor); + bark_ggml_compute_forward_pool_1d(params, tensor->src[0], tensor); } break; - case GGML_OP_POOL_2D: + case BARK_GGML_OP_POOL_2D: { - ggml_compute_forward_pool_2d(params, tensor->src[0], tensor); + bark_ggml_compute_forward_pool_2d(params, tensor->src[0], tensor); } break; - case GGML_OP_UPSCALE: + case BARK_GGML_OP_UPSCALE: { - ggml_compute_forward_upscale(params, tensor->src[0], tensor); + bark_ggml_compute_forward_upscale(params, tensor->src[0], tensor); } break; - case GGML_OP_FLASH_ATTN: + case BARK_GGML_OP_FLASH_ATTN: { - const int32_t t = ggml_get_op_params_i32(tensor, 0); - GGML_ASSERT(t == 0 || t == 1); + const int32_t t = bark_ggml_get_op_params_i32(tensor, 0); + BARK_GGML_ASSERT(t == 0 || t == 1); const bool masked = t != 0; - ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor); + bark_ggml_compute_forward_flash_attn(params, tensor->src[0], tensor->src[1], tensor->src[2], masked, tensor); } break; - case GGML_OP_FLASH_FF: + case BARK_GGML_OP_FLASH_FF: { - ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor); + bark_ggml_compute_forward_flash_ff(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor->src[4], tensor); } break; - case GGML_OP_FLASH_ATTN_BACK: + case BARK_GGML_OP_FLASH_ATTN_BACK: { - int32_t t = ggml_get_op_params_i32(tensor, 0); - GGML_ASSERT(t == 0 || t == 1); + int32_t t = bark_ggml_get_op_params_i32(tensor, 0); + BARK_GGML_ASSERT(t == 0 || t == 1); bool masked = t != 0; - ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor); + bark_ggml_compute_forward_flash_attn_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], masked, tensor); } break; - case GGML_OP_WIN_PART: + case BARK_GGML_OP_WIN_PART: { - ggml_compute_forward_win_part(params, tensor->src[0], tensor); + bark_ggml_compute_forward_win_part(params, tensor->src[0], tensor); } break; - case GGML_OP_WIN_UNPART: + case BARK_GGML_OP_WIN_UNPART: { - ggml_compute_forward_win_unpart(params, tensor->src[0], tensor); + bark_ggml_compute_forward_win_unpart(params, tensor->src[0], tensor); } break; - case GGML_OP_UNARY: + case BARK_GGML_OP_UNARY: { - ggml_compute_forward_unary(params, tensor->src[0], tensor); + bark_ggml_compute_forward_unary(params, tensor->src[0], tensor); } break; - case GGML_OP_GET_REL_POS: + case BARK_GGML_OP_GET_REL_POS: { - ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor); + bark_ggml_compute_forward_get_rel_pos(params, tensor->src[0], tensor); } break; - case GGML_OP_ADD_REL_POS: + case BARK_GGML_OP_ADD_REL_POS: { - ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); + bark_ggml_compute_forward_add_rel_pos(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); } break; - case GGML_OP_MAP_UNARY: + case BARK_GGML_OP_MAP_UNARY: { - ggml_unary_op_f32_t fun; + 
bark_ggml_unary_op_f32_t fun; memcpy(&fun, tensor->op_params, sizeof(fun)); - ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun); + bark_ggml_compute_forward_map_unary(params, tensor->src[0], tensor, fun); } break; - case GGML_OP_MAP_BINARY: + case BARK_GGML_OP_MAP_BINARY: { - ggml_binary_op_f32_t fun; + bark_ggml_binary_op_f32_t fun; memcpy(&fun, tensor->op_params, sizeof(fun)); - ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun); + bark_ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun); } break; - case GGML_OP_MAP_CUSTOM1_F32: + case BARK_GGML_OP_MAP_CUSTOM1_F32: { - ggml_custom1_op_f32_t fun; + bark_ggml_custom1_op_f32_t fun; memcpy(&fun, tensor->op_params, sizeof(fun)); - ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun); + bark_ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun); } break; - case GGML_OP_MAP_CUSTOM2_F32: + case BARK_GGML_OP_MAP_CUSTOM2_F32: { - ggml_custom2_op_f32_t fun; + bark_ggml_custom2_op_f32_t fun; memcpy(&fun, tensor->op_params, sizeof(fun)); - ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun); + bark_ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun); } break; - case GGML_OP_MAP_CUSTOM3_F32: + case BARK_GGML_OP_MAP_CUSTOM3_F32: { - ggml_custom3_op_f32_t fun; + bark_ggml_custom3_op_f32_t fun; memcpy(&fun, tensor->op_params, sizeof(fun)); - ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun); + bark_ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun); } break; - case GGML_OP_MAP_CUSTOM1: + case BARK_GGML_OP_MAP_CUSTOM1: { - ggml_compute_forward_map_custom1(params, tensor->src[0], tensor); + bark_ggml_compute_forward_map_custom1(params, tensor->src[0], tensor); } break; - case GGML_OP_MAP_CUSTOM2: + case BARK_GGML_OP_MAP_CUSTOM2: { - ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_MAP_CUSTOM3: + case BARK_GGML_OP_MAP_CUSTOM3: { - ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); + bark_ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); } break; - case GGML_OP_CROSS_ENTROPY_LOSS: + case BARK_GGML_OP_CROSS_ENTROPY_LOSS: { - ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor); + bark_ggml_compute_forward_cross_entropy_loss(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CROSS_ENTROPY_LOSS_BACK: + case BARK_GGML_OP_CROSS_ENTROPY_LOSS_BACK: { - ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); + bark_ggml_compute_forward_cross_entropy_loss_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor); } break; - case GGML_OP_NONE: + case BARK_GGML_OP_NONE: { // nop } break; - case GGML_OP_COUNT: + case BARK_GGML_OP_COUNT: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } } //////////////////////////////////////////////////////////////////////////////// -static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HT_SIZE is too small"); +static_assert(BARK_GGML_GRAPH_HASHTABLE_SIZE > BARK_GGML_MAX_NODES * 2, "BARK_GGML_GRAPH_HT_SIZE is too small"); static size_t 
hash(void * p) { - return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE; + return (size_t)p % BARK_GGML_GRAPH_HASHTABLE_SIZE; } static size_t hash_find(void * hash_table[], void * p) { @@ -17325,10 +17323,10 @@ static size_t hash_find(void * hash_table[], void * p) { // linear probing size_t i = h; while (hash_table[i] != NULL && hash_table[i] != p) { - i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE; + i = (i + 1) % BARK_GGML_GRAPH_HASHTABLE_SIZE; if (i == h) { // visited all hash table entries -> not found - return GGML_GRAPH_HASHTABLE_SIZE; + return BARK_GGML_GRAPH_HASHTABLE_SIZE; } } return i; @@ -17337,31 +17335,31 @@ static size_t hash_find(void * hash_table[], void * p) { static bool hash_insert(void * hash_table[], void * p) { size_t i = hash_find(hash_table, p); - GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full + BARK_GGML_ASSERT(i < BARK_GGML_GRAPH_HASHTABLE_SIZE); // assert that not full if (hash_table[i] == p) { return true; } // insert - GGML_ASSERT(hash_table[i] == NULL); + BARK_GGML_ASSERT(hash_table[i] == NULL); hash_table[i] = p; return false; } static bool hash_contains(void * hash_table[], void * p) { size_t i = hash_find(hash_table, p); - return (i < GGML_GRAPH_HASHTABLE_SIZE) && (hash_table[i] == p); + return (i < BARK_GGML_GRAPH_HASHTABLE_SIZE) && (hash_table[i] == p); } struct hash_map { - void * keys[GGML_GRAPH_HASHTABLE_SIZE]; - void * vals[GGML_GRAPH_HASHTABLE_SIZE]; + void * keys[BARK_GGML_GRAPH_HASHTABLE_SIZE]; + void * vals[BARK_GGML_GRAPH_HASHTABLE_SIZE]; }; static struct hash_map * new_hash_map(void) { struct hash_map * result = malloc(sizeof(struct hash_map)); - for (int i=0; i<GGML_GRAPH_HASHTABLE_SIZE; ++i) { + for (int i=0; i<BARK_GGML_GRAPH_HASHTABLE_SIZE; ++i) { result->keys[i] = NULL; result->vals[i] = NULL; } @@ -17374,11 +17372,11 @@ static void free_hash_map(struct hash_map * map) { // gradient checkpointing -static struct ggml_tensor * ggml_recompute_graph_node( - struct ggml_context * ctx, - struct ggml_cgraph * graph, +static struct bark_ggml_tensor * bark_ggml_recompute_graph_node( + struct bark_ggml_context * ctx, + struct bark_ggml_cgraph * graph, struct hash_map * replacements, - struct ggml_tensor * node) { + struct bark_ggml_tensor * node) { if (node == NULL) { return NULL; @@ -17393,7 +17391,7 @@ static struct ggml_tensor * ggml_recompute_graph_node( } int count_children = 0; - for (int k = 0; k < GGML_MAX_SRC; ++k) { + for (int k = 0; k < BARK_GGML_MAX_SRC; ++k) { if (node->src[k]) { ++count_children; } @@ -17404,15 +17402,15 @@ static struct ggml_tensor * ggml_recompute_graph_node( } size_t i = hash_find(replacements->keys, node); - GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full + BARK_GGML_ASSERT(i < BARK_GGML_GRAPH_HASHTABLE_SIZE); // assert that not full if (replacements->keys[i] == node) { - return (struct ggml_tensor *) replacements->vals[i]; + return (struct bark_ggml_tensor *) replacements->vals[i]; } - struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne); + struct bark_ggml_tensor * clone = bark_ggml_new_tensor(ctx, node->type, node->n_dims, node->ne); // insert clone into replacements - GGML_ASSERT(replacements->keys[i] == NULL); // assert that we don't overwrite + BARK_GGML_ASSERT(replacements->keys[i] == NULL); // assert that we don't overwrite replacements->keys[i] = node; replacements->vals[i] = clone; @@ -17420,11 +17418,11 @@ static struct ggml_tensor * ggml_recompute_graph_node( clone->grad = node->grad; clone->is_param = node->is_param; clone->extra = node->extra; - for (int k = 0; k < GGML_MAX_DIMS; ++k) { + for (int k = 0; k < BARK_GGML_MAX_DIMS; ++k) {
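These helpers form a fixed-size open-addressing set over tensor pointers: the static_assert keeps the table more than twice as large as the node limit so linear probing stays short, and hash_insert doubles as a visited test by reporting whether the pointer was already present. A stripped-down usage sketch (HT_SIZE and the names below are stand-ins, not part of the diff):

    #include <stddef.h>

    #define HT_SIZE 4099  // stand-in for BARK_GGML_GRAPH_HASHTABLE_SIZE: comfortably > 2x the entries

    static size_t ht_find(void * table[], void * p) {
        size_t i = (size_t) p % HT_SIZE;         // same idea as hash() above
        while (table[i] != NULL && table[i] != p) {
            i = (i + 1) % HT_SIZE;               // linear probing; the real code also detects a full table
        }
        return i;
    }

    // Returns 1 if p was already in the set (already visited), 0 if it was just inserted.
    static int ht_insert_was_present(void * table[], void * p) {
        const size_t i = ht_find(table, p);
        if (table[i] == p) {
            return 1;
        }
        table[i] = p;
        return 0;
    }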
clone->nb[k] = node->nb[k]; } - for (int k = 0; k < GGML_MAX_SRC; ++k) { - clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]); + for (int k = 0; k < BARK_GGML_MAX_SRC; ++k) { + clone->src[k] = bark_ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]); } if (node->view_src != NULL) { clone->data = (node->view_src->data == NULL) @@ -17435,23 +17433,23 @@ static struct ggml_tensor * ggml_recompute_graph_node( clone->view_offs = node->view_offs; } - GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t))); - GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME); + BARK_GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (BARK_GGML_MAX_OP_PARAMS / sizeof(int32_t))); + BARK_GGML_ASSERT(sizeof(node->name) == BARK_GGML_MAX_NAME); memcpy(clone->op_params, node->op_params, sizeof(node->op_params)); - ggml_format_name(clone, "%s (clone)", ggml_get_name(node)); + bark_ggml_format_name(clone, "%s (clone)", bark_ggml_get_name(node)); return clone; } -void ggml_build_backward_gradient_checkpointing( - struct ggml_context * ctx, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - struct ggml_cgraph * gb_tmp, - struct ggml_tensor * * checkpoints, +void bark_ggml_build_backward_gradient_checkpointing( + struct bark_ggml_context * ctx, + struct bark_ggml_cgraph * gf, + struct bark_ggml_cgraph * gb, + struct bark_ggml_cgraph * gb_tmp, + struct bark_ggml_tensor * * checkpoints, int n_checkpoints) { *gb_tmp = *gf; - ggml_build_backward_expand(ctx, gf, gb_tmp, true); + bark_ggml_build_backward_expand(ctx, gf, gb_tmp, true); if (n_checkpoints <= 0) { *gb = *gb_tmp; @@ -17463,8 +17461,8 @@ void ggml_build_backward_gradient_checkpointing( // insert checkpoints in replacements for (int i = 0; i < n_checkpoints; ++i) { size_t k = hash_find(replacements->keys, checkpoints[i]); - GGML_ASSERT(k < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full - GGML_ASSERT(replacements->keys[k] == NULL); // assert that we don't overwrite + BARK_GGML_ASSERT(k < BARK_GGML_GRAPH_HASHTABLE_SIZE); // assert that not full + BARK_GGML_ASSERT(replacements->keys[k] == NULL); // assert that we don't overwrite replacements->keys[k] = checkpoints[i]; replacements->vals[k] = checkpoints[i]; } @@ -17474,16 +17472,16 @@ void ggml_build_backward_gradient_checkpointing( // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]), // by recomputing them from checkpoints for (int i = gf->n_nodes; i < gb_tmp->n_nodes; ++i) { - struct ggml_tensor * node = gb_tmp->nodes[i]; - for (int k = 0; k < GGML_MAX_SRC; ++k) { + struct bark_ggml_tensor * node = gb_tmp->nodes[i]; + for (int k = 0; k < BARK_GGML_MAX_SRC; ++k) { // insert new tensors recomputing src, reusing already made replacements, // remember replacements: remember new tensors with mapping from corresponding gf nodes // recurse for input tensors, // unless (i.e.
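bark_ggml_build_backward_gradient_checkpointing, whose renamed signature appears above, rewrites the backward graph so that every activation not listed in checkpoints is recomputed from the nearest checkpoint instead of being kept alive. A sketch of how it might be driven; graph and loss construction happen elsewhere and the include path is an assumption, only the two build_* calls and the struct names are taken from the diff:

    #include "ggml.h"  // assumed header for the bark_ggml_* declarations

    static void build_training_graphs_sketch(struct bark_ggml_context * ctx,
                                             struct bark_ggml_cgraph * gf,
                                             struct bark_ggml_cgraph * gb,
                                             struct bark_ggml_cgraph * gb_tmp,
                                             struct bark_ggml_tensor * loss,
                                             struct bark_ggml_tensor ** checkpoints,
                                             int n_checkpoints) {
        // 1. record the forward graph that produces the loss
        bark_ggml_build_forward_expand(gf, loss);
        // 2. derive a backward graph that recomputes anything not listed in `checkpoints`
        bark_ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp,
                                                        checkpoints, n_checkpoints);
        // Evaluating gb then trades extra forward compute for a much smaller set of
        // activations that must stay resident during the backward pass.
    }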
terminating when) input tensors are replacments (like checkpoints) - node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]); + node->src[k] = bark_ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]); } // insert rewritten backward node with replacements made into resulting backward graph gb - ggml_build_forward_expand(gb, node); + bark_ggml_build_forward_expand(gb, node); } free_hash_map(replacements); @@ -17491,75 +17489,75 @@ void ggml_build_backward_gradient_checkpointing( // functions to change gradients considering the case that input a might be initial gradient with zero value -static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { +static struct bark_ggml_tensor * bark_ggml_add_or_set(struct bark_ggml_context * ctx, struct bark_ggml_tensor * a, struct bark_ggml_tensor * b, void * zero_table[]) { if (hash_contains(zero_table, a)) { return b; } else { - return ggml_add_impl(ctx, a, b, false); + return bark_ggml_add_impl(ctx, a, b, false); } } -static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, void * zero_table[]) { +static struct bark_ggml_tensor * bark_ggml_acc_or_set(struct bark_ggml_context * ctx, struct bark_ggml_tensor * a, struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, void * zero_table[]) { if (hash_contains(zero_table, a)) { - struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0)); - return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false); + struct bark_ggml_tensor * a_zero = bark_ggml_scale(ctx, a, bark_ggml_new_f32(ctx, 0)); + return bark_ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false); } else { - return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false); + return bark_ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false); } } -static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { +static struct bark_ggml_tensor * bark_ggml_add1_or_set(struct bark_ggml_context * ctx, struct bark_ggml_tensor * a, struct bark_ggml_tensor * b, void * zero_table[]) { if (hash_contains(zero_table, a)) { - return ggml_repeat(ctx, b, a); + return bark_ggml_repeat(ctx, b, a); } else { - return ggml_add1_impl(ctx, a, b, false); + return bark_ggml_add1_impl(ctx, a, b, false); } } -static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { +static struct bark_ggml_tensor * bark_ggml_sub_or_set(struct bark_ggml_context * ctx, struct bark_ggml_tensor * a, struct bark_ggml_tensor * b, void * zero_table[]) { if (hash_contains(zero_table, a)) { - return ggml_neg(ctx, b); + return bark_ggml_neg(ctx, b); } else { - return ggml_sub_impl(ctx, a, b, false); + return bark_ggml_sub_impl(ctx, a, b, false); } } -static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, void * zero_table[]) { - struct ggml_tensor * src0 = tensor->src[0]; - struct ggml_tensor * src1 = tensor->src[1]; +static void bark_ggml_compute_backward(struct bark_ggml_context * ctx, struct bark_ggml_tensor * tensor, void * zero_table[]) { + struct bark_ggml_tensor * src0 = tensor->src[0]; + struct bark_ggml_tensor * src1 = tensor->src[1]; switch (tensor->op) { - case GGML_OP_DUP: + case BARK_GGML_OP_DUP: { if (src0->grad) { - 
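The *_or_set helpers above consult zero_table, the set of gradient tensors that are still their initial zero placeholders: for those, the first real contribution simply replaces the placeholder (or becomes a plain negation for SUB) instead of emitting a useless 0 + g node. A scalar analogue of the pattern (illustrative only):

    typedef struct {
        float value;
        int   known_zero;  // plays the role of membership in zero_table
    } grad_acc;

    static grad_acc add_or_set_sketch(grad_acc a, float b) {
        grad_acc r;
        if (a.known_zero) {
            r.value = b;            // "set": skip the pointless addition to zero
        } else {
            r.value = a.value + b;  // "add": accumulate as usual
        }
        r.known_zero = 0;
        return r;
    }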
src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } } break; - case GGML_OP_ADD: + case BARK_GGML_OP_ADD: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } if (src1->grad) { - src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table); + src1->grad = bark_ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table); } } break; - case GGML_OP_ADD1: + case BARK_GGML_OP_ADD1: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } if (src1->grad) { - src1->grad = ggml_add_or_set(ctx, + src1->grad = bark_ggml_add_or_set(ctx, src1->grad, - ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean + bark_ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean zero_table); } } break; - case GGML_OP_ACC: + case BARK_GGML_OP_ACC: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } if (src1->grad) { const size_t nb1 = ((int32_t *) tensor->op_params)[0]; @@ -17567,7 +17565,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor const size_t nb3 = ((int32_t *) tensor->op_params)[2]; const size_t offset = ((int32_t *) tensor->op_params)[3]; - struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx, + struct bark_ggml_tensor * tensor_grad_view = bark_ggml_view_4d(ctx, tensor->grad, src1->grad->ne[0], src1->grad->ne[1], @@ -17576,178 +17574,178 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor nb1, nb2, nb3, offset); src1->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src1->grad, - ggml_reshape(ctx, - ggml_cont(ctx, tensor_grad_view), + bark_ggml_reshape(ctx, + bark_ggml_cont(ctx, tensor_grad_view), src1->grad), zero_table); } } break; - case GGML_OP_SUB: + case BARK_GGML_OP_SUB: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } if (src1->grad) { - src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table); + src1->grad = bark_ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table); } } break; - case GGML_OP_MUL: + case BARK_GGML_OP_MUL: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_mul(ctx, src1, tensor->grad), + bark_ggml_mul(ctx, src1, tensor->grad), zero_table); } if (src1->grad) { src1->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src1->grad, - ggml_mul(ctx, src0, tensor->grad), + bark_ggml_mul(ctx, src0, tensor->grad), zero_table); } } break; - case GGML_OP_DIV: + case BARK_GGML_OP_DIV: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_div(ctx, tensor->grad, src1), + bark_ggml_div(ctx, tensor->grad, src1), zero_table); } if (src1->grad) { src1->grad = - ggml_sub_or_set(ctx, + bark_ggml_sub_or_set(ctx, src1->grad, - ggml_mul(ctx, + bark_ggml_mul(ctx, tensor->grad, - ggml_div(ctx, tensor, src1)), + bark_ggml_div(ctx, tensor, src1)), zero_table); } } break; - case GGML_OP_SQR: + case BARK_GGML_OP_SQR: { if (src0->grad) { src0->grad = - 
ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_scale(ctx, - ggml_mul(ctx, src0, tensor->grad), - ggml_new_f32(ctx, 2.0f)), + bark_ggml_scale(ctx, + bark_ggml_mul(ctx, src0, tensor->grad), + bark_ggml_new_f32(ctx, 2.0f)), zero_table); } } break; - case GGML_OP_SQRT: + case BARK_GGML_OP_SQRT: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_scale(ctx, - ggml_div(ctx, + bark_ggml_scale(ctx, + bark_ggml_div(ctx, tensor->grad, tensor), - ggml_new_f32(ctx, 0.5f)), + bark_ggml_new_f32(ctx, 0.5f)), zero_table); } } break; - case GGML_OP_LOG: + case BARK_GGML_OP_LOG: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_div(ctx, + bark_ggml_div(ctx, tensor->grad, src0), zero_table); } } break; - case GGML_OP_SUM: + case BARK_GGML_OP_SUM: { if (src0->grad) { src0->grad = - ggml_add1_or_set(ctx, + bark_ggml_add1_or_set(ctx, src0->grad, tensor->grad, zero_table); } } break; - case GGML_OP_SUM_ROWS: + case BARK_GGML_OP_SUM_ROWS: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_repeat(ctx, + bark_ggml_repeat(ctx, tensor->grad, src0->grad), zero_table); } } break; - case GGML_OP_MEAN: - case GGML_OP_ARGMAX: + case BARK_GGML_OP_MEAN: + case BARK_GGML_OP_ARGMAX: { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } break; - case GGML_OP_REPEAT: + case BARK_GGML_OP_REPEAT: { // necessary for llama if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_repeat_back(ctx, tensor->grad, src0->grad), + bark_ggml_repeat_back(ctx, tensor->grad, src0->grad), zero_table); } } break; - case GGML_OP_REPEAT_BACK: + case BARK_GGML_OP_REPEAT_BACK: { if (src0->grad) { // TODO: test this - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_repeat(ctx, tensor->grad, src0->grad), + bark_ggml_repeat(ctx, tensor->grad, src0->grad), zero_table); } } break; - case GGML_OP_CONCAT: + case BARK_GGML_OP_CONCAT: { - GGML_ASSERT(false); // TODO: implement + BARK_GGML_ASSERT(false); // TODO: implement } break; - case GGML_OP_SILU_BACK: + case BARK_GGML_OP_SILU_BACK: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_NORM: + case BARK_GGML_OP_NORM: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_RMS_NORM: + case BARK_GGML_OP_RMS_NORM: { // necessary for llama if (src0->grad) { float eps; memcpy(&eps, tensor->op_params, sizeof(float)); - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_rms_norm_back(ctx, src0, tensor->grad, eps), + bark_ggml_rms_norm_back(ctx, src0, tensor->grad, eps), zero_table); } } break; - case GGML_OP_RMS_NORM_BACK: + case BARK_GGML_OP_RMS_NORM_BACK: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_GROUP_NORM: + case BARK_GGML_OP_GROUP_NORM: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_MUL_MAT: + case BARK_GGML_OP_MUL_MAT: { // https://cs231n.github.io/optimization-2/#staged // # forward pass @@ -17766,8 +17764,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor // necessary for llama if (src0->grad) { - struct ggml_tensor * 
s1_tg = - ggml_out_prod(ctx, // [n,m,qq,rr] + struct bark_ggml_tensor * s1_tg = + bark_ggml_out_prod(ctx, // [n,m,qq,rr] src1, // [n,p,qq,rr] tensor->grad); // [m,p,qq,rr] const int64_t qq = s1_tg->ne[2]; @@ -17778,70 +17776,70 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor const bool ne3_broadcasted = rr > r1; if (ne2_broadcasted || ne3_broadcasted) { // sum broadcast repetitions of s1_tg into shape of src0 - s1_tg = ggml_repeat_back(ctx, s1_tg, src0); + s1_tg = bark_ggml_repeat_back(ctx, s1_tg, src0); } src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, // [n,m,q1,r1] s1_tg, // [n,m,q1,r1] zero_table); } if (src1->grad) { src1->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src1->grad, // [n,p,qq,rr] - // ggml_mul_mat(ctx, // [n,p,qq,rr] - // ggml_cont(ctx, // [m,n,q1,r1] - // ggml_transpose(ctx, src0)), // [m,n,q1,r1] + // bark_ggml_mul_mat(ctx, // [n,p,qq,rr] + // bark_ggml_cont(ctx, // [m,n,q1,r1] + // bark_ggml_transpose(ctx, src0)), // [m,n,q1,r1] // tensor->grad), // [m,p,qq,rr] // // when src0 is bigger than tensor->grad (this is mostly the case in llama), // // avoid transpose of src0, rather transpose smaller tensor->grad - // // and then use ggml_out_prod - ggml_out_prod(ctx, // [n,p,qq,rr] + // // and then use bark_ggml_out_prod + bark_ggml_out_prod(ctx, // [n,p,qq,rr] src0, // [n,m,q1,r1] - ggml_transpose(ctx, // [p,m,qq,rr] + bark_ggml_transpose(ctx, // [p,m,qq,rr] tensor->grad)), // [m,p,qq,rr] zero_table); } } break; - case GGML_OP_OUT_PROD: + case BARK_GGML_OP_OUT_PROD: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_SCALE: + case BARK_GGML_OP_SCALE: { // necessary for llama if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_scale_impl(ctx, tensor->grad, src1, false), + bark_ggml_scale_impl(ctx, tensor->grad, src1, false), zero_table); } if (src1->grad) { src1->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src1->grad, - ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)), + bark_ggml_sum(ctx, bark_ggml_mul_impl(ctx, tensor->grad, src0, false)), zero_table); } } break; - case GGML_OP_SET: + case BARK_GGML_OP_SET: { const size_t nb1 = ((int32_t *) tensor->op_params)[0]; const size_t nb2 = ((int32_t *) tensor->op_params)[1]; const size_t nb3 = ((int32_t *) tensor->op_params)[2]; const size_t offset = ((int32_t *) tensor->op_params)[3]; - struct ggml_tensor * tensor_grad_view = NULL; + struct bark_ggml_tensor * tensor_grad_view = NULL; if (src0->grad || src1->grad) { - GGML_ASSERT(src0->type == tensor->type); - GGML_ASSERT(tensor->grad->type == tensor->type); - GGML_ASSERT(tensor->grad->type == src1->grad->type); + BARK_GGML_ASSERT(src0->type == tensor->type); + BARK_GGML_ASSERT(tensor->grad->type == tensor->type); + BARK_GGML_ASSERT(tensor->grad->type == src1->grad->type); - tensor_grad_view = ggml_view_4d(ctx, + tensor_grad_view = bark_ggml_view_4d(ctx, tensor->grad, src1->grad->ne[0], src1->grad->ne[1], @@ -17851,26 +17849,26 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor } if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_acc_impl(ctx, + bark_ggml_acc_impl(ctx, tensor->grad, - ggml_neg(ctx, tensor_grad_view), + bark_ggml_neg(ctx, tensor_grad_view), nb1, nb2, nb3, offset, false), zero_table); } if (src1->grad) { src1->grad = - ggml_add_or_set(ctx, + 
bark_ggml_add_or_set(ctx, src1->grad, - ggml_reshape(ctx, - ggml_cont(ctx, tensor_grad_view), + bark_ggml_reshape(ctx, + bark_ggml_cont(ctx, tensor_grad_view), src1->grad), zero_table); } } break; - case GGML_OP_CPY: + case BARK_GGML_OP_CPY: { // necessary for llama // cpy overwrites value of src1 by src0 and returns view(src1) @@ -17878,36 +17876,36 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor // tensor = src0 * 1 + src1 * 0 if (src0->grad) { // dsrc0 = dtensor * 1 - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } if (src1->grad) { // dsrc1 = dtensor * 0 -> noop } } break; - case GGML_OP_CONT: + case BARK_GGML_OP_CONT: { // same as cpy if (src0->grad) { - GGML_ASSERT(ggml_is_contiguous(src0->grad)); - GGML_ASSERT(ggml_is_contiguous(tensor->grad)); - src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(src0->grad)); + BARK_GGML_ASSERT(bark_ggml_is_contiguous(tensor->grad)); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table); } } break; - case GGML_OP_RESHAPE: + case BARK_GGML_OP_RESHAPE: { // necessary for llama if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_reshape(ctx, - ggml_is_contiguous(tensor->grad) + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_reshape(ctx, + bark_ggml_is_contiguous(tensor->grad) ? tensor->grad - : ggml_cont(ctx, tensor->grad), + : bark_ggml_cont(ctx, tensor->grad), src0->grad), zero_table); } } break; - case GGML_OP_VIEW: + case BARK_GGML_OP_VIEW: { // necessary for llama if (src0->grad) { @@ -17921,22 +17919,22 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor if (src0->type != src0->grad->type) { // gradient is typically F32, but src0 could be other type - size_t ng = ggml_element_size(src0->grad); - size_t n0 = ggml_element_size(src0); - GGML_ASSERT(offset % n0 == 0); - GGML_ASSERT(nb1 % n0 == 0); - GGML_ASSERT(nb2 % n0 == 0); - GGML_ASSERT(nb3 % n0 == 0); + size_t ng = bark_ggml_element_size(src0->grad); + size_t n0 = bark_ggml_element_size(src0); + BARK_GGML_ASSERT(offset % n0 == 0); + BARK_GGML_ASSERT(nb1 % n0 == 0); + BARK_GGML_ASSERT(nb2 % n0 == 0); + BARK_GGML_ASSERT(nb3 % n0 == 0); offset = (offset / n0) * ng; nb1 = (nb1 / n0) * ng; nb2 = (nb2 / n0) * ng; nb3 = (nb3 / n0) * ng; } - src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table); + src0->grad = bark_ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table); } } break; - case GGML_OP_PERMUTE: + case BARK_GGML_OP_PERMUTE: { // necessary for llama if (src0->grad) { @@ -17951,8 +17949,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor axes_backward[axis2] = 2; axes_backward[axis3] = 3; src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_permute(ctx, + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_permute(ctx, tensor->grad, axes_backward[0], axes_backward[1], @@ -17961,77 +17959,77 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor zero_table); } } break; - case GGML_OP_TRANSPOSE: + case BARK_GGML_OP_TRANSPOSE: { // necessary for llama if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_transpose(ctx, tensor->grad), + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_transpose(ctx, tensor->grad), zero_table); } } break; - case GGML_OP_GET_ROWS: + case 
BARK_GGML_OP_GET_ROWS: { // necessary for llama (only for tokenizer) if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, src0->grad, - // last ggml_get_rows_back argument src0->grad is only + bark_ggml_add_or_set(ctx, src0->grad, + // last bark_ggml_get_rows_back argument src0->grad is only // necessary to setup correct output shape - ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad), + bark_ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad), zero_table); } if (src1->grad) { // noop } } break; - case GGML_OP_GET_ROWS_BACK: + case BARK_GGML_OP_GET_ROWS_BACK: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_DIAG: + case BARK_GGML_OP_DIAG: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_DIAG_MASK_INF: + case BARK_GGML_OP_DIAG_MASK_INF: { // necessary for llama if (src0->grad) { const int n_past = ((int32_t *) tensor->op_params)[0]; src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false), + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false), zero_table); } } break; - case GGML_OP_DIAG_MASK_ZERO: + case BARK_GGML_OP_DIAG_MASK_ZERO: { // necessary for llama if (src0->grad) { const int n_past = ((int32_t *) tensor->op_params)[0]; src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false), + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false), zero_table); } } break; - case GGML_OP_SOFT_MAX: + case BARK_GGML_OP_SOFT_MAX: { // necessary for llama if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, src0->grad, - ggml_soft_max_back(ctx, tensor->grad, tensor), + bark_ggml_add_or_set(ctx, src0->grad, + bark_ggml_soft_max_back(ctx, tensor->grad, tensor), zero_table); } } break; - case GGML_OP_SOFT_MAX_BACK: + case BARK_GGML_OP_SOFT_MAX_BACK: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_ROPE: + case BARK_GGML_OP_ROPE: { // necessary for llama if (src0->grad) { @@ -18048,9 +18046,9 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float)); memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool)); - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_rope_back(ctx, + bark_ggml_rope_back(ctx, tensor->grad, src1, n_dims, @@ -18063,7 +18061,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor zero_table); } } break; - case GGML_OP_ROPE_BACK: + case BARK_GGML_OP_ROPE_BACK: { if (src0->grad) { //const int n_past = ((int32_t *) tensor->op_params)[0]; @@ -18079,9 +18077,9 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float)); memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool)); - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_rope_impl(ctx, + bark_ggml_rope_impl(ctx, tensor->grad, src1, n_dims, @@ -18095,71 +18093,71 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor zero_table); } } break; - case GGML_OP_ALIBI: + case BARK_GGML_OP_ALIBI: { - GGML_ASSERT(false); // TODO: not implemented + 
BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CLAMP: + case BARK_GGML_OP_CLAMP: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D: + case BARK_GGML_OP_CONV_1D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D_STAGE_0: + case BARK_GGML_OP_CONV_1D_STAGE_0: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D_STAGE_1: + case BARK_GGML_OP_CONV_1D_STAGE_1: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_PAD_REFLEC_1D: + case BARK_GGML_OP_PAD_REFLEC_1D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_TRANSPOSE_1D: + case BARK_GGML_OP_CONV_TRANSPOSE_1D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_2D: + case BARK_GGML_OP_CONV_2D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_2D_STAGE_0: + case BARK_GGML_OP_CONV_2D_STAGE_0: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_2D_STAGE_1: + case BARK_GGML_OP_CONV_2D_STAGE_1: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_TRANSPOSE_2D: + case BARK_GGML_OP_CONV_TRANSPOSE_2D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_POOL_1D: + case BARK_GGML_OP_POOL_1D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_POOL_2D: + case BARK_GGML_OP_POOL_2D: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_UPSCALE: + case BARK_GGML_OP_UPSCALE: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_FLASH_ATTN: + case BARK_GGML_OP_FLASH_ATTN: { - struct ggml_tensor * flash_grad = NULL; + struct bark_ggml_tensor * flash_grad = NULL; if (src0->grad || src1->grad || tensor->src[2]->grad) { - int32_t t = ggml_get_op_params_i32(tensor, 0); - GGML_ASSERT(t == 0 || t == 1); + int32_t t = bark_ggml_get_op_params_i32(tensor, 0); + BARK_GGML_ASSERT(t == 0 || t == 1); bool masked = t != 0; flash_grad = - ggml_flash_attn_back(ctx, + bark_ggml_flash_attn_back(ctx, src0, src1, tensor->src[2], @@ -18167,180 +18165,180 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor masked); } - struct ggml_tensor * src2 = tensor->src[2]; - const int64_t elem_q = ggml_nelements(src0); - const int64_t elem_k = ggml_nelements(src1); - const int64_t elem_v = ggml_nelements(src2); + struct bark_ggml_tensor * src2 = tensor->src[2]; + const int64_t elem_q = bark_ggml_nelements(src0); + const int64_t elem_k = bark_ggml_nelements(src1); + const int64_t elem_v = bark_ggml_nelements(src2); - enum ggml_type result_type = flash_grad->type; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); + enum bark_ggml_type result_type = flash_grad->type; + 
BARK_GGML_ASSERT(bark_ggml_blck_size(result_type) == 1); + const size_t tsize = bark_ggml_type_size(result_type); const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); + const size_t offs_k = offs_q + BARK_GGML_PAD(elem_q * tsize, BARK_GGML_MEM_ALIGN); + const size_t offs_v = offs_k + BARK_GGML_PAD(elem_k * tsize, BARK_GGML_MEM_ALIGN); if (src0->grad) { - struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q); - struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0); - src0->grad = ggml_add_or_set(ctx, + struct bark_ggml_tensor * view_q = bark_ggml_view_1d(ctx, flash_grad, elem_q, offs_q); + struct bark_ggml_tensor * grad_q = bark_ggml_reshape(ctx, view_q, src0); + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, grad_q, zero_table); } if (src1->grad) { - struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k); - struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1); - src1->grad = ggml_add_or_set(ctx, + struct bark_ggml_tensor * view_k = bark_ggml_view_1d(ctx, flash_grad, elem_k, offs_k); + struct bark_ggml_tensor * grad_k = bark_ggml_reshape(ctx, view_k, src1); + src1->grad = bark_ggml_add_or_set(ctx, src1->grad, grad_k, zero_table); } if (src2->grad) { - struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v); - struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2); - src2->grad = ggml_add_or_set(ctx, + struct bark_ggml_tensor * view_v = bark_ggml_view_1d(ctx, flash_grad, elem_v, offs_v); + struct bark_ggml_tensor * grad_v = bark_ggml_reshape(ctx, view_v, src2); + src2->grad = bark_ggml_add_or_set(ctx, src2->grad, grad_v, zero_table); } } break; - case GGML_OP_FLASH_FF: + case BARK_GGML_OP_FLASH_FF: { - GGML_ASSERT(false); // not supported + BARK_GGML_ASSERT(false); // not supported } break; - case GGML_OP_FLASH_ATTN_BACK: + case BARK_GGML_OP_FLASH_ATTN_BACK: { - GGML_ASSERT(false); // not supported + BARK_GGML_ASSERT(false); // not supported } break; - case GGML_OP_WIN_PART: - case GGML_OP_WIN_UNPART: - case GGML_OP_UNARY: + case BARK_GGML_OP_WIN_PART: + case BARK_GGML_OP_WIN_UNPART: + case BARK_GGML_OP_UNARY: { - switch (ggml_get_unary_op(tensor)) { - case GGML_UNARY_OP_ABS: + switch (bark_ggml_get_unary_op(tensor)) { + case BARK_GGML_UNARY_OP_ABS: { if (src0->grad) { src0->grad = - ggml_add_or_set(ctx, + bark_ggml_add_or_set(ctx, src0->grad, - ggml_mul(ctx, - ggml_sgn(ctx, src0), + bark_ggml_mul(ctx, + bark_ggml_sgn(ctx, src0), tensor->grad), zero_table); } } break; - case GGML_UNARY_OP_SGN: + case BARK_GGML_UNARY_OP_SGN: { if (src0->grad) { // noop } } break; - case GGML_UNARY_OP_NEG: + case BARK_GGML_UNARY_OP_NEG: { if (src0->grad) { - src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table); + src0->grad = bark_ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table); } } break; - case GGML_UNARY_OP_STEP: + case BARK_GGML_UNARY_OP_STEP: { if (src0->grad) { // noop } } break; - case GGML_UNARY_OP_TANH: + case BARK_GGML_UNARY_OP_TANH: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_UNARY_OP_ELU: + case BARK_GGML_UNARY_OP_ELU: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_UNARY_OP_RELU: + case BARK_GGML_UNARY_OP_RELU: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, 
src0->grad, - ggml_mul(ctx, - ggml_step(ctx, src0), + bark_ggml_mul(ctx, + bark_ggml_step(ctx, src0), tensor->grad), zero_table); } } break; - case GGML_UNARY_OP_GELU: + case BARK_GGML_UNARY_OP_GELU: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_UNARY_OP_GELU_QUICK: + case BARK_GGML_UNARY_OP_GELU_QUICK: { - GGML_ASSERT(false); // TODO: not implemented + BARK_GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_UNARY_OP_SILU: + case BARK_GGML_UNARY_OP_SILU: { // necessary for llama if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_silu_back(ctx, src0, tensor->grad), + bark_ggml_silu_back(ctx, src0, tensor->grad), zero_table); } } break; default: - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } } break; - case GGML_OP_GET_REL_POS: - case GGML_OP_ADD_REL_POS: - case GGML_OP_MAP_UNARY: - case GGML_OP_MAP_BINARY: - case GGML_OP_MAP_CUSTOM1_F32: - case GGML_OP_MAP_CUSTOM2_F32: - case GGML_OP_MAP_CUSTOM3_F32: - case GGML_OP_MAP_CUSTOM1: - case GGML_OP_MAP_CUSTOM2: - case GGML_OP_MAP_CUSTOM3: + case BARK_GGML_OP_GET_REL_POS: + case BARK_GGML_OP_ADD_REL_POS: + case BARK_GGML_OP_MAP_UNARY: + case BARK_GGML_OP_MAP_BINARY: + case BARK_GGML_OP_MAP_CUSTOM1_F32: + case BARK_GGML_OP_MAP_CUSTOM2_F32: + case BARK_GGML_OP_MAP_CUSTOM3_F32: + case BARK_GGML_OP_MAP_CUSTOM1: + case BARK_GGML_OP_MAP_CUSTOM2: + case BARK_GGML_OP_MAP_CUSTOM3: { - GGML_ASSERT(false); // not supported + BARK_GGML_ASSERT(false); // not supported } break; - case GGML_OP_CROSS_ENTROPY_LOSS: + case BARK_GGML_OP_CROSS_ENTROPY_LOSS: { if (src0->grad) { - src0->grad = ggml_add_or_set(ctx, + src0->grad = bark_ggml_add_or_set(ctx, src0->grad, - ggml_cross_entropy_loss_back(ctx, + bark_ggml_cross_entropy_loss_back(ctx, src0, src1, tensor->grad), zero_table); } } break; - case GGML_OP_CROSS_ENTROPY_LOSS_BACK: + case BARK_GGML_OP_CROSS_ENTROPY_LOSS_BACK: { - GGML_ASSERT(false); // not supported + BARK_GGML_ASSERT(false); // not supported } break; - case GGML_OP_NONE: + case BARK_GGML_OP_NONE: { // nop } break; - case GGML_OP_COUNT: + case BARK_GGML_OP_COUNT: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } - for (int i = 0; i < GGML_MAX_SRC; ++i) { + for (int i = 0; i < BARK_GGML_MAX_SRC; ++i) { if (tensor->src[i] && tensor->src[i]->grad) { - GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad)); + BARK_GGML_ASSERT(bark_ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad)); } } } -static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { +static void bark_ggml_visit_parents(struct bark_ggml_cgraph * cgraph, struct bark_ggml_tensor * node) { if (node->grad == NULL) { // this usually happens when we generate intermediate nodes from constants in the backward pass // it can also happen during forward pass, if the user performs computations with constants - if (node->op != GGML_OP_NONE) { - //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); + if (node->op != BARK_GGML_OP_NONE) { + //BARK_GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); } } @@ -18349,31 +18347,31 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * return; } - for (int i = 0; i < GGML_MAX_SRC; ++i) { + for (int i = 0; i < BARK_GGML_MAX_SRC; ++i) { const int k = - (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? 
i : - (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) : + (cgraph->order == BARK_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i : + (cgraph->order == BARK_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (BARK_GGML_MAX_SRC-1-i) : /* unknown order, just fall back to using i*/ i; if (node->src[k]) { - ggml_visit_parents(cgraph, node->src[k]); + bark_ggml_visit_parents(cgraph, node->src[k]); } } - if (node->op == GGML_OP_NONE && node->grad == NULL) { + if (node->op == BARK_GGML_OP_NONE && node->grad == NULL) { // reached a leaf node, not part of the gradient graph (e.g. a constant) - GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES); + BARK_GGML_ASSERT(cgraph->n_leafs < BARK_GGML_MAX_NODES); if (strlen(node->name) == 0) { - ggml_format_name(node, "leaf_%d", cgraph->n_leafs); + bark_ggml_format_name(node, "leaf_%d", cgraph->n_leafs); } cgraph->leafs[cgraph->n_leafs] = node; cgraph->n_leafs++; } else { - GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES); + BARK_GGML_ASSERT(cgraph->n_nodes < BARK_GGML_MAX_NODES); if (strlen(node->name) == 0) { - ggml_format_name(node, "node_%d", cgraph->n_nodes); + bark_ggml_format_name(node, "node_%d", cgraph->n_nodes); } cgraph->nodes[cgraph->n_nodes] = node; @@ -18382,7 +18380,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * } } -static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { +static void bark_ggml_build_forward_impl(struct bark_ggml_cgraph * cgraph, struct bark_ggml_tensor * tensor, bool expand) { if (!expand) { cgraph->n_nodes = 0; cgraph->n_leafs = 0; @@ -18391,58 +18389,58 @@ static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_ten const int n0 = cgraph->n_nodes; UNUSED(n0); - ggml_visit_parents(cgraph, tensor); + bark_ggml_visit_parents(cgraph, tensor); const int n_new = cgraph->n_nodes - n0; - GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); + BARK_GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); if (n_new > 0) { // the last added node should always be starting point - GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor); + BARK_GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor); } } -void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) { - ggml_build_forward_impl(cgraph, tensor, true); +void bark_ggml_build_forward_expand(struct bark_ggml_cgraph * cgraph, struct bark_ggml_tensor * tensor) { + bark_ggml_build_forward_impl(cgraph, tensor, true); } -struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { - struct ggml_cgraph result = { +struct bark_ggml_cgraph bark_ggml_build_forward(struct bark_ggml_tensor * tensor) { + struct bark_ggml_cgraph result = { /*.n_nodes =*/ 0, /*.n_leafs =*/ 0, /*.nodes =*/ { NULL }, /*.grads =*/ { NULL }, /*.leafs =*/ { NULL }, /*.hash_table =*/ { NULL }, - /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, + /*.order =*/ BARK_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, /*.perf_runs =*/ 0, /*.perf_cycles =*/ 0, /*.perf_time_us =*/ 0, }; - ggml_build_forward_impl(&result, tensor, false); + bark_ggml_build_forward_impl(&result, tensor, false); return result; } -void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) { - GGML_ASSERT(gf->n_nodes > 0); +void bark_ggml_build_backward_expand(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * gf, struct bark_ggml_cgraph * gb, bool keep) { + BARK_GGML_ASSERT(gf->n_nodes > 0); // if we are keeping the 
gradient graph, we have to detach the gradient nodes from the original graph if (keep) { for (int i = 0; i < gf->n_nodes; i++) { - struct ggml_tensor * node = gf->nodes[i]; + struct bark_ggml_tensor * node = gf->nodes[i]; if (node->grad) { - node->grad = ggml_dup_tensor(ctx, node); + node->grad = bark_ggml_dup_tensor(ctx, node); gf->grads[i] = node->grad; } } } // remember original gradients which start with zero values - void ** zero_table = malloc(sizeof(void *) * GGML_GRAPH_HASHTABLE_SIZE); - memset(zero_table, 0, sizeof(void*) * GGML_GRAPH_HASHTABLE_SIZE); + void ** zero_table = malloc(sizeof(void *) * BARK_GGML_GRAPH_HASHTABLE_SIZE); + memset(zero_table, 0, sizeof(void*) * BARK_GGML_GRAPH_HASHTABLE_SIZE); for (int i = 0; i < gf->n_nodes; i++) { if (gf->grads[i]) { hash_insert(zero_table, gf->grads[i]); @@ -18450,61 +18448,59 @@ void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * } for (int i = gf->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = gf->nodes[i]; + struct bark_ggml_tensor * node = gf->nodes[i]; - // inplace operations to add gradients are not created by ggml_compute_backward + // inplace operations to add gradients are not created by bark_ggml_compute_backward // use allocator to automatically make inplace operations if (node->grad) { - ggml_compute_backward(ctx, node, zero_table); + bark_ggml_compute_backward(ctx, node, zero_table); } } for (int i = 0; i < gf->n_nodes; i++) { - struct ggml_tensor * node = gf->nodes[i]; + struct bark_ggml_tensor * node = gf->nodes[i]; if (node->is_param) { - GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); - ggml_build_forward_expand(gb, node->grad); + BARK_GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); + bark_ggml_build_forward_expand(gb, node->grad); } } free(zero_table); } -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { - struct ggml_cgraph result = *gf; - ggml_build_backward_expand(ctx, gf, &result, keep); +struct bark_ggml_cgraph bark_ggml_build_backward(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * gf, bool keep) { + struct bark_ggml_cgraph result = *gf; + bark_ggml_build_backward_expand(ctx, gf, &result, keep); return result; } -struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) { - struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE); - struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs); +struct bark_ggml_cgraph * bark_ggml_new_graph(struct bark_ggml_context * ctx) { + struct bark_ggml_object * obj = bark_ggml_new_object(ctx, BARK_GGML_OBJECT_GRAPH, BARK_GGML_GRAPH_SIZE); + struct bark_ggml_cgraph * cgraph = (struct bark_ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs); - *cgraph = (struct ggml_cgraph) { - /*.n_nodes =*/ 0, - /*.n_leafs =*/ 0, - /*.nodes =*/ { NULL }, - /*.grads =*/ { NULL }, - /*.leafs =*/ { NULL }, - /*.hash_table =*/ { NULL }, - /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, - /*.perf_runs =*/ 0, - /*.perf_cycles =*/ 0, - /*.perf_time_us =*/ 0, - }; + cgraph->n_nodes = 0; + cgraph->n_leafs = 0; + memset(cgraph->nodes, 0, sizeof(cgraph->nodes)); + memset(cgraph->grads, 0, sizeof(cgraph->grads)); + memset(cgraph->leafs, 0, sizeof(cgraph->leafs)); + memset(cgraph->visited_hash_table, 0, sizeof(cgraph->visited_hash_table)); + cgraph->order = BARK_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT; + cgraph->perf_runs = 0; + cgraph->perf_cycles = 0; + cgraph->perf_time_us = 0; return 
cgraph; } -struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) { - struct ggml_cgraph * cgraph = ggml_new_graph(ctx); - ggml_build_forward_impl(cgraph, tensor, false); +struct bark_ggml_cgraph * bark_ggml_build_forward_ctx(struct bark_ggml_context * ctx, struct bark_ggml_tensor * tensor) { + struct bark_ggml_cgraph * cgraph = bark_ggml_new_graph(ctx); + bark_ggml_build_forward_impl(cgraph, tensor, false); return cgraph; } -size_t ggml_graph_overhead(void) { - return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN); +size_t bark_ggml_graph_overhead(void) { + return BARK_GGML_OBJECT_SIZE + BARK_GGML_PAD(BARK_GGML_GRAPH_SIZE, BARK_GGML_MEM_ALIGN); } // @@ -18518,68 +18514,68 @@ size_t ggml_graph_overhead(void) { //#include // -//typedef os_unfair_lock ggml_lock_t; +//typedef os_unfair_lock bark_ggml_lock_t; // -//#define ggml_lock_init(x) UNUSED(x) -//#define ggml_lock_destroy(x) UNUSED(x) -//#define ggml_lock_lock os_unfair_lock_lock -//#define ggml_lock_unlock os_unfair_lock_unlock +//#define bark_ggml_lock_init(x) UNUSED(x) +//#define bark_ggml_lock_destroy(x) UNUSED(x) +//#define bark_ggml_lock_lock os_unfair_lock_lock +//#define bark_ggml_lock_unlock os_unfair_lock_unlock // -//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT +//#define BARK_GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT -typedef int ggml_lock_t; +typedef int bark_ggml_lock_t; -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) -#define ggml_lock_lock(x) UNUSED(x) -#define ggml_lock_unlock(x) UNUSED(x) +#define bark_ggml_lock_init(x) UNUSED(x) +#define bark_ggml_lock_destroy(x) UNUSED(x) +#define bark_ggml_lock_lock(x) UNUSED(x) +#define bark_ggml_lock_unlock(x) UNUSED(x) -#define GGML_LOCK_INITIALIZER 0 +#define BARK_GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; +typedef pthread_t bark_ggml_thread_t; -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join +#define bark_ggml_thread_create pthread_create +#define bark_ggml_thread_join pthread_join #else -//typedef pthread_spinlock_t ggml_lock_t; +//typedef pthread_spinlock_t bark_ggml_lock_t; -//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) -//#define ggml_lock_destroy pthread_spin_destroy -//#define ggml_lock_lock pthread_spin_lock -//#define ggml_lock_unlock pthread_spin_unlock +//#define bark_ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) +//#define bark_ggml_lock_destroy pthread_spin_destroy +//#define bark_ggml_lock_lock pthread_spin_lock +//#define bark_ggml_lock_unlock pthread_spin_unlock -typedef int ggml_lock_t; +typedef int bark_ggml_lock_t; -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) +#define bark_ggml_lock_init(x) UNUSED(x) +#define bark_ggml_lock_destroy(x) UNUSED(x) #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) -#define ggml_lock_lock(x) _mm_pause() +#define bark_ggml_lock_lock(x) _mm_pause() #else -#define ggml_lock_lock(x) UNUSED(x) +#define bark_ggml_lock_lock(x) UNUSED(x) #endif -#define ggml_lock_unlock(x) UNUSED(x) +#define bark_ggml_lock_unlock(x) UNUSED(x) -#define GGML_LOCK_INITIALIZER 0 +#define BARK_GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; +typedef pthread_t bark_ggml_thread_t; -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join +#define bark_ggml_thread_create pthread_create +#define bark_ggml_thread_join pthread_join #endif // Android's libc implementation "bionic" does not support 
setting affinity #if defined(__linux__) && !defined(__BIONIC__) static void set_numa_thread_affinity(int thread_n, int n_threads) { - if (!ggml_is_numa()) { + if (!bark_ggml_is_numa()) { return; } // run thread on node_num thread_n / (threads per node) const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); - struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; + struct bark_ggml_numa_node * node = &g_state.numa.nodes[node_num]; size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); @@ -18598,7 +18594,7 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { } static void clear_numa_thread_affinity(void) { - if (!ggml_is_numa()) { + if (!bark_ggml_is_numa()) { return; } @@ -18625,9 +18621,9 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(threa static void clear_numa_thread_affinity(void) {} #endif -struct ggml_compute_state_shared { - const struct ggml_cgraph * cgraph; - const struct ggml_cplan * cplan; +struct bark_ggml_compute_state_shared { + const struct bark_ggml_cgraph * cgraph; + const struct bark_ggml_cplan * cplan; int64_t perf_node_start_cycles; int64_t perf_node_start_time_us; @@ -18638,30 +18634,30 @@ struct ggml_compute_state_shared { atomic_int n_active; // num active threads atomic_int node_n; // active graph node - bool (*abort_callback)(void * data); // abort ggml_graph_compute when true + bool (*abort_callback)(void * data); // abort bark_ggml_graph_compute when true void * abort_callback_data; }; -struct ggml_compute_state { - ggml_thread_t thrd; +struct bark_ggml_compute_state { + bark_ggml_thread_t thrd; int ith; - struct ggml_compute_state_shared * shared; + struct bark_ggml_compute_state_shared * shared; }; -static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) { - int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles; - int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us; +static void bark_ggml_graph_compute_perf_stats_node(struct bark_ggml_tensor * node, const struct bark_ggml_compute_state_shared * st) { + int64_t cycles_cur = bark_ggml_perf_cycles() - st->perf_node_start_cycles; + int64_t time_us_cur = bark_ggml_perf_time_us() - st->perf_node_start_time_us; node->perf_runs++; node->perf_cycles += cycles_cur; node->perf_time_us += time_us_cur; } -static thread_ret_t ggml_graph_compute_thread(void * data) { - struct ggml_compute_state * state = (struct ggml_compute_state *) data; +static thread_ret_t bark_ggml_graph_compute_thread(void * data) { + struct bark_ggml_compute_state * state = (struct bark_ggml_compute_state *) data; - const struct ggml_cgraph * cgraph = state->shared->cgraph; - const struct ggml_cplan * cplan = state->shared->cplan; + const struct bark_ggml_cgraph * cgraph = state->shared->cgraph; + const struct bark_ggml_cplan * cplan = state->shared->cplan; const int * n_tasks_arr = cplan->n_tasks; const int n_threads = state->shared->n_threads; @@ -18673,13 +18669,13 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { while (true) { if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) { state->shared->node_n += 1; - return (thread_ret_t) GGML_EXIT_ABORTED; + return (thread_ret_t) BARK_GGML_EXIT_ABORTED; } if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) { // all other threads are finished and spinning // do finalize and init here so we don't have 
synchronize again - struct ggml_compute_params params = { - /*.type =*/ GGML_TASK_FINALIZE, + struct bark_ggml_compute_params params = { + /*.type =*/ BARK_GGML_TASK_FINALIZE, /*.ith =*/ 0, /*.nth =*/ 0, /*.wsize =*/ cplan->work_size, @@ -18688,44 +18684,44 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (node_n != -1) { /* FINALIZE */ - struct ggml_tensor * node = state->shared->cgraph->nodes[node_n]; - if (GGML_OP_HAS_FINALIZE[node->op]) { + struct bark_ggml_tensor * node = state->shared->cgraph->nodes[node_n]; + if (BARK_GGML_OP_HAS_FINALIZE[node->op]) { params.nth = n_tasks_arr[node_n]; - ggml_compute_forward(¶ms, node); + bark_ggml_compute_forward(¶ms, node); } - ggml_graph_compute_perf_stats_node(node, state->shared); + bark_ggml_graph_compute_perf_stats_node(node, state->shared); } // distribute new work or execute it direct if 1T while (++node_n < cgraph->n_nodes) { - GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes); + BARK_GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes); - struct ggml_tensor * node = cgraph->nodes[node_n]; + struct bark_ggml_tensor * node = cgraph->nodes[node_n]; const int n_tasks = n_tasks_arr[node_n]; - state->shared->perf_node_start_cycles = ggml_perf_cycles(); - state->shared->perf_node_start_time_us = ggml_perf_time_us(); + state->shared->perf_node_start_cycles = bark_ggml_perf_cycles(); + state->shared->perf_node_start_time_us = bark_ggml_perf_time_us(); params.nth = n_tasks; /* INIT */ - if (GGML_OP_HAS_INIT[node->op]) { - params.type = GGML_TASK_INIT; - ggml_compute_forward(¶ms, node); + if (BARK_GGML_OP_HAS_INIT[node->op]) { + params.type = BARK_GGML_TASK_INIT; + bark_ggml_compute_forward(¶ms, node); } if (n_tasks == 1) { // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1, // they do something more efficient than spinning (?) - params.type = GGML_TASK_COMPUTE; - ggml_compute_forward(¶ms, node); + params.type = BARK_GGML_TASK_COMPUTE; + bark_ggml_compute_forward(¶ms, node); - if (GGML_OP_HAS_FINALIZE[node->op]) { - params.type = GGML_TASK_FINALIZE; - ggml_compute_forward(¶ms, node); + if (BARK_GGML_OP_HAS_FINALIZE[node->op]) { + params.type = BARK_GGML_TASK_FINALIZE; + bark_ggml_compute_forward(¶ms, node); } - ggml_graph_compute_perf_stats_node(node, state->shared); + bark_ggml_graph_compute_perf_stats_node(node, state->shared); } else { break; } @@ -18745,7 +18741,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { // depending on the workload and the operating system. 
// since it is not clear what is the best approach, it should potentially become user-configurable // ref: https://github.com/ggerganov/ggml/issues/291 -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) +#if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) sched_yield(); #endif @@ -18758,11 +18754,11 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (node_n >= cgraph->n_nodes) break; /* COMPUTE */ - struct ggml_tensor * node = cgraph->nodes[node_n]; + struct bark_ggml_tensor * node = cgraph->nodes[node_n]; const int n_tasks = n_tasks_arr[node_n]; - struct ggml_compute_params params = { - /*.type =*/ GGML_TASK_COMPUTE, + struct bark_ggml_compute_params params = { + /*.type =*/ BARK_GGML_TASK_COMPUTE, /*.ith =*/ state->ith, /*.nth =*/ n_tasks, /*.wsize =*/ cplan->work_size, @@ -18770,211 +18766,211 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { }; if (state->ith < n_tasks) { - ggml_compute_forward(¶ms, node); + bark_ggml_compute_forward(¶ms, node); } } - return GGML_EXIT_SUCCESS; + return BARK_GGML_EXIT_SUCCESS; } -struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { +struct bark_ggml_cplan bark_ggml_graph_plan(struct bark_ggml_cgraph * cgraph, int n_threads) { if (n_threads <= 0) { - n_threads = GGML_DEFAULT_N_THREADS; + n_threads = BARK_GGML_DEFAULT_N_THREADS; } size_t work_size = 0; - struct ggml_cplan cplan; - memset(&cplan, 0, sizeof(struct ggml_cplan)); + struct bark_ggml_cplan cplan; + memset(&cplan, 0, sizeof(struct bark_ggml_cplan)); // thread scheduling for the different operations + work buffer size estimation for (int i = 0; i < cgraph->n_nodes; i++) { int n_tasks = 1; - struct ggml_tensor * node = cgraph->nodes[i]; + struct bark_ggml_tensor * node = cgraph->nodes[i]; switch (node->op) { - case GGML_OP_CPY: - case GGML_OP_DUP: + case BARK_GGML_OP_CPY: + case BARK_GGML_OP_DUP: { n_tasks = n_threads; size_t cur = 0; - if (ggml_is_quantized(node->type)) { - cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; + if (bark_ggml_is_quantized(node->type)) { + cur = bark_ggml_type_size(BARK_GGML_TYPE_F32) * node->ne[0] * n_tasks; } work_size = MAX(work_size, cur); } break; - case GGML_OP_ADD: - case GGML_OP_ADD1: + case BARK_GGML_OP_ADD: + case BARK_GGML_OP_ADD1: { n_tasks = n_threads; size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { - cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; + if (bark_ggml_is_quantized(node->src[0]->type)) { + cur = bark_ggml_type_size(BARK_GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } work_size = MAX(work_size, cur); } break; - case GGML_OP_ACC: + case BARK_GGML_OP_ACC: { n_tasks = n_threads; size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { - cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; + if (bark_ggml_is_quantized(node->src[0]->type)) { + cur = bark_ggml_type_size(BARK_GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; } work_size = MAX(work_size, cur); } break; - case GGML_OP_SUB: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_LOG: - case GGML_OP_SUM: - case GGML_OP_SUM_ROWS: - case GGML_OP_MEAN: - case GGML_OP_ARGMAX: - case GGML_OP_REPEAT: - case GGML_OP_REPEAT_BACK: + case BARK_GGML_OP_SUB: + case BARK_GGML_OP_DIV: + case BARK_GGML_OP_SQR: + case BARK_GGML_OP_SQRT: + case BARK_GGML_OP_LOG: + case BARK_GGML_OP_SUM: + case BARK_GGML_OP_SUM_ROWS: + case BARK_GGML_OP_MEAN: + case BARK_GGML_OP_ARGMAX: + case BARK_GGML_OP_REPEAT: + case BARK_GGML_OP_REPEAT_BACK: 
{ n_tasks = 1; } break; - case GGML_OP_UNARY: + case BARK_GGML_OP_UNARY: { - switch (ggml_get_unary_op(node)) { - case GGML_UNARY_OP_ABS: - case GGML_UNARY_OP_SGN: - case GGML_UNARY_OP_NEG: - case GGML_UNARY_OP_STEP: - case GGML_UNARY_OP_TANH: - case GGML_UNARY_OP_ELU: - case GGML_UNARY_OP_RELU: + switch (bark_ggml_get_unary_op(node)) { + case BARK_GGML_UNARY_OP_ABS: + case BARK_GGML_UNARY_OP_SGN: + case BARK_GGML_UNARY_OP_NEG: + case BARK_GGML_UNARY_OP_STEP: + case BARK_GGML_UNARY_OP_TANH: + case BARK_GGML_UNARY_OP_ELU: + case BARK_GGML_UNARY_OP_RELU: { n_tasks = 1; } break; - case GGML_UNARY_OP_GELU: - case GGML_UNARY_OP_GELU_QUICK: - case GGML_UNARY_OP_SILU: + case BARK_GGML_UNARY_OP_GELU: + case BARK_GGML_UNARY_OP_GELU_QUICK: + case BARK_GGML_UNARY_OP_SILU: { n_tasks = n_threads; } break; } } break; - case GGML_OP_SILU_BACK: - case GGML_OP_MUL: - case GGML_OP_NORM: - case GGML_OP_RMS_NORM: - case GGML_OP_RMS_NORM_BACK: - case GGML_OP_GROUP_NORM: + case BARK_GGML_OP_SILU_BACK: + case BARK_GGML_OP_MUL: + case BARK_GGML_OP_NORM: + case BARK_GGML_OP_RMS_NORM: + case BARK_GGML_OP_RMS_NORM_BACK: + case BARK_GGML_OP_GROUP_NORM: { n_tasks = n_threads; } break; - case GGML_OP_CONCAT: - case GGML_OP_MUL_MAT: + case BARK_GGML_OP_CONCAT: + case BARK_GGML_OP_MUL_MAT: { n_tasks = n_threads; // TODO: use different scheduling for different matrix sizes - //const int nr0 = ggml_nrows(node->src[0]); - //const int nr1 = ggml_nrows(node->src[1]); + //const int nr0 = bark_ggml_nrows(node->src[0]); + //const int nr1 = bark_ggml_nrows(node->src[1]); //n_tasks = MIN(n_threads, MAX(1, nr0/128)); //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks); size_t cur = 0; - const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type; + const enum bark_ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type; -#if defined(GGML_USE_CUBLAS) - if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) { +#if defined(BARK_GGML_USE_CUBLAS) + if (bark_ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) { n_tasks = 1; // TODO: this actually is doing nothing // the threads are still spinning } else -#elif defined(GGML_USE_CLBLAST) - if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) { +#elif defined(BARK_GGML_USE_CLBLAST) + if (bark_ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) { n_tasks = 1; // TODO: this actually is doing nothing // the threads are still spinning - cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node); + cur = bark_ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node); } else #endif -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) - if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) { +#if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) + if (bark_ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) { n_tasks = 1; // TODO: this actually is doing nothing // the threads are still spinning - if (node->src[0]->type != GGML_TYPE_F32) { + if (node->src[0]->type != BARK_GGML_TYPE_F32) { // here we need memory just for single 2D matrix from src0 - cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]); + cur = bark_ggml_type_size(BARK_GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]); } } else #endif if (node->src[1]->type != vec_dot_type) { - cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type); + cur = 
bark_ggml_type_size(vec_dot_type)*bark_ggml_nelements(node->src[1])/bark_ggml_blck_size(vec_dot_type); } else { cur = 0; } work_size = MAX(work_size, cur); } break; - case GGML_OP_OUT_PROD: + case BARK_GGML_OP_OUT_PROD: { n_tasks = n_threads; size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { - cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; + if (bark_ggml_is_quantized(node->src[0]->type)) { + cur = bark_ggml_type_size(BARK_GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } work_size = MAX(work_size, cur); } break; - case GGML_OP_SCALE: + case BARK_GGML_OP_SCALE: { n_tasks = 1; } break; - case GGML_OP_SET: - case GGML_OP_CONT: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - case GGML_OP_GET_ROWS: - case GGML_OP_GET_ROWS_BACK: - case GGML_OP_DIAG: + case BARK_GGML_OP_SET: + case BARK_GGML_OP_CONT: + case BARK_GGML_OP_RESHAPE: + case BARK_GGML_OP_VIEW: + case BARK_GGML_OP_PERMUTE: + case BARK_GGML_OP_TRANSPOSE: + case BARK_GGML_OP_GET_ROWS: + case BARK_GGML_OP_GET_ROWS_BACK: + case BARK_GGML_OP_DIAG: { n_tasks = 1; } break; - case GGML_OP_DIAG_MASK_ZERO: - case GGML_OP_DIAG_MASK_INF: - case GGML_OP_SOFT_MAX: - case GGML_OP_SOFT_MAX_BACK: - case GGML_OP_ROPE: - case GGML_OP_ROPE_BACK: - case GGML_OP_ADD_REL_POS: + case BARK_GGML_OP_DIAG_MASK_ZERO: + case BARK_GGML_OP_DIAG_MASK_INF: + case BARK_GGML_OP_SOFT_MAX: + case BARK_GGML_OP_SOFT_MAX_BACK: + case BARK_GGML_OP_ROPE: + case BARK_GGML_OP_ROPE_BACK: + case BARK_GGML_OP_ADD_REL_POS: { n_tasks = n_threads; } break; - case GGML_OP_ALIBI: + case BARK_GGML_OP_ALIBI: { n_tasks = 1; //TODO } break; - case GGML_OP_CLAMP: + case BARK_GGML_OP_CLAMP: { n_tasks = 1; //TODO } break; - case GGML_OP_CONV_1D: + case BARK_GGML_OP_CONV_1D: { n_tasks = n_threads; - GGML_ASSERT(node->src[0]->ne[3] == 1); - GGML_ASSERT(node->src[1]->ne[2] == 1); - GGML_ASSERT(node->src[1]->ne[3] == 1); + BARK_GGML_ASSERT(node->src[0]->ne[3] == 1); + BARK_GGML_ASSERT(node->src[1]->ne[2] == 1); + BARK_GGML_ASSERT(node->src[1]->ne[3] == 1); const int64_t ne00 = node->src[0]->ne[0]; const int64_t ne01 = node->src[0]->ne[1]; @@ -18994,37 +18990,37 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { size_t cur = 0; - if (node->src[0]->type == GGML_TYPE_F16 && - node->src[1]->type == GGML_TYPE_F32) { - cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0); - } else if (node->src[0]->type == GGML_TYPE_F32 && - node->src[1]->type == GGML_TYPE_F32) { + if (node->src[0]->type == BARK_GGML_TYPE_F16 && + node->src[1]->type == BARK_GGML_TYPE_F32) { + cur = sizeof(bark_ggml_fp16_t)*(ne0*ne1*ew0); + } else if (node->src[0]->type == BARK_GGML_TYPE_F32 && + node->src[1]->type == BARK_GGML_TYPE_F32) { cur = sizeof(float)*(ne0*ne1*ew0); } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } work_size = MAX(work_size, cur); } break; - case GGML_OP_PAD_REFLEC_1D: + case BARK_GGML_OP_PAD_REFLEC_1D: { n_tasks = 1; } break; - case GGML_OP_CONV_1D_STAGE_0: + case BARK_GGML_OP_CONV_1D_STAGE_0: { n_tasks = n_threads; } break; - case GGML_OP_CONV_1D_STAGE_1: + case BARK_GGML_OP_CONV_1D_STAGE_1: { n_tasks = n_threads; } break; - case GGML_OP_CONV_TRANSPOSE_1D: + case BARK_GGML_OP_CONV_TRANSPOSE_1D: { n_tasks = n_threads; - GGML_ASSERT(node->src[0]->ne[3] == 1); - GGML_ASSERT(node->src[1]->ne[2] == 1); - GGML_ASSERT(node->src[1]->ne[3] == 1); + BARK_GGML_ASSERT(node->src[0]->ne[3] == 1); + BARK_GGML_ASSERT(node->src[1]->ne[2] == 1); + BARK_GGML_ASSERT(node->src[1]->ne[3] == 1); const int64_t ne00 = 
node->src[0]->ne[0]; // K const int64_t ne01 = node->src[0]->ne[1]; // Cout @@ -19034,21 +19030,21 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { const int64_t ne11 = node->src[1]->ne[1]; // Cin size_t cur = 0; - if (node->src[0]->type == GGML_TYPE_F16 && - node->src[1]->type == GGML_TYPE_F32) { - cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; - cur += sizeof(ggml_fp16_t)*ne10*ne11; - } else if (node->src[0]->type == GGML_TYPE_F32 && - node->src[1]->type == GGML_TYPE_F32) { + if (node->src[0]->type == BARK_GGML_TYPE_F16 && + node->src[1]->type == BARK_GGML_TYPE_F32) { + cur += sizeof(bark_ggml_fp16_t)*ne00*ne01*ne02; + cur += sizeof(bark_ggml_fp16_t)*ne10*ne11; + } else if (node->src[0]->type == BARK_GGML_TYPE_F32 && + node->src[1]->type == BARK_GGML_TYPE_F32) { cur += sizeof(float)*ne00*ne01*ne02; cur += sizeof(float)*ne10*ne11; } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } work_size = MAX(work_size, cur); } break; - case GGML_OP_CONV_2D: + case BARK_GGML_OP_CONV_2D: { n_tasks = n_threads; @@ -19073,28 +19069,28 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { size_t cur = 0; - if (node->src[0]->type == GGML_TYPE_F16 && - node->src[1]->type == GGML_TYPE_F32) { + if (node->src[0]->type == BARK_GGML_TYPE_F16 && + node->src[1]->type == BARK_GGML_TYPE_F32) { // im2col: [N*OH*OW, IC*KH*KW] - cur = sizeof(ggml_fp16_t)*(ne3*ne0*ne1*ew0); - } else if (node->src[0]->type == GGML_TYPE_F32 && - node->src[1]->type == GGML_TYPE_F32) { + cur = sizeof(bark_ggml_fp16_t)*(ne3*ne0*ne1*ew0); + } else if (node->src[0]->type == BARK_GGML_TYPE_F32 && + node->src[1]->type == BARK_GGML_TYPE_F32) { cur = sizeof(float)* (ne10*ne11*ne12); } else { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } work_size = MAX(work_size, cur); } break; - case GGML_OP_CONV_2D_STAGE_0: + case BARK_GGML_OP_CONV_2D_STAGE_0: { n_tasks = n_threads; } break; - case GGML_OP_CONV_2D_STAGE_1: + case BARK_GGML_OP_CONV_2D_STAGE_1: { n_tasks = n_threads; } break; - case GGML_OP_CONV_TRANSPOSE_2D: + case BARK_GGML_OP_CONV_TRANSPOSE_2D: { n_tasks = n_threads; @@ -19108,136 +19104,136 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { const int64_t ne12 = node->src[1]->ne[2]; // Channels In size_t cur = 0; - cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03; - cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12; + cur += sizeof(bark_ggml_fp16_t)*ne00*ne01*ne02*ne03; + cur += sizeof(bark_ggml_fp16_t)*ne10*ne11*ne12; work_size = MAX(work_size, cur); } break; - case GGML_OP_POOL_1D: - case GGML_OP_POOL_2D: + case BARK_GGML_OP_POOL_1D: + case BARK_GGML_OP_POOL_2D: { n_tasks = 1; } break; - case GGML_OP_UPSCALE: + case BARK_GGML_OP_UPSCALE: { n_tasks = n_threads; } break; - case GGML_OP_FLASH_ATTN: + case BARK_GGML_OP_FLASH_ATTN: { n_tasks = n_threads; size_t cur = 0; - const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); + const int64_t ne11 = bark_ggml_up(node->src[1]->ne[1], BARK_GGML_SOFT_MAX_UNROLL); - if (node->src[1]->type == GGML_TYPE_F32) { + if (node->src[1]->type == BARK_GGML_TYPE_F32) { cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 } - if (node->src[1]->type == GGML_TYPE_F16) { + if (node->src[1]->type == BARK_GGML_TYPE_F16) { cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 } work_size = MAX(work_size, cur); } break; - case GGML_OP_FLASH_FF: + 
case BARK_GGML_OP_FLASH_FF: { n_tasks = n_threads; size_t cur = 0; - if (node->src[1]->type == GGML_TYPE_F32) { + if (node->src[1]->type == BARK_GGML_TYPE_F32) { cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 } - if (node->src[1]->type == GGML_TYPE_F16) { + if (node->src[1]->type == BARK_GGML_TYPE_F16) { cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 } work_size = MAX(work_size, cur); } break; - case GGML_OP_FLASH_ATTN_BACK: + case BARK_GGML_OP_FLASH_ATTN_BACK: { n_tasks = n_threads; size_t cur = 0; const int64_t D = node->src[0]->ne[0]; - const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); - const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back - if (node->src[1]->type == GGML_TYPE_F32) { + const int64_t ne11 = bark_ggml_up(node->src[1]->ne[1], BARK_GGML_SOFT_MAX_UNROLL); + const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in bark_ggml_compute_forward_flash_attn_back + if (node->src[1]->type == BARK_GGML_TYPE_F32) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } - if (node->src[1]->type == GGML_TYPE_F16) { + if (node->src[1]->type == BARK_GGML_TYPE_F16) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } work_size = MAX(work_size, cur); } break; - case GGML_OP_WIN_PART: - case GGML_OP_WIN_UNPART: - case GGML_OP_GET_REL_POS: - case GGML_OP_MAP_UNARY: - case GGML_OP_MAP_BINARY: - case GGML_OP_MAP_CUSTOM1_F32: - case GGML_OP_MAP_CUSTOM2_F32: - case GGML_OP_MAP_CUSTOM3_F32: + case BARK_GGML_OP_WIN_PART: + case BARK_GGML_OP_WIN_UNPART: + case BARK_GGML_OP_GET_REL_POS: + case BARK_GGML_OP_MAP_UNARY: + case BARK_GGML_OP_MAP_BINARY: + case BARK_GGML_OP_MAP_CUSTOM1_F32: + case BARK_GGML_OP_MAP_CUSTOM2_F32: + case BARK_GGML_OP_MAP_CUSTOM3_F32: { n_tasks = 1; } break; - case GGML_OP_MAP_CUSTOM1: + case BARK_GGML_OP_MAP_CUSTOM1: { - struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { + struct bark_ggml_map_custom1_op_params * p = (struct bark_ggml_map_custom1_op_params *) node->op_params; + if (p->n_tasks == BARK_GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p->n_tasks, n_threads); } } break; - case GGML_OP_MAP_CUSTOM2: + case BARK_GGML_OP_MAP_CUSTOM2: { - struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { + struct bark_ggml_map_custom2_op_params * p = (struct bark_ggml_map_custom2_op_params *) node->op_params; + if (p->n_tasks == BARK_GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p->n_tasks, n_threads); } } break; - case GGML_OP_MAP_CUSTOM3: + case BARK_GGML_OP_MAP_CUSTOM3: { - struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { + struct bark_ggml_map_custom3_op_params * p = (struct bark_ggml_map_custom3_op_params *) node->op_params; + if (p->n_tasks == BARK_GGML_N_TASKS_MAX) { n_tasks = n_threads; } else { n_tasks = MIN(p->n_tasks, n_threads); } } break; - case GGML_OP_CROSS_ENTROPY_LOSS: + case 
BARK_GGML_OP_CROSS_ENTROPY_LOSS: { n_tasks = n_threads; - size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); + size_t cur = bark_ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); work_size = MAX(work_size, cur); } break; - case GGML_OP_CROSS_ENTROPY_LOSS_BACK: + case BARK_GGML_OP_CROSS_ENTROPY_LOSS_BACK: { n_tasks = n_threads; } break; - case GGML_OP_NONE: + case BARK_GGML_OP_NONE: { n_tasks = 1; } break; - case GGML_OP_COUNT: + case BARK_GGML_OP_COUNT: { - GGML_ASSERT(false); + BARK_GGML_ASSERT(false); } break; } @@ -19255,25 +19251,25 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { return cplan; } -int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { +int bark_ggml_graph_compute(struct bark_ggml_cgraph * cgraph, struct bark_ggml_cplan * cplan) { { - GGML_ASSERT(cplan); - GGML_ASSERT(cplan->n_threads > 0); + BARK_GGML_ASSERT(cplan); + BARK_GGML_ASSERT(cplan->n_threads > 0); if (cplan->work_size > 0) { - GGML_ASSERT(cplan->work_data); + BARK_GGML_ASSERT(cplan->work_data); } for (int i = 0; i < cgraph->n_nodes; ++i) { - if (cgraph->nodes[i]->op != GGML_OP_NONE) { - GGML_ASSERT(cplan->n_tasks[i] > 0); + if (cgraph->nodes[i]->op != BARK_GGML_OP_NONE) { + BARK_GGML_ASSERT(cplan->n_tasks[i] > 0); } } } const int n_threads = cplan->n_threads; - struct ggml_compute_state_shared state_shared = { + struct bark_ggml_compute_state_shared state_shared = { /*.cgraph =*/ cgraph, /*.cgraph_plan =*/ cplan, /*.perf_node_start_cycles =*/ 0, @@ -19284,19 +19280,19 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { /*.abort_callback =*/ NULL, /*.abort_callback_data =*/ NULL, }; - struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads); + struct bark_ggml_compute_state * workers = alloca(sizeof(struct bark_ggml_compute_state)*n_threads); // create thread pool if (n_threads > 1) { for (int j = 1; j < n_threads; ++j) { - workers[j] = (struct ggml_compute_state) { + workers[j] = (struct bark_ggml_compute_state) { .thrd = 0, .ith = j, .shared = &state_shared, }; - const int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]); - GGML_ASSERT(rc == 0); + const int rc = bark_ggml_thread_create(&workers[j].thrd, NULL, bark_ggml_graph_compute_thread, &workers[j]); + BARK_GGML_ASSERT(rc == 0); UNUSED(rc); } } @@ -19304,11 +19300,11 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { workers[0].ith = 0; workers[0].shared = &state_shared; - const int64_t perf_start_cycles = ggml_perf_cycles(); - const int64_t perf_start_time_us = ggml_perf_time_us(); + const int64_t perf_start_cycles = bark_ggml_perf_cycles(); + const int64_t perf_start_time_us = bark_ggml_perf_time_us(); // this is a work thread too - int compute_status = (size_t) ggml_graph_compute_thread(&workers[0]); + int compute_status = (size_t) bark_ggml_graph_compute_thread(&workers[0]); // don't leave affinity set on the main thread clear_numa_thread_affinity(); @@ -19316,24 +19312,24 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { // join or kill thread pool if (n_threads > 1) { for (int j = 1; j < n_threads; j++) { - const int rc = ggml_thread_join(workers[j].thrd, NULL); - GGML_ASSERT(rc == 0); + const int rc = bark_ggml_thread_join(workers[j].thrd, NULL); + BARK_GGML_ASSERT(rc == 0); } } // performance stats (graph) { - int64_t perf_cycles_cur = ggml_perf_cycles() - 
perf_start_cycles; - int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us; + int64_t perf_cycles_cur = bark_ggml_perf_cycles() - perf_start_cycles; + int64_t perf_time_us_cur = bark_ggml_perf_time_us() - perf_start_time_us; cgraph->perf_runs++; cgraph->perf_cycles += perf_cycles_cur; cgraph->perf_time_us += perf_time_us_cur; - GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", + BARK_GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", __func__, cgraph->perf_runs, - (double) perf_cycles_cur / (double) ggml_cycles_per_ms(), - (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs, + (double) perf_cycles_cur / (double) bark_ggml_cycles_per_ms(), + (double) cgraph->perf_cycles / (double) bark_ggml_cycles_per_ms() / (double) cgraph->perf_runs, (double) perf_time_us_cur / 1000.0, (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs); } @@ -19341,29 +19337,29 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { return compute_status; } -void ggml_graph_reset(struct ggml_cgraph * cgraph) { +void bark_ggml_graph_reset(struct bark_ggml_cgraph * cgraph) { for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * grad = cgraph->grads[i]; + struct bark_ggml_tensor * grad = cgraph->grads[i]; if (grad) { - ggml_set_zero(grad); + bark_ggml_set_zero(grad); } } } -void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) { - struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads); +void bark_ggml_graph_compute_with_ctx(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * cgraph, int n_threads) { + struct bark_ggml_cplan cplan = bark_ggml_graph_plan(cgraph, n_threads); - struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size); + struct bark_ggml_object * obj = bark_ggml_new_object(ctx, BARK_GGML_OBJECT_WORK_BUFFER, cplan.work_size); cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs; - ggml_graph_compute(cgraph, &cplan); + bark_ggml_graph_compute(cgraph, &cplan); } -struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name) { +struct bark_ggml_tensor * bark_ggml_graph_get_tensor(struct bark_ggml_cgraph * cgraph, const char * name) { for (int i = 0; i < cgraph->n_leafs; i++) { - struct ggml_tensor * leaf = cgraph->leafs[i]; + struct bark_ggml_tensor * leaf = cgraph->leafs[i]; if (strcmp(leaf->name, name) == 0) { return leaf; @@ -19371,7 +19367,7 @@ struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const ch } for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; + struct bark_ggml_tensor * node = cgraph->nodes[i]; if (strcmp(node->name, name) == 0) { return node; @@ -19381,13 +19377,13 @@ struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const ch return NULL; } -static void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fout) { +static void bark_ggml_graph_export_leaf(const struct bark_ggml_tensor * tensor, FILE * fout) { const int64_t * ne = tensor->ne; const size_t * nb = tensor->nb; fprintf(fout, "%-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n", - ggml_type_name(tensor->type), - ggml_op_name (tensor->op), + bark_ggml_type_name(tensor->type), + bark_ggml_op_name (tensor->op), tensor->n_dims, ne[0], ne[1], ne[2], ne[3], nb[0], nb[1], nb[2], nb[3], @@ -19395,14 +19391,14 @@ static 
void ggml_graph_export_leaf(const struct ggml_tensor * tensor, FILE * fou tensor->name); } -static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char * arg, FILE * fout) { +static void bark_ggml_graph_export_node(const struct bark_ggml_tensor * tensor, const char * arg, FILE * fout) { const int64_t * ne = tensor->ne; const size_t * nb = tensor->nb; fprintf(fout, "%-6s %-6s %-12s %8d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %16zu %16zu %16zu %16zu %16p %32s\n", arg, - ggml_type_name(tensor->type), - ggml_op_name (tensor->op), + bark_ggml_type_name(tensor->type), + bark_ggml_op_name (tensor->op), tensor->n_dims, ne[0], ne[1], ne[2], ne[3], nb[0], nb[1], nb[2], nb[3], @@ -19410,13 +19406,13 @@ static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char tensor->name); } -void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { +void bark_ggml_graph_export(const struct bark_ggml_cgraph * cgraph, const char * fname) { uint64_t size_eval = 0; // compute size of intermediate results // TODO: does not take into account scratch buffers !!!! for (int i = 0; i < cgraph->n_nodes; ++i) { - size_eval += ggml_nbytes_pad(cgraph->nodes[i]); + size_eval += bark_ggml_nbytes_pad(cgraph->nodes[i]); } // print @@ -19424,8 +19420,8 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { FILE * fout = stdout; fprintf(fout, "\n"); - fprintf(fout, "%-16s %8x\n", "magic", GGML_FILE_MAGIC); - fprintf(fout, "%-16s %8d\n", "version", GGML_FILE_VERSION); + fprintf(fout, "%-16s %8x\n", "magic", BARK_GGML_FILE_MAGIC); + fprintf(fout, "%-16s %8d\n", "version", BARK_GGML_FILE_VERSION); fprintf(fout, "%-16s %8d\n", "leafs", cgraph->n_leafs); fprintf(fout, "%-16s %8d\n", "nodes", cgraph->n_nodes); fprintf(fout, "%-16s %" PRIu64 "\n", "eval", size_eval); @@ -19436,11 +19432,11 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "DATA", "NAME"); for (int i = 0; i < cgraph->n_leafs; ++i) { - ggml_graph_export_leaf(cgraph->leafs[i], fout); + bark_ggml_graph_export_leaf(cgraph->leafs[i], fout); - GGML_ASSERT(cgraph->leafs[i]->op == GGML_OP_NONE); - GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL); - GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL); + BARK_GGML_ASSERT(cgraph->leafs[i]->op == BARK_GGML_OP_NONE); + BARK_GGML_ASSERT(cgraph->leafs[i]->src[0] == NULL); + BARK_GGML_ASSERT(cgraph->leafs[i]->src[1] == NULL); } // header @@ -19449,11 +19445,11 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { "ARG", "TYPE", "OP", "NDIMS", "NE0", "NE1", "NE2", "NE3", "NB0", "NB1", "NB2", "NB3", "NTASKS", "DATA", "NAME"); for (int i = 0; i < cgraph->n_nodes; ++i) { - ggml_graph_export_node(cgraph->nodes[i], "DST", fout); + bark_ggml_graph_export_node(cgraph->nodes[i], "DST", fout); - for (int j = 0; j < GGML_MAX_SRC; ++j) { + for (int j = 0; j < BARK_GGML_MAX_SRC; ++j) { if (cgraph->nodes[i]->src[j]) { - ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout); + bark_ggml_graph_export_node(cgraph->nodes[i]->src[j], "SRC", fout); } } @@ -19474,8 +19470,8 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { // header { - const uint32_t magic = GGML_FILE_MAGIC; - const uint32_t version = GGML_FILE_VERSION; + const uint32_t magic = BARK_GGML_FILE_MAGIC; + const uint32_t version = BARK_GGML_FILE_VERSION; const uint32_t n_leafs = cgraph->n_leafs; const uint32_t nodes = 
cgraph->n_nodes; @@ -19489,7 +19485,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { // leafs { for (int i = 0; i < cgraph->n_leafs; ++i) { - const struct ggml_tensor * tensor = cgraph->leafs[i]; + const struct bark_ggml_tensor * tensor = cgraph->leafs[i]; const uint32_t type = tensor->type; const uint32_t op = tensor->op; @@ -19499,7 +19495,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { fwrite(&op, sizeof(uint32_t), 1, fout); fwrite(&n_dims, sizeof(uint32_t), 1, fout); - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { const uint64_t ne = tensor->ne[j]; const uint64_t nb = tensor->nb[j]; @@ -19507,13 +19503,13 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { fwrite(&nb, sizeof(uint64_t), 1, fout); } - fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout); - fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout); + fwrite(tensor->name, sizeof(char), BARK_GGML_MAX_NAME, fout); + fwrite(tensor->op_params, sizeof(char), BARK_GGML_MAX_OP_PARAMS, fout); // dump the data // TODO: pad this to 32 byte boundary { - const size_t size = ggml_nbytes(tensor); + const size_t size = bark_ggml_nbytes(tensor); fwrite(tensor->data, sizeof(char), size, fout); } @@ -19523,7 +19519,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { // nodes { for (int i = 0; i < cgraph->n_nodes; ++i) { - const struct ggml_tensor * tensor = cgraph->nodes[i]; + const struct bark_ggml_tensor * tensor = cgraph->nodes[i]; const uint32_t type = tensor->type; const uint32_t op = tensor->op; @@ -19533,7 +19529,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { fwrite(&op, sizeof(uint32_t), 1, fout); fwrite(&n_dims, sizeof(uint32_t), 1, fout); - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { const uint64_t ne = tensor->ne[j]; const uint64_t nb = tensor->nb[j]; @@ -19541,18 +19537,18 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { fwrite(&nb, sizeof(uint64_t), 1, fout); } - fwrite(tensor->name, sizeof(char), GGML_MAX_NAME, fout); - fwrite(tensor->op_params, sizeof(char), GGML_MAX_OP_PARAMS, fout); + fwrite(tensor->name, sizeof(char), BARK_GGML_MAX_NAME, fout); + fwrite(tensor->op_params, sizeof(char), BARK_GGML_MAX_OP_PARAMS, fout); // output the op arguments { - struct ggml_tensor * args[GGML_MAX_SRC] = { NULL }; + struct bark_ggml_tensor * args[BARK_GGML_MAX_SRC] = { NULL }; - for (int j = 0; j < GGML_MAX_SRC; ++j) { + for (int j = 0; j < BARK_GGML_MAX_SRC; ++j) { args[j] = tensor->src[j]; } - for (int j = 0; j < GGML_MAX_SRC; ++j) { + for (int j = 0; j < BARK_GGML_MAX_SRC; ++j) { if (args[j]) { int32_t idx = -1; @@ -19570,7 +19566,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { if (idx == -1) { for (int k = 0; k < cgraph->n_nodes; ++k) { if (args[j] == cgraph->nodes[k]) { - idx = GGML_MAX_NODES + k; + idx = BARK_GGML_MAX_NODES + k; break; } } @@ -19596,13 +19592,13 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { } } -struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) { +struct bark_ggml_cgraph bark_ggml_graph_import(const char * fname, struct bark_ggml_context ** ctx_data, struct bark_ggml_context ** ctx_eval) { assert(*ctx_data == NULL); assert(*ctx_eval == NULL); - struct ggml_cgraph 
result = { 0 }; + struct bark_ggml_cgraph result = { 0 }; - struct ggml_tensor * data = NULL; + struct bark_ggml_tensor * data = NULL; // read file into data { @@ -19620,15 +19616,15 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** // create the data context { - const size_t overhead = 1*ggml_tensor_overhead(); + const size_t overhead = 1*bark_ggml_tensor_overhead(); - struct ggml_init_params params = { + struct bark_ggml_init_params params = { .mem_size = fsize + overhead, .mem_buffer = NULL, .no_alloc = false, }; - *ctx_data = ggml_init(params); + *ctx_data = bark_ggml_init(params); if (!*ctx_data) { fprintf(stderr, "%s: failed to create ggml context\n", __func__); @@ -19637,7 +19633,7 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** } } - data = ggml_new_tensor_1d(*ctx_data, GGML_TYPE_I8, fsize); + data = bark_ggml_new_tensor_1d(*ctx_data, BARK_GGML_TYPE_I8, fsize); { const size_t ret = fread(data->data, sizeof(char), fsize, fin); @@ -19657,14 +19653,14 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** const uint32_t magic = *(const uint32_t *) ptr; ptr += sizeof(magic); - if (magic != GGML_FILE_MAGIC) { + if (magic != BARK_GGML_FILE_MAGIC) { fprintf(stderr, "%s: invalid magic number, got %08x\n", __func__, magic); return result; } const uint32_t version = *(const uint32_t *) ptr; ptr += sizeof(version); - if (version != GGML_FILE_VERSION) { + if (version != BARK_GGML_FILE_VERSION) { fprintf(stderr, "%s: invalid version number\n", __func__); return result; } @@ -19678,15 +19674,15 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** // create the data context { - const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead(); + const size_t overhead = (n_leafs + n_nodes)*bark_ggml_tensor_overhead(); - struct ggml_init_params params = { + struct bark_ggml_init_params params = { .mem_size = size_eval + overhead, .mem_buffer = NULL, .no_alloc = true, }; - *ctx_eval = ggml_init(params); + *ctx_eval = bark_ggml_init(params); if (!*ctx_eval) { fprintf(stderr, "%s: failed to create ggml context\n", __func__); @@ -19705,10 +19701,10 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** op = *(const uint32_t *) ptr; ptr += sizeof(op); n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims); - int64_t ne[GGML_MAX_DIMS]; - size_t nb[GGML_MAX_DIMS]; + int64_t ne[BARK_GGML_MAX_DIMS]; + size_t nb[BARK_GGML_MAX_DIMS]; - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { uint64_t ne_cur; uint64_t nb_cur; @@ -19719,28 +19715,28 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** nb[j] = nb_cur; } - struct ggml_tensor * tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne); + struct bark_ggml_tensor * tensor = bark_ggml_new_tensor(*ctx_eval, (enum bark_ggml_type) type, n_dims, ne); - tensor->op = (enum ggml_op) op; + tensor->op = (enum bark_ggml_op) op; - memcpy(tensor->name, ptr, GGML_MAX_NAME); ptr += GGML_MAX_NAME; - memcpy(tensor->op_params, ptr, GGML_MAX_OP_PARAMS); ptr += GGML_MAX_OP_PARAMS; + memcpy(tensor->name, ptr, BARK_GGML_MAX_NAME); ptr += BARK_GGML_MAX_NAME; + memcpy(tensor->op_params, ptr, BARK_GGML_MAX_OP_PARAMS); ptr += BARK_GGML_MAX_OP_PARAMS; tensor->data = (void *) ptr; - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { tensor->nb[j] = nb[j]; } result.leafs[i] = tensor; - ptr += 
ggml_nbytes(tensor); + ptr += bark_ggml_nbytes(tensor); - fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor)); + fprintf(stderr, "%s: loaded leaf %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, bark_ggml_nbytes(tensor)); } } - ggml_set_no_alloc(*ctx_eval, false); + bark_ggml_set_no_alloc(*ctx_eval, false); // nodes { @@ -19753,12 +19749,12 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** op = *(const uint32_t *) ptr; ptr += sizeof(op); n_dims = *(const uint32_t *) ptr; ptr += sizeof(n_dims); - enum ggml_op eop = (enum ggml_op) op; + enum bark_ggml_op eop = (enum bark_ggml_op) op; - int64_t ne[GGML_MAX_DIMS]; - size_t nb[GGML_MAX_DIMS]; + int64_t ne[BARK_GGML_MAX_DIMS]; + size_t nb[BARK_GGML_MAX_DIMS]; - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { uint64_t ne_cur; uint64_t nb_cur; @@ -19769,25 +19765,25 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** nb[j] = nb_cur; } - const char * ptr_name = ptr; ptr += GGML_MAX_NAME; - const char * ptr_op_params = ptr; ptr += GGML_MAX_OP_PARAMS; + const char * ptr_name = ptr; ptr += BARK_GGML_MAX_NAME; + const char * ptr_op_params = ptr; ptr += BARK_GGML_MAX_OP_PARAMS; - const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += GGML_MAX_SRC*sizeof(int32_t); + const int32_t * ptr_arg_idx = (const int32_t *) ptr; ptr += BARK_GGML_MAX_SRC*sizeof(int32_t); - struct ggml_tensor * args[GGML_MAX_SRC] = { NULL }; + struct bark_ggml_tensor * args[BARK_GGML_MAX_SRC] = { NULL }; // parse args - for (int j = 0; j < GGML_MAX_SRC; ++j) { + for (int j = 0; j < BARK_GGML_MAX_SRC; ++j) { const int32_t arg_idx = ptr_arg_idx[j]; if (arg_idx == -1) { continue; } - if (arg_idx < GGML_MAX_NODES) { + if (arg_idx < BARK_GGML_MAX_NODES) { args[j] = result.leafs[arg_idx]; } else { - args[j] = result.nodes[arg_idx - GGML_MAX_NODES]; + args[j] = result.nodes[arg_idx - BARK_GGML_MAX_NODES]; } } @@ -19795,53 +19791,53 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** // "view" operations are handled differently // TODO: handle inplace ops - currently a copy is always made - struct ggml_tensor * tensor = NULL; + struct bark_ggml_tensor * tensor = NULL; switch (eop) { // TODO: implement other view ops - case GGML_OP_RESHAPE: + case BARK_GGML_OP_RESHAPE: { - tensor = ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]); + tensor = bark_ggml_reshape_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3]); } break; - case GGML_OP_VIEW: + case BARK_GGML_OP_VIEW: { - tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0); + tensor = bark_ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0); size_t offs; memcpy(&offs, ptr_op_params, sizeof(offs)); tensor->data = ((char *) tensor->data) + offs; } break; - case GGML_OP_TRANSPOSE: + case BARK_GGML_OP_TRANSPOSE: { - tensor = ggml_transpose(*ctx_eval, args[0]); + tensor = bark_ggml_transpose(*ctx_eval, args[0]); } break; - case GGML_OP_PERMUTE: + case BARK_GGML_OP_PERMUTE: { - tensor = ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0); + tensor = bark_ggml_view_4d(*ctx_eval, args[0], ne[0], ne[1], ne[2], ne[3], 0, 0, 0, 0); } break; default: { - tensor = ggml_new_tensor(*ctx_eval, (enum ggml_type) type, n_dims, ne); + tensor = bark_ggml_new_tensor(*ctx_eval, (enum bark_ggml_type) type, n_dims, ne); tensor->op = eop; } break; } 
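/*
 * For reference while reading this import loop: bark_ggml_graph_export writes
 * (and bark_ggml_graph_import reads back) one fixed-layout record per tensor:
 *   - uint32 type, uint32 op, uint32 n_dims
 *   - BARK_GGML_MAX_DIMS pairs of uint64 ne[j], uint64 nb[j]
 *   - char name[BARK_GGML_MAX_NAME], char op_params[BARK_GGML_MAX_OP_PARAMS]
 *   - leafs then carry their raw data (bark_ggml_nbytes(tensor) bytes, not yet padded),
 *     while nodes instead carry BARK_GGML_MAX_SRC int32 source indices: an index below
 *     BARK_GGML_MAX_NODES selects a leaf, otherwise node (index - BARK_GGML_MAX_NODES).
 * The file begins with uint32 magic and version (BARK_GGML_FILE_MAGIC / _VERSION),
 * the leaf and node counts, and an eval-size value used to size ctx_eval.
 */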
- memcpy(tensor->name, ptr_name, GGML_MAX_NAME); - memcpy(tensor->op_params, ptr_op_params, GGML_MAX_OP_PARAMS); + memcpy(tensor->name, ptr_name, BARK_GGML_MAX_NAME); + memcpy(tensor->op_params, ptr_op_params, BARK_GGML_MAX_OP_PARAMS); - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { tensor->nb[j] = nb[j]; } - for (int j = 0; j < GGML_MAX_SRC; ++j) { + for (int j = 0; j < BARK_GGML_MAX_SRC; ++j) { tensor->src[j] = args[j]; } result.nodes[i] = tensor; - fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor)); + fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, bark_ggml_nbytes(tensor)); } } } @@ -19849,51 +19845,51 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** return result; } -void ggml_graph_print(const struct ggml_cgraph * cgraph) { - int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0}; +void bark_ggml_graph_print(const struct bark_ggml_cgraph * cgraph) { + int64_t perf_total_per_op_us[BARK_GGML_OP_COUNT] = {0}; - GGML_PRINT("=== GRAPH ===\n"); + BARK_GGML_PRINT("=== GRAPH ===\n"); - GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes); + BARK_GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes); for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; + struct bark_ggml_tensor * node = cgraph->nodes[i]; perf_total_per_op_us[node->op] += MAX(1, node->perf_time_us); - GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", + BARK_GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 ", %5" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", i, node->ne[0], node->ne[1], node->ne[2], - ggml_op_name(node->op), node->is_param ? "x" : node->grad ? "g" : " ", node->perf_runs, - (double) node->perf_cycles / (double) ggml_cycles_per_ms(), - (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs, + bark_ggml_op_name(node->op), node->is_param ? "x" : node->grad ? 
"g" : " ", node->perf_runs, + (double) node->perf_cycles / (double) bark_ggml_cycles_per_ms(), + (double) node->perf_cycles / (double) bark_ggml_cycles_per_ms() / (double) node->perf_runs, (double) node->perf_time_us / 1000.0, (double) node->perf_time_us / 1000.0 / node->perf_runs); } - GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs); + BARK_GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs); for (int i = 0; i < cgraph->n_leafs; i++) { - struct ggml_tensor * node = cgraph->leafs[i]; + struct bark_ggml_tensor * node = cgraph->leafs[i]; - GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n", + BARK_GGML_PRINT(" - %3d: [ %5" PRId64 ", %5" PRId64 "] %8s %16s\n", i, node->ne[0], node->ne[1], - ggml_op_name(node->op), - ggml_get_name(node)); + bark_ggml_op_name(node->op), + bark_ggml_get_name(node)); } - for (int i = 0; i < GGML_OP_COUNT; i++) { + for (int i = 0; i < BARK_GGML_OP_COUNT; i++) { if (perf_total_per_op_us[i] == 0) { continue; } - GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0); + BARK_GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", bark_ggml_op_name(i), (double) perf_total_per_op_us[i] / 1000.0); } - GGML_PRINT("========================================\n"); + BARK_GGML_PRINT("========================================\n"); } // check if node is part of the graph -static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { +static bool bark_ggml_graph_find(const struct bark_ggml_cgraph * cgraph, const struct bark_ggml_tensor * node) { if (cgraph == NULL) { return true; } @@ -19907,9 +19903,9 @@ static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml return false; } -static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { +static struct bark_ggml_tensor * bark_ggml_graph_get_parent(const struct bark_ggml_cgraph * cgraph, const struct bark_ggml_tensor * node) { for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * parent = cgraph->nodes[i]; + struct bark_ggml_tensor * parent = cgraph->nodes[i]; if (parent->grad == node) { return parent; @@ -19919,9 +19915,9 @@ static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgr return NULL; } -static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) { - struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node); - struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent); +static void bark_ggml_graph_dump_dot_node_edge(FILE * fp, const struct bark_ggml_cgraph * gb, struct bark_ggml_tensor * node, struct bark_ggml_tensor * parent, const char * label) { + struct bark_ggml_tensor * gparent = bark_ggml_graph_get_parent(gb, node); + struct bark_ggml_tensor * gparent0 = bark_ggml_graph_get_parent(gb, parent); fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n", gparent0 ? (void *) gparent0 : (void *) parent, gparent0 ? 
"g" : "x", @@ -19932,34 +19928,34 @@ static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * label); } -static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) { +static void bark_ggml_graph_dump_dot_leaf_edge(FILE * fp, struct bark_ggml_tensor * node, struct bark_ggml_tensor * parent, const char * label) { fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n", (void *) parent, "x", (void *) node, "x", label); } -void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) { +void bark_ggml_graph_dump_dot(const struct bark_ggml_cgraph * gb, const struct bark_ggml_cgraph * gf, const char * filename) { char color[16]; FILE * fp = fopen(filename, "w"); - GGML_ASSERT(fp); + BARK_GGML_ASSERT(fp); fprintf(fp, "digraph G {\n"); fprintf(fp, " newrank = true;\n"); fprintf(fp, " rankdir = LR;\n"); for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; + struct bark_ggml_tensor * node = gb->nodes[i]; - if (ggml_graph_get_parent(gb, node) != NULL) { + if (bark_ggml_graph_get_parent(gb, node) != NULL) { continue; } if (node->is_param) { snprintf(color, sizeof(color), "yellow"); } else if (node->grad) { - if (ggml_graph_find(gf, node)) { + if (bark_ggml_graph_find(gf, node)) { snprintf(color, sizeof(color), "green"); } else { snprintf(color, sizeof(color), "lightblue"); @@ -19974,26 +19970,26 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph (void *) node, color); if (strlen(node->name) > 0) { - fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type)); + fprintf(fp, "%s (%s)|", node->name, bark_ggml_type_name(node->type)); } else { - fprintf(fp, "(%s)|", ggml_type_name(node->type)); + fprintf(fp, "(%s)|", bark_ggml_type_name(node->type)); } if (node->n_dims == 2) { - fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | %s", i, node->ne[0], node->ne[1], ggml_op_symbol(node->op)); + fprintf(fp, "%d [%" PRId64 ", %" PRId64 "] | %s", i, node->ne[0], node->ne[1], bark_ggml_op_symbol(node->op)); } else { - fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | %s", i, node->ne[0], node->ne[1], node->ne[2], ggml_op_symbol(node->op)); + fprintf(fp, "%d [%" PRId64 ", %" PRId64 ", %" PRId64 "] | %s", i, node->ne[0], node->ne[1], node->ne[2], bark_ggml_op_symbol(node->op)); } if (node->grad) { - fprintf(fp, " | %s\"; ]\n", ggml_op_symbol(node->grad->op)); + fprintf(fp, " | %s\"; ]\n", bark_ggml_op_symbol(node->grad->op)); } else { fprintf(fp, "\"; ]\n"); } } for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; + struct bark_ggml_tensor * node = gb->leafs[i]; snprintf(color, sizeof(color), "pink"); @@ -20003,25 +19999,25 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph (void *) node, color); if (strlen(node->name) > 0) { - fprintf(fp, "%s (%s)|", node->name, ggml_type_name(node->type)); + fprintf(fp, "%s (%s)|", node->name, bark_ggml_type_name(node->type)); } else { - fprintf(fp, "(%s)|", ggml_type_name(node->type)); + fprintf(fp, "(%s)|", bark_ggml_type_name(node->type)); } fprintf(fp, "CONST %d [%" PRId64 ", %" PRId64 "]", i, node->ne[0], node->ne[1]); - if (ggml_nelements(node) < 5) { + if (bark_ggml_nelements(node) < 5) { fprintf(fp, " | ("); - for (int j = 0; j < ggml_nelements(node); j++) { - if (node->type == GGML_TYPE_I8 || node->type == GGML_TYPE_I16 || node->type == GGML_TYPE_I32) { - fprintf(fp, "%d", ggml_get_i32_1d(node, 
j)); + for (int j = 0; j < bark_ggml_nelements(node); j++) { + if (node->type == BARK_GGML_TYPE_I8 || node->type == BARK_GGML_TYPE_I16 || node->type == BARK_GGML_TYPE_I32) { + fprintf(fp, "%d", bark_ggml_get_i32_1d(node, j)); } - else if (node->type == GGML_TYPE_F32 || node->type == GGML_TYPE_F16) { - fprintf(fp, "%.1e", (double)ggml_get_f32_1d(node, j)); + else if (node->type == BARK_GGML_TYPE_F32 || node->type == BARK_GGML_TYPE_F16) { + fprintf(fp, "%.1e", (double)bark_ggml_get_f32_1d(node, j)); } else { fprintf(fp, "#"); } - if (j < ggml_nelements(node) - 1) { + if (j < bark_ggml_nelements(node) - 1) { fprintf(fp, ", "); } } @@ -20031,25 +20027,25 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph } for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; + struct bark_ggml_tensor * node = gb->nodes[i]; - for (int j = 0; j < GGML_MAX_SRC; j++) { + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { if (node->src[j]) { char label[16]; snprintf(label, sizeof(label), "src %d", j); - ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label); + bark_ggml_graph_dump_dot_node_edge(fp, gb, node, node->src[j], label); } } } for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; + struct bark_ggml_tensor * node = gb->leafs[i]; - for (int j = 0; j < GGML_MAX_SRC; j++) { + for (int j = 0; j < BARK_GGML_MAX_SRC; j++) { if (node->src[j]) { char label[16]; snprintf(label, sizeof(label), "src %d", j); - ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label); + bark_ggml_graph_dump_dot_leaf_edge(fp, node, node->src[j], label); } } } @@ -20058,51 +20054,51 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph fclose(fp); - GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); + BARK_GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); } //////////////////////////////////////////////////////////////////////////////// -static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) { +static void bark_ggml_opt_set_params(int np, struct bark_ggml_tensor * const ps[], const float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; + const int64_t ne = bark_ggml_nelements(ps[p]) ; // TODO: add function to set tensor from array for (int64_t j = 0; j < ne; ++j) { - ggml_set_f32_1d(ps[p], j, x[i++]); + bark_ggml_set_f32_1d(ps[p], j, x[i++]); } } } -static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) { +static void bark_ggml_opt_get_params(int np, struct bark_ggml_tensor * const ps[], float * x) { int i = 0; for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; + const int64_t ne = bark_ggml_nelements(ps[p]) ; // TODO: add function to get all elements at once for (int64_t j = 0; j < ne; ++j) { - x[i++] = ggml_get_f32_1d(ps[p], j); + x[i++] = bark_ggml_get_f32_1d(ps[p], j); } } } -static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) { +static void bark_ggml_opt_get_grad(int np, struct bark_ggml_tensor * const ps[], float * g) { int64_t i = 0; for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; + const int64_t ne = bark_ggml_nelements(ps[p]) ; // TODO: add function to get all elements at once for (int64_t j = 0; j < ne; ++j) { - g[i++] = ggml_get_f32_1d(ps[p]->grad, j); + g[i++] = bark_ggml_get_f32_1d(ps[p]->grad, j); } } } 
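/*
 * Minimal usage sketch for the renamed optimizer entry points that appear further
 * down in this file (bark_ggml_opt_default_params, bark_ggml_opt). Illustrative only:
 * it assumes the tensor ops bark_ggml_sub / bark_ggml_mul / bark_ggml_sum and
 * bark_ggml_set_param carry the same bark_ prefix as the rest of this rename, and
 * that the prefixed header is still included as "ggml.h".
 */
#include "ggml.h"
#include <stdio.h>

int main(void) {
    struct bark_ggml_init_params ip = {
        .mem_size   = 16*1024*1024,
        .mem_buffer = NULL,
        .no_alloc   = false,
    };
    struct bark_ggml_context * ctx = bark_ggml_init(ip);

    // parameter vector x (optimized) and a fixed target a
    struct bark_ggml_tensor * x = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 8);
    struct bark_ggml_tensor * a = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 8);
    bark_ggml_set_f32(x, 0.0f);
    bark_ggml_set_f32(a, 2.0f);
    bark_ggml_set_param(ctx, x);   // assumed rename of ggml_set_param: marks x as trainable

    // scalar loss f = sum((x - a)^2); bark_ggml_opt_adam asserts the loss is a scalar
    struct bark_ggml_tensor * d = bark_ggml_sub(ctx, x, a);
    struct bark_ggml_tensor * f = bark_ggml_sum(ctx, bark_ggml_mul(ctx, d, d));

    // default Adam settings, then run the optimizer defined below
    struct bark_ggml_opt_params opt_params = bark_ggml_opt_default_params(BARK_GGML_OPT_ADAM);
    enum bark_ggml_opt_result res = bark_ggml_opt(ctx, opt_params, f);

    printf("opt result = %d, final loss = %f\n", (int) res, (double) bark_ggml_get_f32_1d(f, 0));

    bark_ggml_free(ctx);
    return 0;
}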
-static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) { +static void bark_ggml_opt_acc_grad(int np, struct bark_ggml_tensor * const ps[], float * g, float scale) { int64_t i = 0; for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; + const int64_t ne = bark_ggml_nelements(ps[p]) ; // TODO: add function to get all elements at once for (int64_t j = 0; j < ne; ++j) { - g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale; + g[i++] += bark_ggml_get_f32_1d(ps[p]->grad, j) * scale; } } } @@ -20113,36 +20109,36 @@ static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g // ref: https://arxiv.org/pdf/1412.6980.pdf // -static enum ggml_opt_result ggml_opt_adam( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - ggml_opt_callback callback, +static enum bark_ggml_opt_result bark_ggml_opt_adam( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_opt_params params, + struct bark_ggml_tensor * f, + struct bark_ggml_cgraph * gf, + struct bark_ggml_cgraph * gb, + bark_ggml_opt_callback callback, void * callback_data) { - GGML_ASSERT(ggml_is_scalar(f)); + BARK_GGML_ASSERT(bark_ggml_is_scalar(f)); // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; + struct bark_ggml_tensor * ps[BARK_GGML_MAX_PARAMS]; int np = 0; int64_t nx = 0; for (int i = 0; i < gf->n_nodes; ++i) { if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + BARK_GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - GGML_ASSERT(np < GGML_MAX_PARAMS); + BARK_GGML_ASSERT(np < BARK_GGML_MAX_PARAMS); ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); + nx += bark_ggml_nelements(gf->nodes[i]); } } if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past)) { int iter = opt->iter; - ggml_opt_init(opt->ctx, opt, params, nx); + bark_ggml_opt_init(opt->ctx, opt, params, nx); opt->iter = iter; } @@ -20164,27 +20160,27 @@ static enum ggml_opt_result ggml_opt_adam( float * pf = params.past > 0 ? 
opt->adam.pf->data : NULL; // past function values - struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads); - struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size); + struct bark_ggml_cplan cplan = bark_ggml_graph_plan(gb, params.n_threads); + struct bark_ggml_object * obj = bark_ggml_new_object(ctx, BARK_GGML_OBJECT_WORK_BUFFER, cplan.work_size); cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs; bool cancel = false; // compute the function value float fx = 0; - ggml_set_zero(opt->adam.g); + bark_ggml_set_zero(opt->adam.g); for (int accum_step = 0; accum_step < n_accum; ++accum_step) { if (callback) { callback(callback_data, accum_step, &sched, &cancel); if (cancel) { - return GGML_OPT_CANCEL; + return BARK_GGML_OPT_CANCEL; } } - // ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(gb, &cplan); - ggml_opt_acc_grad(np, ps, g, accum_norm); - fx += ggml_get_f32_1d(f, 0); + // bark_ggml_graph_reset (gf); + bark_ggml_set_f32 (f->grad, 1.0f); + bark_ggml_graph_compute(gb, &cplan); + bark_ggml_opt_acc_grad(np, ps, g, accum_norm); + fx += bark_ggml_get_f32_1d(f, 0); } fx *= accum_norm; @@ -20212,19 +20208,19 @@ static enum ggml_opt_result ggml_opt_adam( // run the optimizer for (int t = 0; t < params.adam.n_iter; ++t) { opt->iter = iter0 + t + 1; - GGML_PRINT_DEBUG ("=== iter %d ===\n", t); + BARK_GGML_PRINT_DEBUG ("=== iter %d ===\n", t); - GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0)); - GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0)); - GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0)); + BARK_GGML_PRINT_DEBUG ("f = %10.6f\n", bark_ggml_get_f32_1d(f, 0)); + BARK_GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", bark_ggml_get_f32_1d(ps[0]->grad, 0)); + BARK_GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", bark_ggml_get_f32_1d(ps[1]->grad, 0)); for (int i = 0; i < np; ++i) { - GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, - ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0)); + BARK_GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, + bark_ggml_get_f32_1d(ps[i], 0), bark_ggml_get_f32_1d(ps[i]->grad, 0)); } - const int64_t t_start_wall = ggml_time_us(); - const int64_t t_start_cpu = ggml_cycles(); + const int64_t t_start_wall = bark_ggml_time_us(); + const int64_t t_start_cpu = bark_ggml_cycles(); UNUSED(t_start_wall); UNUSED(t_start_cpu); @@ -20232,23 +20228,23 @@ static enum ggml_opt_result ggml_opt_adam( float gnorm = 1.0f; if (gclip > 0.0f) { // gradient clipping - ggml_float sum = 0.0; + bark_ggml_float sum = 0.0; for (int64_t i = 0; i < nx; ++i) { - sum += (ggml_float)(g[i]*g[i]); + sum += (bark_ggml_float)(g[i]*g[i]); } - ggml_float norm = sqrt(sum); - if (norm > (ggml_float) gclip) { - gnorm = (float) ((ggml_float) gclip / norm); + bark_ggml_float norm = sqrt(sum); + if (norm > (bark_ggml_float) gclip) { + gnorm = (float) ((bark_ggml_float) gclip / norm); } } const float beta1h = alpha*sched/(1.0f - powf(beta1, opt->iter)); const float beta2h = 1.0f/(1.0f - powf(beta2, opt->iter)); int64_t i = 0; for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]); + const int64_t ne = bark_ggml_nelements(ps[p]); const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? 
decay : 0.0f) * sched; for (int64_t j = 0; j < ne; ++j) { - float x = ggml_get_f32_1d(ps[p], j); + float x = bark_ggml_get_f32_1d(ps[p], j); float g_ = g[i]*gnorm; m[i] = m[i]*beta1 + g_*(1.0f - beta1); v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2); @@ -20256,26 +20252,26 @@ static enum ggml_opt_result ggml_opt_adam( float vh = v[i]*beta2h; vh = sqrtf(vh) + eps; x = x*(1.0f - p_decay) - mh/vh; - ggml_set_f32_1d(ps[p], j, x); + bark_ggml_set_f32_1d(ps[p], j, x); ++i; } } } fx = 0; - ggml_set_zero(opt->adam.g); + bark_ggml_set_zero(opt->adam.g); for (int accum_step = 0; accum_step < n_accum; ++accum_step) { if (callback) { callback(callback_data, accum_step, &sched, &cancel); if (cancel) { - return GGML_OPT_CANCEL;; + return BARK_GGML_OPT_CANCEL;; } } - // ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(gb, &cplan); - ggml_opt_acc_grad(np, ps, g, accum_norm); - fx += ggml_get_f32_1d(f, 0); + // bark_ggml_graph_reset (gf); + bark_ggml_set_f32 (f->grad, 1.0f); + bark_ggml_graph_compute(gb, &cplan); + bark_ggml_opt_acc_grad(np, ps, g, accum_norm); + fx += bark_ggml_get_f32_1d(f, 0); } fx *= accum_norm; @@ -20283,9 +20279,9 @@ static enum ggml_opt_result ggml_opt_adam( // check convergence if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) { - GGML_PRINT_DEBUG("converged\n"); + BARK_GGML_PRINT_DEBUG("converged\n"); - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } // delta-based convergence test @@ -20295,7 +20291,7 @@ static enum ggml_opt_result ggml_opt_adam( const float rate = (pf[(iter0 + t)%params.past] - fx)/fx; if (fabsf(rate) < params.delta) { - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } } @@ -20311,7 +20307,7 @@ static enum ggml_opt_result ggml_opt_adam( ++n_no_improvement[0]; if (n_no_improvement[0] >= params.max_no_improvement) { - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } } } @@ -20319,17 +20315,17 @@ static enum ggml_opt_result ggml_opt_adam( fx_prev[0] = fx; { - const int64_t t_end_cpu = ggml_cycles(); - GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC); + const int64_t t_end_cpu = bark_ggml_cycles(); + BARK_GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC); UNUSED(t_end_cpu); - const int64_t t_end_wall = ggml_time_us(); - GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); + const int64_t t_end_wall = bark_ggml_time_us(); + BARK_GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); UNUSED(t_end_wall); } } - return GGML_OPT_DID_NOT_CONVERGE; + return BARK_GGML_OPT_DID_NOT_CONVERGE; } // @@ -20340,15 +20336,15 @@ static enum ggml_opt_result ggml_opt_adam( // https://github.com/chokkan/liblbfgs // -struct ggml_lbfgs_iteration_data { +struct bark_ggml_lbfgs_iteration_data { float alpha; float ys; float * s; float * y; }; -static enum ggml_opt_result linesearch_backtracking( - const struct ggml_opt_params * params, +static enum bark_ggml_opt_result linesearch_backtracking( + const struct bark_ggml_opt_params * params, int nx, float * x, float * fx, @@ -20356,13 +20352,13 @@ static enum ggml_opt_result linesearch_backtracking( float * d, float * step, const float * xp, - struct ggml_tensor * f, - struct ggml_cgraph * gb, - struct ggml_cplan * cplan, + struct bark_ggml_tensor * f, + struct bark_ggml_cgraph * gb, + struct bark_ggml_cplan * cplan, const int np, - struct ggml_tensor * ps[], + struct bark_ggml_tensor * ps[], bool * cancel, - ggml_opt_callback callback, + bark_ggml_opt_callback callback, void * 
callback_data) { int count = 0; @@ -20379,15 +20375,15 @@ static enum ggml_opt_result linesearch_backtracking( const float accum_norm = 1.0f / (float) n_accum; if (*step <= 0.f) { - return GGML_LINESEARCH_INVALID_PARAMETERS; + return BARK_GGML_LINESEARCH_INVALID_PARAMETERS; } // compute the initial gradient in the search direction - ggml_vec_dot_f32(nx, &dginit, g, d); + bark_ggml_vec_dot_f32(nx, &dginit, g, d); // make sure that d points to a descent direction if (0 < dginit) { - return GGML_LINESEARCH_FAIL; + return BARK_GGML_LINESEARCH_FAIL; } // initialize local variables @@ -20395,12 +20391,12 @@ static enum ggml_opt_result linesearch_backtracking( dgtest = params->lbfgs.ftol*dginit; while (true) { - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_mad_f32(nx, x, d, *step); + bark_ggml_vec_cpy_f32(nx, x, xp); + bark_ggml_vec_mad_f32(nx, x, d, *step); // evaluate the function and gradient values { - ggml_opt_set_params(np, ps, x); + bark_ggml_opt_set_params(np, ps, x); *fx = 0; memset(g, 0, sizeof(float)*nx); @@ -20410,14 +20406,14 @@ static enum ggml_opt_result linesearch_backtracking( float sched = 0; callback(callback_data, accum_step, &sched, cancel); if (*cancel) { - return GGML_OPT_CANCEL; + return BARK_GGML_OPT_CANCEL; } } - // ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(gb, cplan); - ggml_opt_acc_grad(np, ps, g, accum_norm); - *fx += ggml_get_f32_1d(f, 0); + // bark_ggml_graph_reset (gf); + bark_ggml_set_f32 (f->grad, 1.0f); + bark_ggml_graph_compute(gb, cplan); + bark_ggml_opt_acc_grad(np, ps, g, accum_norm); + *fx += bark_ggml_get_f32_1d(f, 0); } *fx *= accum_norm; @@ -20429,17 +20425,17 @@ static enum ggml_opt_result linesearch_backtracking( width = dec; } else { // Armijo condition is satisfied - if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) { + if (params->lbfgs.linesearch == BARK_GGML_LINESEARCH_BACKTRACKING_ARMIJO) { return count; } - ggml_vec_dot_f32(nx, &dg, g, d); + bark_ggml_vec_dot_f32(nx, &dg, g, d); // check the Wolfe condition if (dg < params->lbfgs.wolfe * dginit) { width = inc; } else { - if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) { + if(params->lbfgs.linesearch == BARK_GGML_LINESEARCH_BACKTRACKING_WOLFE) { // regular Wolfe conditions return count; } @@ -20447,70 +20443,70 @@ static enum ggml_opt_result linesearch_backtracking( if(dg > -params->lbfgs.wolfe*dginit) { width = dec; } else { - // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) + // strong Wolfe condition (BARK_GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) return count; } } } if (*step < params->lbfgs.min_step) { - return GGML_LINESEARCH_MINIMUM_STEP; + return BARK_GGML_LINESEARCH_MINIMUM_STEP; } if (*step > params->lbfgs.max_step) { - return GGML_LINESEARCH_MAXIMUM_STEP; + return BARK_GGML_LINESEARCH_MAXIMUM_STEP; } if (params->lbfgs.max_linesearch <= count) { - return GGML_LINESEARCH_MAXIMUM_ITERATIONS; + return BARK_GGML_LINESEARCH_MAXIMUM_ITERATIONS; } (*step) *= width; } - GGML_UNREACHABLE(); + BARK_GGML_UNREACHABLE(); } -static enum ggml_opt_result ggml_opt_lbfgs( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - ggml_opt_callback callback, +static enum bark_ggml_opt_result bark_ggml_opt_lbfgs( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_opt_params params, + struct bark_ggml_tensor * f, + struct bark_ggml_cgraph * gf, + struct 
bark_ggml_cgraph * gb, + bark_ggml_opt_callback callback, void * callback_data) { - if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || - params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { + if (params.lbfgs.linesearch == BARK_GGML_LINESEARCH_BACKTRACKING_WOLFE || + params.lbfgs.linesearch == BARK_GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) { - return GGML_OPT_INVALID_WOLFE; + return BARK_GGML_OPT_INVALID_WOLFE; } } const int m = params.lbfgs.m; // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; + struct bark_ggml_tensor * ps[BARK_GGML_MAX_PARAMS]; int np = 0; int nx = 0; for (int i = 0; i < gf->n_nodes; ++i) { if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); + BARK_GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - GGML_ASSERT(np < GGML_MAX_PARAMS); + BARK_GGML_ASSERT(np < BARK_GGML_MAX_PARAMS); ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); + nx += bark_ggml_nelements(gf->nodes[i]); } } if ((opt->params.type != params.type) || (opt->nx != nx) || (opt->params.past != params.past) || (opt->params.lbfgs.m != params.lbfgs.m)) { int iter = opt->iter; - ggml_opt_init(ctx, opt, params, nx); + bark_ggml_opt_init(ctx, opt, params, nx); opt->iter = iter; } - struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads); - struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size); + struct bark_ggml_cplan cplan = bark_ggml_graph_plan(gb, params.n_threads); + struct bark_ggml_object * obj = bark_ggml_new_object(ctx, BARK_GGML_OBJECT_WORK_BUFFER, cplan.work_size); cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs; float * x = opt->lbfgs.x->data; // current parameters @@ -20529,7 +20525,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( float gnorm = 0.0f; // ||g|| // initialize x from the graph nodes - ggml_opt_get_params(np, ps, x); + bark_ggml_opt_get_params(np, ps, x); // the L-BFGS memory float * lm_alpha = opt->lbfgs.lmal->data; @@ -20541,7 +20537,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( // evaluate the function value and its gradient { - ggml_opt_set_params(np, ps, x); + bark_ggml_opt_set_params(np, ps, x); fx = 0; memset(g, 0, sizeof(float)*nx); @@ -20551,14 +20547,14 @@ static enum ggml_opt_result ggml_opt_lbfgs( float sched = 0; callback(callback_data, accum_step, &sched, &cancel); if (cancel) { - return GGML_OPT_CANCEL; + return BARK_GGML_OPT_CANCEL; } } - // ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(gb, &cplan); - ggml_opt_acc_grad(np, ps, g, accum_norm); - fx += ggml_get_f32_1d(f, 0); + // bark_ggml_graph_reset (gf); + bark_ggml_set_f32 (f->grad, 1.0f); + bark_ggml_graph_compute(gb, &cplan); + bark_ggml_opt_acc_grad(np, ps, g, accum_norm); + fx += bark_ggml_get_f32_1d(f, 0); } fx *= accum_norm; @@ -20567,11 +20563,11 @@ static enum ggml_opt_result ggml_opt_lbfgs( } // search direction = -gradient - ggml_vec_neg_f32(nx, d, g); + bark_ggml_vec_neg_f32(nx, d, g); // ||x||, ||g|| - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); + bark_ggml_vec_norm_f32(nx, &xnorm, x); + bark_ggml_vec_norm_f32(nx, &gnorm, g); if (xnorm < 1.0f) { xnorm = 1.0f; @@ -20579,7 +20575,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( // already optimized if (gnorm/xnorm <= params.lbfgs.eps) { - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } if 
(opt->just_initialized) { @@ -20589,7 +20585,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( opt->lbfgs.fx_best = fx; // initial step - ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d); + bark_ggml_vec_norm_inv_f32(nx, &opt->lbfgs.step, d); opt->lbfgs.j = 0; opt->lbfgs.k = 1; opt->lbfgs.end = 0; @@ -20615,8 +20611,8 @@ static enum ggml_opt_result ggml_opt_lbfgs( while (true) { // store the current position and gradient vectors - ggml_vec_cpy_f32(nx, xp, x); - ggml_vec_cpy_f32(nx, gp, g); + bark_ggml_vec_cpy_f32(nx, xp, x); + bark_ggml_vec_cpy_f32(nx, gp, g); // TODO: instead of passing &cancel here, use the return code of the linesearch // to determine if the optimization should be cancelled @@ -20624,30 +20620,30 @@ static enum ggml_opt_result ggml_opt_lbfgs( // way to test and don't want to break something with so many changes lined up ls = linesearch_backtracking(¶ms, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data); if (cancel) { - return GGML_OPT_CANCEL; + return BARK_GGML_OPT_CANCEL; } if (ls < 0) { // linesearch failed - go back to the previous point and return - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_cpy_f32(nx, g, gp); + bark_ggml_vec_cpy_f32(nx, x, xp); + bark_ggml_vec_cpy_f32(nx, g, gp); return ls; } opt->loss_after = fx; - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); + bark_ggml_vec_norm_f32(nx, &xnorm, x); + bark_ggml_vec_norm_f32(nx, &gnorm, g); - GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0)); + BARK_GGML_PRINT_DEBUG("f = %10.6f\n", bark_ggml_get_f32_1d(f, 0)); if (xnorm < 1.0f) { xnorm = 1.0f; } if (gnorm/xnorm <= params.lbfgs.eps) { // converged - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } // delta-based convergence test @@ -20657,7 +20653,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( const float rate = (pf[k[0]%params.past] - fx)/fx; if (fabsf(rate) < params.delta) { - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } } @@ -20673,29 +20669,29 @@ static enum ggml_opt_result ggml_opt_lbfgs( n_no_improvement[0]++; if (n_no_improvement[0] >= params.max_no_improvement) { - return GGML_OPT_OK; + return BARK_GGML_OPT_OK; } } } if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) { // reached the maximum number of iterations - return GGML_OPT_DID_NOT_CONVERGE; + return BARK_GGML_OPT_DID_NOT_CONVERGE; } // update vectors s and y: // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. // y_{k+1} = g_{k+1} - g_{k}. // - ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp); - ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp); + bark_ggml_vec_sub_f32(nx, &lm_s[end[0]*nx], x, xp); + bark_ggml_vec_sub_f32(nx, &lm_y[end[0]*nx], g, gp); // compute scalars ys and yy: // ys = y^t \cdot s -> 1 / \rho. // yy = y^t \cdot y. 
// - ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]); - ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]); + bark_ggml_vec_dot_f32(nx, &ys, &lm_y[end[0]*nx], &lm_s[end[0]*nx]); + bark_ggml_vec_dot_f32(nx, &yy, &lm_y[end[0]*nx], &lm_y[end[0]*nx]); lm_ys[end[0]] = ys; @@ -20708,43 +20704,43 @@ static enum ggml_opt_result ggml_opt_lbfgs( end[0] = (end[0] + 1)%m; // initialize search direction with -g - ggml_vec_neg_f32(nx, d, g); + bark_ggml_vec_neg_f32(nx, d, g); j[0] = end[0]; for (int i = 0; i < bound; ++i) { j[0] = (j[0] + m - 1) % m; // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} - ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d); + bark_ggml_vec_dot_f32(nx, &lm_alpha[j[0]], &lm_s[j[0]*nx], d); lm_alpha[j[0]] /= lm_ys[j[0]]; // q_{i} = q_{i+1} - \alpha_{i} y_{i} - ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]); + bark_ggml_vec_mad_f32(nx, d, &lm_y[j[0]*nx], -lm_alpha[j[0]]); } - ggml_vec_scale_f32(nx, d, ys/yy); + bark_ggml_vec_scale_f32(nx, d, ys/yy); for (int i = 0; i < bound; ++i) { // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} - ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d); + bark_ggml_vec_dot_f32(nx, &beta, &lm_y[j[0]*nx], d); beta /= lm_ys[j[0]]; // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} - ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta); + bark_ggml_vec_mad_f32(nx, d, &lm_s[j[0]*nx], lm_alpha[j[0]] - beta); j[0] = (j[0] + 1)%m; } step[0] = 1.0; } - GGML_UNREACHABLE(); + BARK_GGML_UNREACHABLE(); } -struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { - struct ggml_opt_params result; +struct bark_ggml_opt_params bark_ggml_opt_default_params(enum bark_ggml_opt_type type) { + struct bark_ggml_opt_params result; switch (type) { - case GGML_OPT_ADAM: + case BARK_GGML_OPT_ADAM: { - result = (struct ggml_opt_params) { - .type = GGML_OPT_ADAM, + result = (struct bark_ggml_opt_params) { + .type = BARK_GGML_OPT_ADAM, .n_threads = 1, .past = 0, .delta = 1e-5f, @@ -20771,10 +20767,10 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { }, }; } break; - case GGML_OPT_LBFGS: + case BARK_GGML_OPT_LBFGS: { - result = (struct ggml_opt_params) { - .type = GGML_OPT_LBFGS, + result = (struct bark_ggml_opt_params) { + .type = BARK_GGML_OPT_LBFGS, .n_threads = 1, .past = 0, .delta = 1e-5f, @@ -20797,7 +20793,7 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { .min_step = 1e-20f, .max_step = 1e+20f, - .linesearch = GGML_LINESEARCH_DEFAULT, + .linesearch = BARK_GGML_LINESEARCH_DEFAULT, }, }; } break; @@ -20806,10 +20802,10 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { return result; } -GGML_API void ggml_opt_init( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_opt_params params, +BARK_GGML_API void bark_ggml_opt_init( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_opt_params params, int64_t nx) { opt->ctx = ctx; opt->params = params; @@ -20817,151 +20813,151 @@ GGML_API void ggml_opt_init( opt->nx = nx; opt->just_initialized = true; if (opt->ctx == NULL) { - struct ggml_init_params ctx_opt_params; - if (opt->params.type == GGML_OPT_ADAM) { - ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3; + struct bark_ggml_init_params ctx_opt_params; + if (opt->params.type == BARK_GGML_OPT_ADAM) { + ctx_opt_params.mem_size = BARK_GGML_MEM_ALIGN*3 + bark_ggml_tensor_overhead()*3 + 
bark_ggml_type_size(BARK_GGML_TYPE_F32)*nx*3; if (opt->params.past > 0) { - ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past; + ctx_opt_params.mem_size += BARK_GGML_MEM_ALIGN + bark_ggml_tensor_overhead() + bark_ggml_type_size(BARK_GGML_TYPE_F32)*opt->params.past; } - } else if (opt->params.type == GGML_OPT_LBFGS) { - ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2); + } else if (opt->params.type == BARK_GGML_OPT_LBFGS) { + ctx_opt_params.mem_size = BARK_GGML_MEM_ALIGN*9 + bark_ggml_tensor_overhead()*9 + bark_ggml_type_size(BARK_GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2); if (opt->params.past > 0) { - ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past; + ctx_opt_params.mem_size += BARK_GGML_MEM_ALIGN + bark_ggml_tensor_overhead() + bark_ggml_type_size(BARK_GGML_TYPE_F32)*opt->params.past; } } ctx_opt_params.mem_buffer = NULL; ctx_opt_params.no_alloc = false; - opt->ctx = ggml_init(ctx_opt_params); + opt->ctx = bark_ggml_init(ctx_opt_params); } switch (opt->params.type) { - case GGML_OPT_ADAM: + case BARK_GGML_OPT_ADAM: { - opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); + opt->adam.g = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->adam.m = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->adam.v = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); opt->adam.pf = params.past > 0 - ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past) + ? bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, params.past) : NULL; - ggml_set_zero(opt->adam.m); - ggml_set_zero(opt->adam.v); + bark_ggml_set_zero(opt->adam.m); + bark_ggml_set_zero(opt->adam.v); if (opt->adam.pf) { - ggml_set_zero(opt->adam.pf); + bark_ggml_set_zero(opt->adam.pf); } } break; - case GGML_OPT_LBFGS: + case BARK_GGML_OPT_LBFGS: { - opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); - opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx); + opt->lbfgs.x = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->lbfgs.xp = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->lbfgs.g = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->lbfgs.gp = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); + opt->lbfgs.d = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, nx); opt->lbfgs.pf = params.past > 0 - ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past) + ? 
bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, params.past) : NULL; - opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m); - opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m); - opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m); - opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m); - ggml_set_zero(opt->lbfgs.x); - ggml_set_zero(opt->lbfgs.xp); - ggml_set_zero(opt->lbfgs.g); - ggml_set_zero(opt->lbfgs.gp); - ggml_set_zero(opt->lbfgs.d); + opt->lbfgs.lmal = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, params.lbfgs.m); + opt->lbfgs.lmys = bark_ggml_new_tensor_1d(opt->ctx, BARK_GGML_TYPE_F32, params.lbfgs.m); + opt->lbfgs.lms = bark_ggml_new_tensor_2d(opt->ctx, BARK_GGML_TYPE_F32, nx, params.lbfgs.m); + opt->lbfgs.lmy = bark_ggml_new_tensor_2d(opt->ctx, BARK_GGML_TYPE_F32, nx, params.lbfgs.m); + bark_ggml_set_zero(opt->lbfgs.x); + bark_ggml_set_zero(opt->lbfgs.xp); + bark_ggml_set_zero(opt->lbfgs.g); + bark_ggml_set_zero(opt->lbfgs.gp); + bark_ggml_set_zero(opt->lbfgs.d); if (opt->lbfgs.pf) { - ggml_set_zero(opt->lbfgs.pf); + bark_ggml_set_zero(opt->lbfgs.pf); } - ggml_set_zero(opt->lbfgs.lmal); - ggml_set_zero(opt->lbfgs.lmys); - ggml_set_zero(opt->lbfgs.lms); - ggml_set_zero(opt->lbfgs.lmy); + bark_ggml_set_zero(opt->lbfgs.lmal); + bark_ggml_set_zero(opt->lbfgs.lmys); + bark_ggml_set_zero(opt->lbfgs.lms); + bark_ggml_set_zero(opt->lbfgs.lmy); } break; } } -enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f) { +enum bark_ggml_opt_result bark_ggml_opt( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_params params, + struct bark_ggml_tensor * f) { bool free_ctx = false; if (ctx == NULL) { - struct ggml_init_params params_ctx = { + struct bark_ggml_init_params params_ctx = { .mem_size = 16*1024*1024, .mem_buffer = NULL, .no_alloc = false, }; - ctx = ggml_init(params_ctx); + ctx = bark_ggml_init(params_ctx); if (ctx == NULL) { - return GGML_OPT_NO_CONTEXT; + return BARK_GGML_OPT_NO_CONTEXT; } free_ctx = true; } - enum ggml_opt_result result = GGML_OPT_OK; + enum bark_ggml_opt_result result = BARK_GGML_OPT_OK; - struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context)); + struct bark_ggml_opt_context * opt = (struct bark_ggml_opt_context *) alloca(sizeof(struct bark_ggml_opt_context)); - ggml_opt_init(ctx, opt, params, 0); - result = ggml_opt_resume(ctx, opt, f); + bark_ggml_opt_init(ctx, opt, params, 0); + result = bark_ggml_opt_resume(ctx, opt, f); if (free_ctx) { - ggml_free(ctx); + bark_ggml_free(ctx); } return result; } -enum ggml_opt_result ggml_opt_resume( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_tensor * f) { +enum bark_ggml_opt_result bark_ggml_opt_resume( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_tensor * f) { // build forward + backward compute graphs - struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0)); - struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 
1 : 0)); + struct bark_ggml_tensor * gfbuf = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_I32, sizeof(struct bark_ggml_cgraph) / bark_ggml_type_size(BARK_GGML_TYPE_I32)+ (sizeof(struct bark_ggml_cgraph) % bark_ggml_type_size(BARK_GGML_TYPE_I32) ? 1 : 0)); + struct bark_ggml_tensor * gbbuf = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_I32, sizeof(struct bark_ggml_cgraph) / bark_ggml_type_size(BARK_GGML_TYPE_I32)+ (sizeof(struct bark_ggml_cgraph) % bark_ggml_type_size(BARK_GGML_TYPE_I32) ? 1 : 0)); - struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data; - struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data; + struct bark_ggml_cgraph * gf = (struct bark_ggml_cgraph *) gfbuf->data; + struct bark_ggml_cgraph * gb = (struct bark_ggml_cgraph *) gbbuf->data; - *gf = ggml_build_forward (f); - *gb = ggml_build_backward(ctx, gf, true); + *gf = bark_ggml_build_forward (f); + *gb = bark_ggml_build_backward(ctx, gf, true); - return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL); + return bark_ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL); } -enum ggml_opt_result ggml_opt_resume_g( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - ggml_opt_callback callback, +enum bark_ggml_opt_result bark_ggml_opt_resume_g( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_tensor * f, + struct bark_ggml_cgraph * gf, + struct bark_ggml_cgraph * gb, + bark_ggml_opt_callback callback, void * callback_data) { // build forward + backward compute graphs - enum ggml_opt_result result = GGML_OPT_OK; + enum bark_ggml_opt_result result = BARK_GGML_OPT_OK; switch (opt->params.type) { - case GGML_OPT_ADAM: + case BARK_GGML_OPT_ADAM: { - result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data); + result = bark_ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data); } break; - case GGML_OPT_LBFGS: + case BARK_GGML_OPT_LBFGS: { - result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data); + result = bark_ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data); } break; } if (opt->params.print_forward_graph) { - ggml_graph_print (gf); - ggml_graph_dump_dot(gf, NULL, "opt-forward.dot"); + bark_ggml_graph_print (gf); + bark_ggml_graph_dump_dot(gf, NULL, "opt-forward.dot"); } if (opt->params.print_backward_graph) { - ggml_graph_print (gb); - ggml_graph_dump_dot(gb, gf, "opt-backward.dot"); + bark_ggml_graph_print (gb); + bark_ggml_graph_dump_dot(gb, gf, "opt-backward.dot"); } return result; @@ -20969,7 +20965,7 @@ enum ggml_opt_result ggml_opt_resume_g( //////////////////////////////////////////////////////////////////////////////// -size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) { +size_t bark_ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) { assert(k % QK4_0 == 0); const int nb = k / QK4_0; @@ -20992,7 +20988,7 @@ size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * return (n/QK4_0*sizeof(block_q4_0)); } -size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) { +size_t bark_ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) { assert(k % QK4_1 == 0); const int nb = k / QK4_1; @@ -21015,7 +21011,7 @@ size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * return (n/QK4_1*sizeof(block_q4_1)); } -size_t 
ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) { +size_t bark_ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) { assert(k % QK5_0 == 0); const int nb = k / QK5_0; @@ -21045,7 +21041,7 @@ size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * return (n/QK5_0*sizeof(block_q5_0)); } -size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) { +size_t bark_ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist) { assert(k % QK5_1 == 0); const int nb = k / QK5_1; @@ -21075,7 +21071,7 @@ size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * return (n/QK5_1*sizeof(block_q5_1)); } -size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) { +size_t bark_ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist) { assert(k % QK8_0 == 0); const int nb = k / QK8_0; @@ -21096,78 +21092,78 @@ size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * return (n/QK8_0*sizeof(block_q8_0)); } -size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) { +size_t bark_ggml_quantize_chunk(enum bark_ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) { size_t result = 0; switch (type) { - case GGML_TYPE_Q4_0: + case BARK_GGML_TYPE_Q4_0: { - GGML_ASSERT(start % QK4_0 == 0); + BARK_GGML_ASSERT(start % QK4_0 == 0); block_q4_0 * block = (block_q4_0*)dst + start / QK4_0; - result = ggml_quantize_q4_0(src + start, block, n, n, hist); + result = bark_ggml_quantize_q4_0(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q4_1: + case BARK_GGML_TYPE_Q4_1: { - GGML_ASSERT(start % QK4_1 == 0); + BARK_GGML_ASSERT(start % QK4_1 == 0); block_q4_1 * block = (block_q4_1*)dst + start / QK4_1; - result = ggml_quantize_q4_1(src + start, block, n, n, hist); + result = bark_ggml_quantize_q4_1(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q5_0: + case BARK_GGML_TYPE_Q5_0: { - GGML_ASSERT(start % QK5_0 == 0); + BARK_GGML_ASSERT(start % QK5_0 == 0); block_q5_0 * block = (block_q5_0*)dst + start / QK5_0; - result = ggml_quantize_q5_0(src + start, block, n, n, hist); + result = bark_ggml_quantize_q5_0(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q5_1: + case BARK_GGML_TYPE_Q5_1: { - GGML_ASSERT(start % QK5_1 == 0); + BARK_GGML_ASSERT(start % QK5_1 == 0); block_q5_1 * block = (block_q5_1*)dst + start / QK5_1; - result = ggml_quantize_q5_1(src + start, block, n, n, hist); + result = bark_ggml_quantize_q5_1(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q8_0: + case BARK_GGML_TYPE_Q8_0: { - GGML_ASSERT(start % QK8_0 == 0); + BARK_GGML_ASSERT(start % QK8_0 == 0); block_q8_0 * block = (block_q8_0*)dst + start / QK8_0; - result = ggml_quantize_q8_0(src + start, block, n, n, hist); + result = bark_ggml_quantize_q8_0(src + start, block, n, n, hist); } break; -#ifdef GGML_USE_K_QUANTS - case GGML_TYPE_Q2_K: +#ifdef BARK_GGML_USE_K_QUANTS + case BARK_GGML_TYPE_Q2_K: { - GGML_ASSERT(start % QK_K == 0); + BARK_GGML_ASSERT(start % QK_K == 0); block_q2_K * block = (block_q2_K*)dst + start / QK_K; - result = ggml_quantize_q2_K(src + start, block, n, n, hist); + result = bark_ggml_quantize_q2_K(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q3_K: + case BARK_GGML_TYPE_Q3_K: { - GGML_ASSERT(start % QK_K == 0); + BARK_GGML_ASSERT(start % QK_K == 0); block_q3_K * block = 
(block_q3_K*)dst + start / QK_K; - result = ggml_quantize_q3_K(src + start, block, n, n, hist); + result = bark_ggml_quantize_q3_K(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q4_K: + case BARK_GGML_TYPE_Q4_K: { - GGML_ASSERT(start % QK_K == 0); + BARK_GGML_ASSERT(start % QK_K == 0); block_q4_K * block = (block_q4_K*)dst + start / QK_K; - result = ggml_quantize_q4_K(src + start, block, n, n, hist); + result = bark_ggml_quantize_q4_K(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q5_K: + case BARK_GGML_TYPE_Q5_K: { - GGML_ASSERT(start % QK_K == 0); + BARK_GGML_ASSERT(start % QK_K == 0); block_q5_K * block = (block_q5_K*)dst + start / QK_K; - result = ggml_quantize_q5_K(src + start, block, n, n, hist); + result = bark_ggml_quantize_q5_K(src + start, block, n, n, hist); } break; - case GGML_TYPE_Q6_K: + case BARK_GGML_TYPE_Q6_K: { - GGML_ASSERT(start % QK_K == 0); + BARK_GGML_ASSERT(start % QK_K == 0); block_q6_K * block = (block_q6_K*)dst + start / QK_K; - result = ggml_quantize_q6_K(src + start, block, n, n, hist); + result = bark_ggml_quantize_q6_K(src + start, block, n, n, hist); } break; #endif - case GGML_TYPE_F16: + case BARK_GGML_TYPE_F16: { - int elemsize = sizeof(ggml_fp16_t); - ggml_fp32_to_fp16_row(src + start, (ggml_fp16_t *)dst + start, n); + int elemsize = sizeof(bark_ggml_fp16_t); + bark_ggml_fp32_to_fp16_row(src + start, (bark_ggml_fp16_t *)dst + start, n); result = n * elemsize; } break; - case GGML_TYPE_F32: + case BARK_GGML_TYPE_F32: { int elemsize = sizeof(float); result = n * elemsize; @@ -21181,46 +21177,46 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i //////////////////////////////////////////////////////////////////////////////// -struct gguf_str { +struct bark_gguf_str { uint64_t n; // GGUFv2 char * data; }; -static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = { - [GGUF_TYPE_UINT8] = sizeof(uint8_t), - [GGUF_TYPE_INT8] = sizeof(int8_t), - [GGUF_TYPE_UINT16] = sizeof(uint16_t), - [GGUF_TYPE_INT16] = sizeof(int16_t), - [GGUF_TYPE_UINT32] = sizeof(uint32_t), - [GGUF_TYPE_INT32] = sizeof(int32_t), - [GGUF_TYPE_FLOAT32] = sizeof(float), - [GGUF_TYPE_BOOL] = sizeof(bool), - [GGUF_TYPE_STRING] = sizeof(struct gguf_str), - [GGUF_TYPE_UINT64] = sizeof(uint64_t), - [GGUF_TYPE_INT64] = sizeof(int64_t), - [GGUF_TYPE_FLOAT64] = sizeof(double), - [GGUF_TYPE_ARRAY] = 0, // undefined +static const size_t BARK_GGUF_TYPE_SIZE[BARK_GGUF_TYPE_COUNT] = { + [BARK_GGUF_TYPE_UINT8] = sizeof(uint8_t), + [BARK_GGUF_TYPE_INT8] = sizeof(int8_t), + [BARK_GGUF_TYPE_UINT16] = sizeof(uint16_t), + [BARK_GGUF_TYPE_INT16] = sizeof(int16_t), + [BARK_GGUF_TYPE_UINT32] = sizeof(uint32_t), + [BARK_GGUF_TYPE_INT32] = sizeof(int32_t), + [BARK_GGUF_TYPE_FLOAT32] = sizeof(float), + [BARK_GGUF_TYPE_BOOL] = sizeof(bool), + [BARK_GGUF_TYPE_STRING] = sizeof(struct bark_gguf_str), + [BARK_GGUF_TYPE_UINT64] = sizeof(uint64_t), + [BARK_GGUF_TYPE_INT64] = sizeof(int64_t), + [BARK_GGUF_TYPE_FLOAT64] = sizeof(double), + [BARK_GGUF_TYPE_ARRAY] = 0, // undefined }; -static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13"); - -static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = { - [GGUF_TYPE_UINT8] = "u8", - [GGUF_TYPE_INT8] = "i8", - [GGUF_TYPE_UINT16] = "u16", - [GGUF_TYPE_INT16] = "i16", - [GGUF_TYPE_UINT32] = "u32", - [GGUF_TYPE_INT32] = "i32", - [GGUF_TYPE_FLOAT32] = "f32", - [GGUF_TYPE_BOOL] = "bool", - [GGUF_TYPE_STRING] = "str", - [GGUF_TYPE_ARRAY] = "arr", - [GGUF_TYPE_UINT64] = "u64", - [GGUF_TYPE_INT64] = "i64", - [GGUF_TYPE_FLOAT64] = 
"f64", +static_assert(BARK_GGUF_TYPE_COUNT == 13, "BARK_GGUF_TYPE_COUNT != 13"); + +static const char * BARK_GGUF_TYPE_NAME[BARK_GGUF_TYPE_COUNT] = { + [BARK_GGUF_TYPE_UINT8] = "u8", + [BARK_GGUF_TYPE_INT8] = "i8", + [BARK_GGUF_TYPE_UINT16] = "u16", + [BARK_GGUF_TYPE_INT16] = "i16", + [BARK_GGUF_TYPE_UINT32] = "u32", + [BARK_GGUF_TYPE_INT32] = "i32", + [BARK_GGUF_TYPE_FLOAT32] = "f32", + [BARK_GGUF_TYPE_BOOL] = "bool", + [BARK_GGUF_TYPE_STRING] = "str", + [BARK_GGUF_TYPE_ARRAY] = "arr", + [BARK_GGUF_TYPE_UINT64] = "u64", + [BARK_GGUF_TYPE_INT64] = "i64", + [BARK_GGUF_TYPE_FLOAT64] = "f64", }; -static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13"); +static_assert(BARK_GGUF_TYPE_COUNT == 13, "BARK_GGUF_TYPE_COUNT != 13"); -union gguf_value { +union bark_gguf_value { uint8_t uint8; int8_t int8; uint16_t uint16; @@ -21233,37 +21229,37 @@ union gguf_value { double float64; bool bool_; - struct gguf_str str; + struct bark_gguf_str str; struct { - enum gguf_type type; + enum bark_gguf_type type; uint64_t n; // GGUFv2 void * data; } arr; }; -struct gguf_kv { - struct gguf_str key; +struct bark_gguf_kv { + struct bark_gguf_str key; - enum gguf_type type; - union gguf_value value; + enum bark_gguf_type type; + union bark_gguf_value value; }; -struct gguf_header { +struct bark_gguf_header { uint32_t magic; uint32_t version; uint64_t n_tensors; // GGUFv2 uint64_t n_kv; // GGUFv2 }; -struct gguf_tensor_info { - struct gguf_str name; +struct bark_gguf_tensor_info { + struct bark_gguf_str name; uint32_t n_dims; - uint64_t ne[GGML_MAX_DIMS]; + uint64_t ne[BARK_GGML_MAX_DIMS]; - enum ggml_type type; + enum bark_ggml_type type; uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT` @@ -21272,11 +21268,11 @@ struct gguf_tensor_info { size_t size; }; -struct gguf_context { - struct gguf_header header; +struct bark_gguf_context { + struct bark_gguf_header header; - struct gguf_kv * kv; - struct gguf_tensor_info * infos; + struct bark_gguf_kv * kv; + struct bark_gguf_tensor_info * infos; size_t alignment; size_t offset; // offset of `data` from beginning of file @@ -21286,50 +21282,50 @@ struct gguf_context { void * data; }; -static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) { +static bool bark_gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) { const size_t n = fread(dst, 1, size, file); *offset += n; return n == size; } // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023 -static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) { +static bool bark_gguf_fread_str_cur(FILE * file, struct bark_gguf_str * p, size_t * offset) { p->n = 0; p->data = NULL; bool ok = true; - ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1); - ok = ok && gguf_fread_el(file, p->data, p->n, offset); + ok = ok && bark_gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1); + ok = ok && bark_gguf_fread_el(file, p->data, p->n, offset); return ok; } -static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) { +static bool bark_gguf_fread_str_v1(FILE * file, struct bark_gguf_str * p, size_t * offset) { p->n = 0; p->data = NULL; bool ok = true; uint32_t n = 0; - ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n; - ok = ok && gguf_fread_el(file, p->data, p->n, offset); + ok = ok && bark_gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n; + ok = ok && 
bark_gguf_fread_el(file, p->data, p->n, offset); return ok; } -struct gguf_context * gguf_init_empty(void) { - struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); +struct bark_gguf_context * bark_gguf_init_empty(void) { + struct bark_gguf_context * ctx = BARK_GGML_ALIGNED_MALLOC(sizeof(struct bark_gguf_context)); - ctx->header.magic = GGUF_MAGIC; - ctx->header.version = GGUF_VERSION; + ctx->header.magic = BARK_GGUF_MAGIC; + ctx->header.version = BARK_GGUF_VERSION; ctx->header.n_tensors = 0; ctx->header.n_kv = 0; ctx->kv = NULL; ctx->infos = NULL; - ctx->alignment = GGUF_DEFAULT_ALIGNMENT; + ctx->alignment = BARK_GGUF_DEFAULT_ALIGNMENT; ctx->offset = 0; ctx->size = 0; @@ -21338,7 +21334,7 @@ struct gguf_context * gguf_init_empty(void) { return ctx; } -struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { +struct bark_gguf_context * bark_gguf_init_from_file(const char * fname, struct bark_gguf_init_params params) { FILE * file = fopen(fname, "rb"); if (!file) { return NULL; @@ -21351,9 +21347,9 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // check the magic before making allocations { - gguf_fread_el(file, &magic, sizeof(magic), &offset); + bark_gguf_fread_el(file, &magic, sizeof(magic), &offset); - if (magic != GGUF_MAGIC) { + if (magic != BARK_GGUF_MAGIC) { fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic); fclose(file); return NULL; @@ -21362,7 +21358,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p bool ok = true; - struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + struct bark_gguf_context * ctx = BARK_GGML_ALIGNED_MALLOC(sizeof(struct bark_gguf_context)); // read the header { @@ -21372,105 +21368,105 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ctx->infos = NULL; ctx->data = NULL; - ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset); + ok = ok && bark_gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset); if (ctx->header.version == 1) { // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023 uint32_t n_tensors = 0; uint32_t n_kv = 0; - ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset); - ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset); + ok = ok && bark_gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset); + ok = ok && bark_gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset); ctx->header.n_tensors = n_tensors; ctx->header.n_kv = n_kv; } else { - ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset); - ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset); + ok = ok && bark_gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset); + ok = ok && bark_gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset); } if (!ok) { fprintf(stderr, "%s: failed to read header\n", __func__); fclose(file); - gguf_free(ctx); + bark_gguf_free(ctx); return NULL; } } // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023 - bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur; + bool (* bark_gguf_fread_str)(FILE *, struct bark_gguf_str *, size_t *) = bark_gguf_fread_str_cur; if (ctx->header.version == 1) { - gguf_fread_str = gguf_fread_str_v1; + bark_gguf_fread_str = bark_gguf_fread_str_v1; } // read the kv pairs { - 
ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv)); + ctx->kv = malloc(ctx->header.n_kv * sizeof(struct bark_gguf_kv)); for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { - struct gguf_kv * kv = &ctx->kv[i]; + struct bark_gguf_kv * kv = &ctx->kv[i]; //fprintf(stderr, "%s: reading kv %d\n", __func__, i); - ok = ok && gguf_fread_str(file, &kv->key, &offset); - ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset); + ok = ok && bark_gguf_fread_str(file, &kv->key, &offset); + ok = ok && bark_gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset); //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data); switch (kv->type) { - case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break; - case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break; - case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break; - case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break; - case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break; - case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break; - case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break; - case GGUF_TYPE_UINT64: ok = ok && gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break; - case GGUF_TYPE_INT64: ok = ok && gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break; - case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break; - case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break; - case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break; - case GGUF_TYPE_ARRAY: + case BARK_GGUF_TYPE_UINT8: ok = ok && bark_gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break; + case BARK_GGUF_TYPE_INT8: ok = ok && bark_gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break; + case BARK_GGUF_TYPE_UINT16: ok = ok && bark_gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break; + case BARK_GGUF_TYPE_INT16: ok = ok && bark_gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break; + case BARK_GGUF_TYPE_UINT32: ok = ok && bark_gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break; + case BARK_GGUF_TYPE_INT32: ok = ok && bark_gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break; + case BARK_GGUF_TYPE_FLOAT32: ok = ok && bark_gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break; + case BARK_GGUF_TYPE_UINT64: ok = ok && bark_gguf_fread_el (file, &kv->value.uint64, sizeof(kv->value.uint64), &offset); break; + case BARK_GGUF_TYPE_INT64: ok = ok && bark_gguf_fread_el (file, &kv->value.int64, sizeof(kv->value.int64), &offset); break; + case BARK_GGUF_TYPE_FLOAT64: ok = ok && bark_gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break; + case BARK_GGUF_TYPE_BOOL: ok = ok && bark_gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break; + case BARK_GGUF_TYPE_STRING: ok = ok && bark_gguf_fread_str(file, 
&kv->value.str, &offset); break; + case BARK_GGUF_TYPE_ARRAY: { - ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset); + ok = ok && bark_gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset); if (ctx->header.version == 1) { // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023 uint32_t n = 0; - ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset); + ok = ok && bark_gguf_fread_el(file, &n, sizeof(n), &offset); kv->value.arr.n = n; } else { - ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset); + ok = ok && bark_gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset); } switch (kv->value.arr.type) { - case GGUF_TYPE_UINT8: - case GGUF_TYPE_INT8: - case GGUF_TYPE_UINT16: - case GGUF_TYPE_INT16: - case GGUF_TYPE_UINT32: - case GGUF_TYPE_INT32: - case GGUF_TYPE_FLOAT32: - case GGUF_TYPE_UINT64: - case GGUF_TYPE_INT64: - case GGUF_TYPE_FLOAT64: - case GGUF_TYPE_BOOL: + case BARK_GGUF_TYPE_UINT8: + case BARK_GGUF_TYPE_INT8: + case BARK_GGUF_TYPE_UINT16: + case BARK_GGUF_TYPE_INT16: + case BARK_GGUF_TYPE_UINT32: + case BARK_GGUF_TYPE_INT32: + case BARK_GGUF_TYPE_FLOAT32: + case BARK_GGUF_TYPE_UINT64: + case BARK_GGUF_TYPE_INT64: + case BARK_GGUF_TYPE_FLOAT64: + case BARK_GGUF_TYPE_BOOL: { - kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); - ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset); + kv->value.arr.data = malloc(kv->value.arr.n * BARK_GGUF_TYPE_SIZE[kv->value.arr.type]); + ok = ok && bark_gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * BARK_GGUF_TYPE_SIZE[kv->value.arr.type], &offset); } break; - case GGUF_TYPE_STRING: + case BARK_GGUF_TYPE_STRING: { - kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str)); + kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct bark_gguf_str)); for (uint32_t j = 0; j < kv->value.arr.n; ++j) { - ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset); + ok = ok && bark_gguf_fread_str(file, &((struct bark_gguf_str *) kv->value.arr.data)[j], &offset); } } break; - case GGUF_TYPE_ARRAY: - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + case BARK_GGUF_TYPE_ARRAY: + case BARK_GGUF_TYPE_COUNT: BARK_GGML_ASSERT(false && "invalid type"); break; } } break; - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + case BARK_GGUF_TYPE_COUNT: BARK_GGML_ASSERT(false && "invalid type"); } if (!ok) { @@ -21481,51 +21477,51 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read key-value pairs\n", __func__); fclose(file); - gguf_free(ctx); + bark_gguf_free(ctx); return NULL; } } // read the tensor infos { - ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct gguf_tensor_info)); + ctx->infos = malloc(ctx->header.n_tensors * sizeof(struct bark_gguf_tensor_info)); for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - struct gguf_tensor_info * info = &ctx->infos[i]; + struct bark_gguf_tensor_info * info = &ctx->infos[i]; - for (int j = 0; j < GGML_MAX_DIMS; ++j) { + for (int j = 0; j < BARK_GGML_MAX_DIMS; ++j) { info->ne[j] = 1; } - ok = ok && gguf_fread_str(file, &info->name, &offset); - ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset); + ok = ok && bark_gguf_fread_str(file, &info->name, &offset); + ok = ok && bark_gguf_fread_el (file, &info->n_dims, 
sizeof(info->n_dims), &offset); for (uint32_t j = 0; j < info->n_dims; ++j) { if (ctx->header.version == 1) { // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023 uint32_t t = 0; - ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset); + ok = ok && bark_gguf_fread_el(file, &t, sizeof(t), &offset); info->ne[j] = t; } else { - ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset); + ok = ok && bark_gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset); } } - ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset); - ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset); + ok = ok && bark_gguf_fread_el (file, &info->type, sizeof(info->type), &offset); + ok = ok && bark_gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset); if (!ok) { fprintf(stderr, "%s: failed to read tensor info\n", __func__); fclose(file); - gguf_free(ctx); + bark_gguf_free(ctx); return NULL; } } } - ctx->alignment = GGUF_DEFAULT_ALIGNMENT; + ctx->alignment = BARK_GGUF_DEFAULT_ALIGNMENT; - int alignment_idx = gguf_find_key(ctx, "general.alignment"); + int alignment_idx = bark_gguf_find_key(ctx, "general.alignment"); if (alignment_idx != -1) { - ctx->alignment = gguf_get_val_u32(ctx, alignment_idx); + ctx->alignment = bark_gguf_get_val_u32(ctx, alignment_idx); } // we require the data section to be aligned, so take into account any padding @@ -21545,7 +21541,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p { ctx->size = 0; for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - struct gguf_tensor_info * info = &ctx->infos[i]; + struct bark_gguf_tensor_info * info = &ctx->infos[i]; const int64_t ne = (int64_t) info->ne[0] * @@ -21553,79 +21549,79 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p (int64_t) info->ne[2] * (int64_t) info->ne[3]; - if (ne % ggml_blck_size(info->type) != 0) { + if (ne % bark_ggml_blck_size(info->type) != 0) { fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n", - __func__, info->name.data, ne, ggml_blck_size(info->type)); + __func__, info->name.data, ne, bark_ggml_blck_size(info->type)); fclose(file); - gguf_free(ctx); + bark_gguf_free(ctx); return NULL; } - const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type); + const size_t size_cur = (ne*bark_ggml_type_size(info->type))/bark_ggml_blck_size(info->type); - ctx->size += GGML_PAD(size_cur, ctx->alignment); + ctx->size += BARK_GGML_PAD(size_cur, ctx->alignment); } } // load the tensor data only if requested if (params.ctx != NULL) { - // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob - // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of - // the ggml_tensor structs to the appropriate locations in the binary blob + // if the provided bark_gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob + // otherwise, we load the binary blob into the created bark_ggml_context as well, and point the "data" members of + // the bark_ggml_tensor structs to the appropriate locations in the binary blob - // compute the exact size needed for the new ggml_context + // compute the exact size needed for the new bark_ggml_context const size_t mem_size = params.no_alloc ? 
- (ctx->header.n_tensors )*ggml_tensor_overhead() : - (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size; + (ctx->header.n_tensors )*bark_ggml_tensor_overhead() : + (ctx->header.n_tensors + 1)*bark_ggml_tensor_overhead() + ctx->size; - struct ggml_init_params pdata = { + struct bark_ggml_init_params pdata = { .mem_size = mem_size, .mem_buffer = NULL, .no_alloc = params.no_alloc, }; - *params.ctx = ggml_init(pdata); + *params.ctx = bark_ggml_init(pdata); - struct ggml_context * ctx_data = *params.ctx; + struct bark_ggml_context * ctx_data = *params.ctx; - struct ggml_tensor * data = NULL; + struct bark_ggml_tensor * data = NULL; if (!params.no_alloc) { - data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size); + data = bark_ggml_new_tensor_1d(ctx_data, BARK_GGML_TYPE_I8, ctx->size); ok = ok && data != NULL; // read the binary blob with the tensor data - ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset); + ok = ok && bark_gguf_fread_el(file, data->data, ctx->size, &offset); if (!ok) { fprintf(stderr, "%s: failed to read tensor data\n", __func__); fclose(file); - ggml_free(ctx_data); - gguf_free(ctx); + bark_ggml_free(ctx_data); + bark_gguf_free(ctx); return NULL; } ctx->data = data->data; } - ggml_set_no_alloc(ctx_data, true); + bark_ggml_set_no_alloc(ctx_data, true); // create the tensors for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - const int64_t ne[GGML_MAX_DIMS] = { + const int64_t ne[BARK_GGML_MAX_DIMS] = { ctx->infos[i].ne[0], ctx->infos[i].ne[1], ctx->infos[i].ne[2], ctx->infos[i].ne[3], }; - struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne); + struct bark_ggml_tensor * cur = bark_ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne); ok = ok && cur != NULL; - ggml_set_name(cur, ctx->infos[i].name.data); + bark_ggml_set_name(cur, ctx->infos[i].name.data); if (!ok) { break; @@ -21641,12 +21637,12 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read the tensor data\n", __func__); fclose(file); - ggml_free(ctx_data); - gguf_free(ctx); + bark_ggml_free(ctx_data); + bark_gguf_free(ctx); return NULL; } - ggml_set_no_alloc(ctx_data, params.no_alloc); + bark_ggml_set_no_alloc(ctx_data, params.no_alloc); } fclose(file); @@ -21654,7 +21650,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p return ctx; } -void gguf_free(struct gguf_context * ctx) { +void bark_gguf_free(struct bark_gguf_context * ctx) { if (ctx == NULL) { return; } @@ -21662,23 +21658,23 @@ void gguf_free(struct gguf_context * ctx) { if (ctx->kv) { // free string memory - not great.. 
for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { - struct gguf_kv * kv = &ctx->kv[i]; + struct bark_gguf_kv * kv = &ctx->kv[i]; if (kv->key.data) { free(kv->key.data); } - if (kv->type == GGUF_TYPE_STRING) { + if (kv->type == BARK_GGUF_TYPE_STRING) { if (kv->value.str.data) { free(kv->value.str.data); } } - if (kv->type == GGUF_TYPE_ARRAY) { + if (kv->type == BARK_GGUF_TYPE_ARRAY) { if (kv->value.arr.data) { - if (kv->value.arr.type == GGUF_TYPE_STRING) { + if (kv->value.arr.type == BARK_GGUF_TYPE_STRING) { for (uint32_t j = 0; j < kv->value.arr.n; ++j) { - struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j]; + struct bark_gguf_str * str = &((struct bark_gguf_str *) kv->value.arr.data)[j]; if (str->data) { free(str->data); } @@ -21694,7 +21690,7 @@ void gguf_free(struct gguf_context * ctx) { if (ctx->infos) { for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - struct gguf_tensor_info * info = &ctx->infos[i]; + struct bark_gguf_tensor_info * info = &ctx->infos[i]; if (info->name.data) { free(info->name.data); @@ -21704,41 +21700,41 @@ void gguf_free(struct gguf_context * ctx) { free(ctx->infos); } - GGML_ALIGNED_FREE(ctx); + BARK_GGML_ALIGNED_FREE(ctx); } -const char * gguf_type_name(enum gguf_type type) { - return GGUF_TYPE_NAME[type]; +const char * bark_gguf_type_name(enum bark_gguf_type type) { + return BARK_GGUF_TYPE_NAME[type]; } -int gguf_get_version(const struct gguf_context * ctx) { +int bark_gguf_get_version(const struct bark_gguf_context * ctx) { return ctx->header.version; } -size_t gguf_get_alignment(const struct gguf_context * ctx) { +size_t bark_gguf_get_alignment(const struct bark_gguf_context * ctx) { return ctx->alignment; } -size_t gguf_get_data_offset(const struct gguf_context * ctx) { +size_t bark_gguf_get_data_offset(const struct bark_gguf_context * ctx) { return ctx->offset; } -void * gguf_get_data(const struct gguf_context * ctx) { +void * bark_gguf_get_data(const struct bark_gguf_context * ctx) { return ctx->data; } -int gguf_get_n_kv(const struct gguf_context * ctx) { +int bark_gguf_get_n_kv(const struct bark_gguf_context * ctx) { return ctx->header.n_kv; } -int gguf_find_key(const struct gguf_context * ctx, const char * key) { +int bark_gguf_find_key(const struct bark_gguf_context * ctx, const char * key) { // return -1 if key not found int keyfound = -1; - const int n_kv = gguf_get_n_kv(ctx); + const int n_kv = bark_gguf_get_n_kv(ctx); for (int i = 0; i < n_kv; ++i) { - if (strcmp(key, gguf_get_key(ctx, i)) == 0) { + if (strcmp(key, bark_gguf_get_key(ctx, i)) == 0) { keyfound = i; break; } @@ -21747,108 +21743,108 @@ int gguf_find_key(const struct gguf_context * ctx, const char * key) { return keyfound; } -const char * gguf_get_key(const struct gguf_context * ctx, int key_id) { +const char * bark_gguf_get_key(const struct bark_gguf_context * ctx, int key_id) { return ctx->kv[key_id].key.data; } -enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) { +enum bark_gguf_type bark_gguf_get_kv_type(const struct bark_gguf_context * ctx, int key_id) { return ctx->kv[key_id].type; } -enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY); +enum bark_gguf_type bark_gguf_get_arr_type(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_ARRAY); return ctx->kv[key_id].value.arr.type; } -const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type 
== GGUF_TYPE_ARRAY); +const void * bark_gguf_get_arr_data(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_ARRAY); return ctx->kv[key_id].value.arr.data; } -const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY); - struct gguf_kv * kv = &ctx->kv[key_id]; - struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i]; +const char * bark_gguf_get_arr_str(const struct bark_gguf_context * ctx, int key_id, int i) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_ARRAY); + struct bark_gguf_kv * kv = &ctx->kv[key_id]; + struct bark_gguf_str * str = &((struct bark_gguf_str *) kv->value.arr.data)[i]; return str->data; } -int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY); +int bark_gguf_get_arr_n(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_ARRAY); return ctx->kv[key_id].value.arr.n; } -uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8); +uint8_t bark_gguf_get_val_u8(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_UINT8); return ctx->kv[key_id].value.uint8; } -int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8); +int8_t bark_gguf_get_val_i8(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_INT8); return ctx->kv[key_id].value.int8; } -uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16); +uint16_t bark_gguf_get_val_u16(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_UINT16); return ctx->kv[key_id].value.uint16; } -int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16); +int16_t bark_gguf_get_val_i16(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_INT16); return ctx->kv[key_id].value.int16; } -uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32); +uint32_t bark_gguf_get_val_u32(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_UINT32); return ctx->kv[key_id].value.uint32; } -int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32); +int32_t bark_gguf_get_val_i32(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_INT32); return ctx->kv[key_id].value.int32; } -float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32); +float bark_gguf_get_val_f32(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_FLOAT32); return ctx->kv[key_id].value.float32; } -uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64); +uint64_t bark_gguf_get_val_u64(const struct bark_gguf_context * ctx, int key_id) { + 
BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_UINT64); return ctx->kv[key_id].value.uint64; } -int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64); +int64_t bark_gguf_get_val_i64(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_INT64); return ctx->kv[key_id].value.int64; } -double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64); +double bark_gguf_get_val_f64(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_FLOAT64); return ctx->kv[key_id].value.float64; } -bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL); +bool bark_gguf_get_val_bool(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_BOOL); return ctx->kv[key_id].value.bool_; } -const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) { - GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING); +const char * bark_gguf_get_val_str(const struct bark_gguf_context * ctx, int key_id) { + BARK_GGML_ASSERT(ctx->kv[key_id].type == BARK_GGUF_TYPE_STRING); return ctx->kv[key_id].value.str.data; } -int gguf_get_n_tensors(const struct gguf_context * ctx) { +int bark_gguf_get_n_tensors(const struct bark_gguf_context * ctx) { return ctx->header.n_tensors; } -int gguf_find_tensor(const struct gguf_context * ctx, const char * name) { +int bark_gguf_find_tensor(const struct bark_gguf_context * ctx, const char * name) { // return -1 if tensor not found int tensorfound = -1; - const int n_tensors = gguf_get_n_tensors(ctx); + const int n_tensors = bark_gguf_get_n_tensors(ctx); for (int i = 0; i < n_tensors; ++i) { - if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) { + if (strcmp(name, bark_gguf_get_tensor_name(ctx, i)) == 0) { tensorfound = i; break; } @@ -21857,24 +21853,24 @@ int gguf_find_tensor(const struct gguf_context * ctx, const char * name) { return tensorfound; } -size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) { +size_t bark_gguf_get_tensor_offset(const struct bark_gguf_context * ctx, int i) { return ctx->infos[i].offset; } -char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) { +char * bark_gguf_get_tensor_name(const struct bark_gguf_context * ctx, int i) { return ctx->infos[i].name.data; } // returns the index -static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) { - const int idx = gguf_find_key(ctx, key); +static int bark_gguf_get_or_add_key(struct bark_gguf_context * ctx, const char * key) { + const int idx = bark_gguf_find_key(ctx, key); if (idx >= 0) { return idx; } - const int n_kv = gguf_get_n_kv(ctx); + const int n_kv = bark_gguf_get_n_kv(ctx); - ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv)); + ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct bark_gguf_kv)); ctx->kv[n_kv].key.n = strlen(key); ctx->kv[n_kv].key.data = strdup(key); ctx->header.n_kv++; @@ -21882,161 +21878,161 @@ static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) { return n_kv; } -void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_u8(struct bark_gguf_context * ctx, const char * key, uint8_t val) { + const int idx = 
bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_UINT8; + ctx->kv[idx].type = BARK_GGUF_TYPE_UINT8; ctx->kv[idx].value.uint8 = val; } -void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_i8(struct bark_gguf_context * ctx, const char * key, int8_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_INT8; + ctx->kv[idx].type = BARK_GGUF_TYPE_INT8; ctx->kv[idx].value.int8 = val; } -void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_u16(struct bark_gguf_context * ctx, const char * key, uint16_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_UINT16; + ctx->kv[idx].type = BARK_GGUF_TYPE_UINT16; ctx->kv[idx].value.uint16 = val; } -void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_i16(struct bark_gguf_context * ctx, const char * key, int16_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_INT16; + ctx->kv[idx].type = BARK_GGUF_TYPE_INT16; ctx->kv[idx].value.int16 = val; } -void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_u32(struct bark_gguf_context * ctx, const char * key, uint32_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_UINT32; + ctx->kv[idx].type = BARK_GGUF_TYPE_UINT32; ctx->kv[idx].value.uint32 = val; } -void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_i32(struct bark_gguf_context * ctx, const char * key, int32_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_INT32; + ctx->kv[idx].type = BARK_GGUF_TYPE_INT32; ctx->kv[idx].value.int32 = val; } -void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_f32(struct bark_gguf_context * ctx, const char * key, float val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_FLOAT32; + ctx->kv[idx].type = BARK_GGUF_TYPE_FLOAT32; ctx->kv[idx].value.float32 = val; } -void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_u64(struct bark_gguf_context * ctx, const char * key, uint64_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_UINT64; + ctx->kv[idx].type = BARK_GGUF_TYPE_UINT64; ctx->kv[idx].value.uint64 = val; } -void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_i64(struct bark_gguf_context * ctx, const char * key, int64_t val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_INT64; + ctx->kv[idx].type = BARK_GGUF_TYPE_INT64; ctx->kv[idx].value.int64 = val; } -void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_f64(struct bark_gguf_context * ctx, const char * key, double val) { 
+ const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_FLOAT64; + ctx->kv[idx].type = BARK_GGUF_TYPE_FLOAT64; ctx->kv[idx].value.float64 = val; } -void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_bool(struct bark_gguf_context * ctx, const char * key, bool val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_BOOL; + ctx->kv[idx].type = BARK_GGUF_TYPE_BOOL; ctx->kv[idx].value.bool_ = val; } -void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_val_str(struct bark_gguf_context * ctx, const char * key, const char * val) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_STRING; + ctx->kv[idx].type = BARK_GGUF_TYPE_STRING; ctx->kv[idx].value.str.n = strlen(val); ctx->kv[idx].value.str.data = strdup(val); } -void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_arr_data(struct bark_gguf_context * ctx, const char * key, enum bark_gguf_type type, const void * data, int n) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_ARRAY; + ctx->kv[idx].type = BARK_GGUF_TYPE_ARRAY; ctx->kv[idx].value.arr.type = type; ctx->kv[idx].value.arr.n = n; - ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]); - memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]); + ctx->kv[idx].value.arr.data = malloc(n*BARK_GGUF_TYPE_SIZE[type]); + memcpy(ctx->kv[idx].value.arr.data, data, n*BARK_GGUF_TYPE_SIZE[type]); } -void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) { - const int idx = gguf_get_or_add_key(ctx, key); +void bark_gguf_set_arr_str(struct bark_gguf_context * ctx, const char * key, const char ** data, int n) { + const int idx = bark_gguf_get_or_add_key(ctx, key); - ctx->kv[idx].type = GGUF_TYPE_ARRAY; - ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING; + ctx->kv[idx].type = BARK_GGUF_TYPE_ARRAY; + ctx->kv[idx].value.arr.type = BARK_GGUF_TYPE_STRING; ctx->kv[idx].value.arr.n = n; - ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str)); + ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct bark_gguf_str)); for (int i = 0; i < n; i++) { - struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i]; + struct bark_gguf_str * str = &((struct bark_gguf_str *)ctx->kv[idx].value.arr.data)[i]; str->n = strlen(data[i]); str->data = strdup(data[i]); } } // set or add KV pairs from another context -void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { +void bark_gguf_set_kv(struct bark_gguf_context * ctx, struct bark_gguf_context * src) { for (uint32_t i = 0; i < src->header.n_kv; i++) { switch (src->kv[i].type) { - case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break; - case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break; - case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break; - case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break; - case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break; - case GGUF_TYPE_INT32: gguf_set_val_i32 
(ctx, src->kv[i].key.data, src->kv[i].value.int32); break; - case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break; - case GGUF_TYPE_UINT64: gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break; - case GGUF_TYPE_INT64: gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break; - case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break; - case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break; - case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break; - case GGUF_TYPE_ARRAY: + case BARK_GGUF_TYPE_UINT8: bark_gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break; + case BARK_GGUF_TYPE_INT8: bark_gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break; + case BARK_GGUF_TYPE_UINT16: bark_gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break; + case BARK_GGUF_TYPE_INT16: bark_gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break; + case BARK_GGUF_TYPE_UINT32: bark_gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break; + case BARK_GGUF_TYPE_INT32: bark_gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break; + case BARK_GGUF_TYPE_FLOAT32: bark_gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break; + case BARK_GGUF_TYPE_UINT64: bark_gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64); break; + case BARK_GGUF_TYPE_INT64: bark_gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64); break; + case BARK_GGUF_TYPE_FLOAT64: bark_gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64); break; + case BARK_GGUF_TYPE_BOOL: bark_gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break; + case BARK_GGUF_TYPE_STRING: bark_gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break; + case BARK_GGUF_TYPE_ARRAY: { - if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) { + if (src->kv[i].value.arr.type == BARK_GGUF_TYPE_STRING) { const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *)); for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) { - data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; + data[j] = ((struct bark_gguf_str *)src->kv[i].value.arr.data)[j].data; } - gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); + bark_gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); free(data); - } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) { - GGML_ASSERT(false && "nested arrays not supported"); + } else if (src->kv[i].value.arr.type == BARK_GGUF_TYPE_ARRAY) { + BARK_GGML_ASSERT(false && "nested arrays not supported"); } else { - gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n); + bark_gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n); } } break; - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + case BARK_GGUF_TYPE_COUNT: BARK_GGML_ASSERT(false && "invalid type"); break; } } } -void gguf_add_tensor( - struct gguf_context * ctx, - const struct ggml_tensor * tensor) { +void bark_gguf_add_tensor( + struct bark_gguf_context * ctx, + const struct bark_ggml_tensor * tensor) { const int idx = ctx->header.n_tensors; - ctx->infos = realloc(ctx->infos, 
(idx + 1)*sizeof(struct gguf_tensor_info)); + ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct bark_gguf_tensor_info)); ctx->infos[idx].name.n = strlen(tensor->name); ctx->infos[idx].name.data = strdup(tensor->name); - for (int i = 0; i < GGML_MAX_DIMS; ++i) { + for (int i = 0; i < BARK_GGML_MAX_DIMS; ++i) { ctx->infos[idx].ne[i] = 1; } @@ -22048,28 +22044,28 @@ void gguf_add_tensor( ctx->infos[idx].type = tensor->type; ctx->infos[idx].offset = 0; ctx->infos[idx].data = tensor->data; - ctx->infos[idx].size = ggml_nbytes(tensor); + ctx->infos[idx].size = bark_ggml_nbytes(tensor); if (ctx->header.n_tensors > 0) { - ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment); + ctx->infos[idx].offset = ctx->infos[idx - 1].offset + BARK_GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment); } ctx->header.n_tensors++; } -void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) { - const int idx = gguf_find_tensor(ctx, name); +void bark_gguf_set_tensor_type(struct bark_gguf_context * ctx, const char * name, enum bark_ggml_type type) { + const int idx = bark_gguf_find_tensor(ctx, name); if (idx < 0) { - GGML_ASSERT(false && "tensor not found"); + BARK_GGML_ASSERT(false && "tensor not found"); } ctx->infos[idx].type = type; } -void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) { - const int idx = gguf_find_tensor(ctx, name); +void bark_gguf_set_tensor_data(struct bark_gguf_context * ctx, const char * name, const void * data, size_t size) { + const int idx = bark_gguf_find_tensor(ctx, name); if (idx < 0) { - GGML_ASSERT(false && "tensor not found"); + BARK_GGML_ASSERT(false && "tensor not found"); } ctx->infos[idx].data = data; @@ -22077,27 +22073,27 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo // update offsets for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) { - ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment); + ctx->infos[i].offset = ctx->infos[i - 1].offset + BARK_GGML_PAD(ctx->infos[i - 1].size, ctx->alignment); } } -//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { +//static void bark_gguf_fwrite_str(FILE * file, const struct bark_gguf_str * val) { // fwrite(&val->n, sizeof(val->n), 1, file); // fwrite(val->data, sizeof(char), val->n, file); //} // -//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) { +//static void bark_gguf_fwrite_el(FILE * file, const void * val, size_t size) { // fwrite(val, sizeof(char), size, file); //} -struct gguf_buf { +struct bark_gguf_buf { void * data; size_t size; size_t offset; }; -static struct gguf_buf gguf_buf_init(size_t size) { - struct gguf_buf buf = { +static struct bark_gguf_buf bark_gguf_buf_init(size_t size) { + struct bark_gguf_buf buf = { /*buf.data =*/ size == 0 ? 
NULL : malloc(size), /*buf.size =*/ size, /*buf.offset =*/ 0, @@ -22106,13 +22102,13 @@ static struct gguf_buf gguf_buf_init(size_t size) { return buf; } -static void gguf_buf_free(struct gguf_buf buf) { +static void bark_gguf_buf_free(struct bark_gguf_buf buf) { if (buf.data) { free(buf.data); } } -static void gguf_buf_grow(struct gguf_buf * buf, size_t size) { +static void bark_gguf_buf_grow(struct bark_gguf_buf * buf, size_t size) { if (buf->offset + size > buf->size) { buf->size = 1.5*(buf->offset + size); if (buf->data) { @@ -22121,8 +22117,8 @@ static void gguf_buf_grow(struct gguf_buf * buf, size_t size) { } } -static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) { - gguf_buf_grow(buf, sizeof(val->n) + val->n); +static void bark_gguf_bwrite_str(struct bark_gguf_buf * buf, const struct bark_gguf_str * val) { + bark_gguf_buf_grow(buf, sizeof(val->n) + val->n); if (buf->data) { memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n)); @@ -22135,8 +22131,8 @@ static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) buf->offset += val->n; } -static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) { - gguf_buf_grow(buf, el_size); +static void bark_gguf_bwrite_el(struct bark_gguf_buf * buf, const void * val, size_t el_size) { + bark_gguf_buf_grow(buf, el_size); if (buf->data) { memcpy((char *) buf->data + buf->offset, val, el_size); @@ -22144,89 +22140,89 @@ static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_si buf->offset += el_size; } -static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { +static void bark_gguf_write_to_buf(const struct bark_gguf_context * ctx, struct bark_gguf_buf * buf, bool only_meta) { // write header - gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); - gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); - gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); - gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); + bark_gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); + bark_gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); + bark_gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); + bark_gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); // write key-value pairs for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { - struct gguf_kv * kv = &ctx->kv[i]; + struct bark_gguf_kv * kv = &ctx->kv[i]; - gguf_bwrite_str(buf, &kv->key); - gguf_bwrite_el (buf, &kv->type, sizeof(kv->type)); + bark_gguf_bwrite_str(buf, &kv->key); + bark_gguf_bwrite_el (buf, &kv->type, sizeof(kv->type)); switch (kv->type) { - case GGUF_TYPE_UINT8: gguf_bwrite_el( buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break; - case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break; - case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break; - case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break; - case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break; - case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break; - case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break; - case GGUF_TYPE_UINT64: gguf_bwrite_el (buf, &kv->value.uint64, sizeof(kv->value.uint64) ); 
break; - case GGUF_TYPE_INT64: gguf_bwrite_el (buf, &kv->value.int64, sizeof(kv->value.int64) ); break; - case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break; - case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break; - case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break; - case GGUF_TYPE_ARRAY: + case BARK_GGUF_TYPE_UINT8: bark_gguf_bwrite_el( buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break; + case BARK_GGUF_TYPE_INT8: bark_gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break; + case BARK_GGUF_TYPE_UINT16: bark_gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break; + case BARK_GGUF_TYPE_INT16: bark_gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break; + case BARK_GGUF_TYPE_UINT32: bark_gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break; + case BARK_GGUF_TYPE_INT32: bark_gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break; + case BARK_GGUF_TYPE_FLOAT32: bark_gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break; + case BARK_GGUF_TYPE_UINT64: bark_gguf_bwrite_el (buf, &kv->value.uint64, sizeof(kv->value.uint64) ); break; + case BARK_GGUF_TYPE_INT64: bark_gguf_bwrite_el (buf, &kv->value.int64, sizeof(kv->value.int64) ); break; + case BARK_GGUF_TYPE_FLOAT64: bark_gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break; + case BARK_GGUF_TYPE_BOOL: bark_gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break; + case BARK_GGUF_TYPE_STRING: bark_gguf_bwrite_str(buf, &kv->value.str ); break; + case BARK_GGUF_TYPE_ARRAY: { - gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type)); - gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) ); + bark_gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type)); + bark_gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) ); switch (kv->value.arr.type) { - case GGUF_TYPE_UINT8: - case GGUF_TYPE_INT8: - case GGUF_TYPE_UINT16: - case GGUF_TYPE_INT16: - case GGUF_TYPE_UINT32: - case GGUF_TYPE_INT32: - case GGUF_TYPE_FLOAT32: - case GGUF_TYPE_UINT64: - case GGUF_TYPE_INT64: - case GGUF_TYPE_FLOAT64: - case GGUF_TYPE_BOOL: + case BARK_GGUF_TYPE_UINT8: + case BARK_GGUF_TYPE_INT8: + case BARK_GGUF_TYPE_UINT16: + case BARK_GGUF_TYPE_INT16: + case BARK_GGUF_TYPE_UINT32: + case BARK_GGUF_TYPE_INT32: + case BARK_GGUF_TYPE_FLOAT32: + case BARK_GGUF_TYPE_UINT64: + case BARK_GGUF_TYPE_INT64: + case BARK_GGUF_TYPE_FLOAT64: + case BARK_GGUF_TYPE_BOOL: { - gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); + bark_gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * BARK_GGUF_TYPE_SIZE[kv->value.arr.type]); } break; - case GGUF_TYPE_STRING: + case BARK_GGUF_TYPE_STRING: { for (uint32_t j = 0; j < kv->value.arr.n; ++j) { - gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]); + bark_gguf_bwrite_str(buf, &((struct bark_gguf_str *) kv->value.arr.data)[j]); } } break; - case GGUF_TYPE_ARRAY: - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + case BARK_GGUF_TYPE_ARRAY: + case BARK_GGUF_TYPE_COUNT: BARK_GGML_ASSERT(false && "invalid type"); break; } } break; - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + case BARK_GGUF_TYPE_COUNT: BARK_GGML_ASSERT(false && "invalid type"); } } // write tensor infos for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - struct gguf_tensor_info * info 
= &ctx->infos[i]; + struct bark_gguf_tensor_info * info = &ctx->infos[i]; - gguf_bwrite_str(buf, &info->name); - gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims)); + bark_gguf_bwrite_str(buf, &info->name); + bark_gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims)); for (uint32_t j = 0; j < info->n_dims; ++j) { - gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j])); + bark_gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j])); } - gguf_bwrite_el(buf, &info->type, sizeof(info->type)); - gguf_bwrite_el(buf, &info->offset, sizeof(info->offset)); + bark_gguf_bwrite_el(buf, &info->type, sizeof(info->type)); + bark_gguf_bwrite_el(buf, &info->offset, sizeof(info->offset)); } // we require the data section to be aligned, so take into account any padding { const size_t offset = buf->offset; - const size_t offset_pad = GGML_PAD(offset, ctx->alignment); + const size_t offset_pad = BARK_GGML_PAD(offset, ctx->alignment); if (offset_pad != offset) { uint8_t pad = 0; for (size_t i = 0; i < offset_pad - offset; ++i) { - gguf_bwrite_el(buf, &pad, sizeof(pad)); + bark_gguf_bwrite_el(buf, &pad, sizeof(pad)); } } } @@ -22239,65 +22235,65 @@ static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * // write tensor data for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { - struct gguf_tensor_info * info = &ctx->infos[i]; + struct bark_gguf_tensor_info * info = &ctx->infos[i]; const size_t size = info->size; - const size_t size_pad = GGML_PAD(size, ctx->alignment); + const size_t size_pad = BARK_GGML_PAD(size, ctx->alignment); - gguf_bwrite_el(buf, info->data, size); + bark_gguf_bwrite_el(buf, info->data, size); if (size_pad != size) { uint8_t pad = 0; for (size_t j = 0; j < size_pad - size; ++j) { - gguf_bwrite_el(buf, &pad, sizeof(pad)); + bark_gguf_bwrite_el(buf, &pad, sizeof(pad)); } } - GGML_ASSERT(offset == info->offset); + BARK_GGML_ASSERT(offset == info->offset); offset += size_pad; } } -void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) { +void bark_gguf_write_to_file(const struct bark_gguf_context * ctx, const char * fname, bool only_meta) { FILE * file = fopen(fname, "wb"); if (!file) { - GGML_ASSERT(false && "failed to open file for writing"); + BARK_GGML_ASSERT(false && "failed to open file for writing"); } - struct gguf_buf buf = gguf_buf_init(16*1024); + struct bark_gguf_buf buf = bark_gguf_buf_init(16*1024); - gguf_write_to_buf(ctx, &buf, only_meta); + bark_gguf_write_to_buf(ctx, &buf, only_meta); fwrite(buf.data, 1, buf.offset, file); - gguf_buf_free(buf); + bark_gguf_buf_free(buf); fclose(file); } -size_t gguf_get_meta_size(const struct gguf_context * ctx) { +size_t bark_gguf_get_meta_size(const struct bark_gguf_context * ctx) { // no allocs - only compute size - struct gguf_buf buf = gguf_buf_init(0); + struct bark_gguf_buf buf = bark_gguf_buf_init(0); - gguf_write_to_buf(ctx, &buf, true); + bark_gguf_write_to_buf(ctx, &buf, true); return buf.offset; } -void gguf_get_meta_data(const struct gguf_context * ctx, void * data) { - struct gguf_buf buf = gguf_buf_init(16*1024); +void bark_gguf_get_meta_data(const struct bark_gguf_context * ctx, void * data) { + struct bark_gguf_buf buf = bark_gguf_buf_init(16*1024); - gguf_write_to_buf(ctx, &buf, true); + bark_gguf_write_to_buf(ctx, &buf, true); memcpy(data, buf.data, buf.offset); - gguf_buf_free(buf); + bark_gguf_buf_free(buf); } //////////////////////////////////////////////////////////////////////////////// -int ggml_cpu_has_avx(void) { +int 
bark_ggml_cpu_has_avx(void) { #if defined(__AVX__) return 1; #else @@ -22305,7 +22301,7 @@ int ggml_cpu_has_avx(void) { #endif } -int ggml_cpu_has_avx2(void) { +int bark_ggml_cpu_has_avx2(void) { #if defined(__AVX2__) return 1; #else @@ -22313,7 +22309,7 @@ int ggml_cpu_has_avx2(void) { #endif } -int ggml_cpu_has_avx512(void) { +int bark_ggml_cpu_has_avx512(void) { #if defined(__AVX512F__) return 1; #else @@ -22321,7 +22317,7 @@ int ggml_cpu_has_avx512(void) { #endif } -int ggml_cpu_has_avx512_vbmi(void) { +int bark_ggml_cpu_has_avx512_vbmi(void) { #if defined(__AVX512VBMI__) return 1; #else @@ -22329,7 +22325,7 @@ int ggml_cpu_has_avx512_vbmi(void) { #endif } -int ggml_cpu_has_avx512_vnni(void) { +int bark_ggml_cpu_has_avx512_vnni(void) { #if defined(__AVX512VNNI__) return 1; #else @@ -22337,7 +22333,7 @@ int ggml_cpu_has_avx512_vnni(void) { #endif } -int ggml_cpu_has_fma(void) { +int bark_ggml_cpu_has_fma(void) { #if defined(__FMA__) return 1; #else @@ -22345,7 +22341,7 @@ int ggml_cpu_has_fma(void) { #endif } -int ggml_cpu_has_neon(void) { +int bark_ggml_cpu_has_neon(void) { #if defined(__ARM_NEON) return 1; #else @@ -22353,7 +22349,7 @@ int ggml_cpu_has_neon(void) { #endif } -int ggml_cpu_has_arm_fma(void) { +int bark_ggml_cpu_has_arm_fma(void) { #if defined(__ARM_FEATURE_FMA) return 1; #else @@ -22361,15 +22357,15 @@ int ggml_cpu_has_arm_fma(void) { #endif } -int ggml_cpu_has_metal(void) { -#if defined(GGML_USE_METAL) +int bark_ggml_cpu_has_metal(void) { +#if defined(BARK_GGML_USE_METAL) return 1; #else return 0; #endif } -int ggml_cpu_has_f16c(void) { +int bark_ggml_cpu_has_f16c(void) { #if defined(__F16C__) return 1; #else @@ -22377,7 +22373,7 @@ int ggml_cpu_has_f16c(void) { #endif } -int ggml_cpu_has_fp16_va(void) { +int bark_ggml_cpu_has_fp16_va(void) { #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) return 1; #else @@ -22385,7 +22381,7 @@ int ggml_cpu_has_fp16_va(void) { #endif } -int ggml_cpu_has_wasm_simd(void) { +int bark_ggml_cpu_has_wasm_simd(void) { #if defined(__wasm_simd128__) return 1; #else @@ -22393,35 +22389,35 @@ int ggml_cpu_has_wasm_simd(void) { #endif } -int ggml_cpu_has_blas(void) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) +int bark_ggml_cpu_has_blas(void) { +#if defined(BARK_GGML_USE_ACCELERATE) || defined(BARK_GGML_USE_OPENBLAS) || defined(BARK_GGML_USE_CUBLAS) || defined(BARK_GGML_USE_CLBLAST) return 1; #else return 0; #endif } -int ggml_cpu_has_cublas(void) { -#if defined(GGML_USE_CUBLAS) +int bark_ggml_cpu_has_cublas(void) { +#if defined(BARK_GGML_USE_CUBLAS) return 1; #else return 0; #endif } -int ggml_cpu_has_clblast(void) { -#if defined(GGML_USE_CLBLAST) +int bark_ggml_cpu_has_clblast(void) { +#if defined(BARK_GGML_USE_CLBLAST) return 1; #else return 0; #endif } -int ggml_cpu_has_gpublas(void) { - return ggml_cpu_has_cublas() || ggml_cpu_has_clblast(); +int bark_ggml_cpu_has_gpublas(void) { + return bark_ggml_cpu_has_cublas() || bark_ggml_cpu_has_clblast(); } -int ggml_cpu_has_sse3(void) { +int bark_ggml_cpu_has_sse3(void) { #if defined(__SSE3__) return 1; #else @@ -22429,7 +22425,7 @@ int ggml_cpu_has_sse3(void) { #endif } -int ggml_cpu_has_ssse3(void) { +int bark_ggml_cpu_has_ssse3(void) { #if defined(__SSSE3__) return 1; #else @@ -22437,7 +22433,7 @@ int ggml_cpu_has_ssse3(void) { #endif } -int ggml_cpu_has_vsx(void) { +int bark_ggml_cpu_has_vsx(void) { #if defined(__POWER9_VECTOR__) return 1; #else diff --git a/cpp/ggml.h b/cpp/ggml.h index e947b8a..767b198 100644 --- 
a/cpp/ggml.h +++ b/cpp/ggml.h @@ -32,22 +32,22 @@ // For example, here we define the function: f(x) = a*x^2 + b // // { -// struct ggml_init_params params = { +// struct bark_ggml_init_params params = { // .mem_size = 16*1024*1024, // .mem_buffer = NULL, // }; // // // memory allocation happens here -// struct ggml_context * ctx = ggml_init(params); +// struct bark_ggml_context * ctx = bark_ggml_init(params); // -// struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); +// struct bark_ggml_tensor * x = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1); // -// ggml_set_param(ctx, x); // x is an input variable +// bark_ggml_set_param(ctx, x); // x is an input variable // -// struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * x2 = ggml_mul(ctx, x, x); -// struct ggml_tensor * f = ggml_add(ctx, ggml_mul(ctx, a, x2), b); +// struct bark_ggml_tensor * a = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1); +// struct bark_ggml_tensor * b = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1); +// struct bark_ggml_tensor * x2 = bark_ggml_mul(ctx, x, x); +// struct bark_ggml_tensor * f = bark_ggml_add(ctx, bark_ggml_mul(ctx, a, x2), b); // // ... // } @@ -58,33 +58,33 @@ // { // ... // -// struct ggml_cgraph gf = ggml_build_forward(f); +// struct bark_ggml_cgraph gf = bark_ggml_build_forward(f); // // // set the input variable and parameter values -// ggml_set_f32(x, 2.0f); -// ggml_set_f32(a, 3.0f); -// ggml_set_f32(b, 4.0f); +// bark_ggml_set_f32(x, 2.0f); +// bark_ggml_set_f32(a, 3.0f); +// bark_ggml_set_f32(b, 4.0f); // -// ggml_graph_compute_with_ctx(ctx, &gf, n_threads); +// bark_ggml_graph_compute_with_ctx(ctx, &gf, n_threads); // -// printf("f = %f\n", ggml_get_f32_1d(f, 0)); +// printf("f = %f\n", bark_ggml_get_f32_1d(f, 0)); // // ... // } // -// The actual computation is performed in the ggml_graph_compute() function. +// The actual computation is performed in the bark_ggml_graph_compute() function. // -// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the -// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know +// The bark_ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the +// bark_ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know // in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory -// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was +// and after defining the computation graph, call the bark_ggml_used_mem() function to find out how much memory was // actually needed. // -// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic +// The bark_ggml_set_param() function marks a tensor as an input variable. This is used by the automatic // differentiation and optimization algorithms. // // The described approach allows to define the function graph once and then compute its forward or backward graphs -// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way +// multiple times. All computations will use the same memory buffer allocated in the bark_ggml_init() function. 
This way // the user can avoid the memory allocation overhead at runtime. // // The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class @@ -95,9 +95,9 @@ // clear that the library needs to support more complex operations. The way to support these operations is not clear // yet, but a few examples are demonstrated in the following operations: // -// - ggml_permute() -// - ggml_conv_1d_1s() -// - ggml_conv_1d_2s() +// - bark_ggml_permute() +// - bark_ggml_conv_1d_1s() +// - bark_ggml_conv_1d_2s() // // For each tensor operator, the library implements a forward and backward computation function. The forward function // computes the output tensor value given the input tensor values. The backward function computes the adjoint of the @@ -108,20 +108,20 @@ // https://www.youtube.com/watch?v=wG_nF1awSSY // // -// ## Tensor data (struct ggml_tensor) +// ## Tensor data (struct bark_ggml_tensor) // -// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of +// The tensors are stored in memory via the bark_ggml_tensor struct. The structure provides information about the size of // the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains // pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example: // // { -// struct ggml_tensor * c = ggml_add(ctx, a, b); +// struct bark_ggml_tensor * c = bark_ggml_add(ctx, a, b); // // assert(c->src[0] == a); // assert(c->src[1] == b); // } // -// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the +// The multi-dimensional tensors are stored in row-major order. The bark_ggml_tensor struct contains fields for the // number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows // to store tensors that are not contiguous in memory, which is useful for operations such as transposition and // permutation. All tensor operations have to take the stride into account and not assume that the tensor is @@ -133,7 +133,7 @@ // const int nx = 2; // const int ny = 3; // -// struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny); +// struct bark_ggml_tensor * a = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, nx, ny); // // for (int y = 0; y < ny; y++) { // for (int x = 0; x < nx; x++) { @@ -144,9 +144,9 @@ // ... // } // -// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used. +// Alternatively, there are helper functions, such as bark_ggml_get_f32_1d() and bark_ggml_set_f32_1d() that can be used. 
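The usage walkthrough in the comments above defines f(x) = a*x^2 + b in one fragment and evaluates it in a second one. Assembled into a single compilable program against the renamed bark_ggml_* API, the flow looks like the following minimal sketch — assuming the header is included as "ggml.h" from cpp/, a 16 MB context is large enough, and one compute thread suffices:

#include <stdio.h>
#include "ggml.h"

int main(void) {
    struct bark_ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,        // NULL -> the context allocates its own pool
    };

    // memory allocation happens here
    struct bark_ggml_context * ctx = bark_ggml_init(params);

    struct bark_ggml_tensor * x = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1);
    bark_ggml_set_param(ctx, x);       // x is an input variable

    struct bark_ggml_tensor * a  = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1);
    struct bark_ggml_tensor * b  = bark_ggml_new_tensor_1d(ctx, BARK_GGML_TYPE_F32, 1);
    struct bark_ggml_tensor * x2 = bark_ggml_mul(ctx, x, x);
    struct bark_ggml_tensor * f  = bark_ggml_add(ctx, bark_ggml_mul(ctx, a, x2), b);

    // build the forward graph, set the inputs, run it
    struct bark_ggml_cgraph gf = bark_ggml_build_forward(f);

    bark_ggml_set_f32(x, 2.0f);
    bark_ggml_set_f32(a, 3.0f);
    bark_ggml_set_f32(b, 4.0f);

    bark_ggml_graph_compute_with_ctx(ctx, &gf, /*n_threads =*/ 1);

    printf("f = %f\n", bark_ggml_get_f32_1d(f, 0));   // 3*2^2 + 4 = 16

    bark_ggml_free(ctx);
    return 0;
}

The strided layout described above carries over unchanged after the rename: element (x, y) of the 2-D F32 tensor lives at (char *) a->data + y*a->nb[1] + x*a->nb[0], which is presumably what the elided loop body writes to; the bark_ggml_get_f32_1d()/bark_ggml_set_f32_1d() helpers hide that pointer arithmetic for the common case.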
// -// ## The matrix multiplication operator (ggml_mul_mat) +// ## The matrix multiplication operator (bark_ggml_mul_mat) // // TODO // @@ -172,88 +172,88 @@ // // -#ifdef GGML_SHARED +#ifdef BARK_GGML_SHARED # if defined(_WIN32) && !defined(__MINGW32__) -# ifdef GGML_BUILD -# define GGML_API __declspec(dllexport) +# ifdef BARK_GGML_BUILD +# define BARK_GGML_API __declspec(dllexport) # else -# define GGML_API __declspec(dllimport) +# define BARK_GGML_API __declspec(dllimport) # endif # else -# define GGML_API __attribute__ ((visibility ("default"))) +# define BARK_GGML_API __attribute__ ((visibility ("default"))) # endif #else -# define GGML_API +# define BARK_GGML_API #endif // TODO: support for clang #ifdef __GNUC__ -# define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint))) +# define BARK_GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint))) #elif defined(_MSC_VER) -# define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func +# define BARK_GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func #else -# define GGML_DEPRECATED(func, hint) func +# define BARK_GGML_DEPRECATED(func, hint) func #endif #ifndef __GNUC__ -# define GGML_ATTRIBUTE_FORMAT(...) +# define BARK_GGML_ATTRIBUTE_FORMAT(...) #elif defined(__MINGW32__) -# define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +# define BARK_GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) #else -# define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) +# define BARK_GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) #endif #include #include #include -#define GGML_FILE_MAGIC 0x67676d6c // "ggml" -#define GGML_FILE_VERSION 1 +#define BARK_GGML_FILE_MAGIC 0x67676d6c // "ggml" +#define BARK_GGML_FILE_VERSION 1 -#define GGML_QNT_VERSION 2 // bump this on quantization format changes -#define GGML_QNT_VERSION_FACTOR 1000 // do not change this +#define BARK_GGML_QNT_VERSION 2 // bump this on quantization format changes +#define BARK_GGML_QNT_VERSION_FACTOR 1000 // do not change this -#define GGML_MAX_DIMS 4 -#define GGML_MAX_NODES 100000 -#define GGML_MAX_PARAMS 1024 -#define GGML_MAX_CONTEXTS 64 -#define GGML_MAX_SRC 6 -#define GGML_MAX_NAME 64 -#define GGML_MAX_OP_PARAMS 32 -#define GGML_DEFAULT_N_THREADS 4 +#define BARK_GGML_MAX_DIMS 4 +#define BARK_GGML_MAX_NODES 100000 +#define BARK_GGML_MAX_PARAMS 1024 +#define BARK_GGML_MAX_CONTEXTS 64 +#define BARK_GGML_MAX_SRC 6 +#define BARK_GGML_MAX_NAME 64 +#define BARK_GGML_MAX_OP_PARAMS 32 +#define BARK_GGML_DEFAULT_N_THREADS 4 #if UINTPTR_MAX == 0xFFFFFFFF - #define GGML_MEM_ALIGN 4 + #define BARK_GGML_MEM_ALIGN 4 #else - #define GGML_MEM_ALIGN 16 + #define BARK_GGML_MEM_ALIGN 16 #endif -#define GGML_EXIT_SUCCESS 0 -#define GGML_EXIT_ABORTED 1 +#define BARK_GGML_EXIT_SUCCESS 0 +#define BARK_GGML_EXIT_ABORTED 1 -#define GGUF_MAGIC 0x46554747 // "GGUF" -#define GGUF_VERSION 2 +#define BARK_GGUF_MAGIC 0x46554747 // "GGUF" +#define BARK_GGUF_VERSION 2 -#define GGUF_DEFAULT_ALIGNMENT 32 +#define BARK_GGUF_DEFAULT_ALIGNMENT 32 -#define GGML_UNUSED(x) (void)(x) +#define BARK_GGML_UNUSED(x) (void)(x) -#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1)) +#define BARK_GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1)) -#define GGML_ASSERT(x) \ +#define BARK_GGML_ASSERT(x) \ do { \ if (!(x)) { \ - fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ + fprintf(stderr, "BARK_GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ abort(); \ } \ } while 
(0) #ifndef NDEBUG -#define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached") +#define BARK_GGML_UNREACHABLE() BARK_GGML_ASSERT(!"statement should not be reached") #elif defined(__GNUC__) -#define GGML_UNREACHABLE() __builtin_unreachable() +#define BARK_GGML_UNREACHABLE() __builtin_unreachable() #else -#define GGML_UNREACHABLE() ((void) 0) +#define BARK_GGML_UNREACHABLE() ((void) 0) #endif // used to copy the number of elements and stride in bytes of tensors into local variables. @@ -261,309 +261,309 @@ // // example: // -// GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); -// GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); +// BARK_GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); +// BARK_GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); // -#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \ +#define BARK_GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \ const type prefix##0 = (pointer)->array[0]; \ - GGML_UNUSED(prefix##0); -#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \ - GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \ + BARK_GGML_UNUSED(prefix##0); +#define BARK_GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \ + BARK_GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \ const type prefix##1 = (pointer)->array[1]; \ - GGML_UNUSED(prefix##1); -#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \ - GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \ + BARK_GGML_UNUSED(prefix##1); +#define BARK_GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \ + BARK_GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \ const type prefix##2 = (pointer)->array[2]; \ - GGML_UNUSED(prefix##2); -#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \ - GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \ + BARK_GGML_UNUSED(prefix##2); +#define BARK_GGML_TENSOR_LOCALS(type, prefix, pointer, array) \ + BARK_GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \ const type prefix##3 = (pointer)->array[3]; \ - GGML_UNUSED(prefix##3); + BARK_GGML_UNUSED(prefix##3); #ifdef __cplusplus extern "C" { #endif #if defined(__ARM_NEON) && defined(__CUDACC__) - typedef half ggml_fp16_t; + typedef half bark_ggml_fp16_t; #elif defined(__ARM_NEON) - typedef __fp16 ggml_fp16_t; + typedef __fp16 bark_ggml_fp16_t; #else - typedef uint16_t ggml_fp16_t; + typedef uint16_t bark_ggml_fp16_t; #endif // convert FP16 <-> FP32 - GGML_API float ggml_fp16_to_fp32(ggml_fp16_t x); - GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x); - - GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n); - GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n); - - struct ggml_object; - struct ggml_context; - - enum ggml_type { - GGML_TYPE_F32 = 0, - GGML_TYPE_F16 = 1, - GGML_TYPE_Q4_0 = 2, - GGML_TYPE_Q4_1 = 3, - // GGML_TYPE_Q4_2 = 4, support has been removed - // GGML_TYPE_Q4_3 (5) support has been removed - GGML_TYPE_Q5_0 = 6, - GGML_TYPE_Q5_1 = 7, - GGML_TYPE_Q8_0 = 8, - GGML_TYPE_Q8_1 = 9, + BARK_GGML_API float bark_ggml_fp16_to_fp32(bark_ggml_fp16_t x); + BARK_GGML_API bark_ggml_fp16_t bark_ggml_fp32_to_fp16(float x); + + BARK_GGML_API void bark_ggml_fp16_to_fp32_row(const bark_ggml_fp16_t * x, float * y, int n); + BARK_GGML_API void bark_ggml_fp32_to_fp16_row(const float * x, bark_ggml_fp16_t * y, int n); + + struct bark_ggml_object; + struct bark_ggml_context; + + enum bark_ggml_type { + BARK_GGML_TYPE_F32 = 0, + BARK_GGML_TYPE_F16 = 1, + BARK_GGML_TYPE_Q4_0 = 2, + BARK_GGML_TYPE_Q4_1 = 3, + // BARK_GGML_TYPE_Q4_2 = 4, support has been removed + // 
BARK_GGML_TYPE_Q4_3 (5) support has been removed + BARK_GGML_TYPE_Q5_0 = 6, + BARK_GGML_TYPE_Q5_1 = 7, + BARK_GGML_TYPE_Q8_0 = 8, + BARK_GGML_TYPE_Q8_1 = 9, // k-quantizations - GGML_TYPE_Q2_K = 10, - GGML_TYPE_Q3_K = 11, - GGML_TYPE_Q4_K = 12, - GGML_TYPE_Q5_K = 13, - GGML_TYPE_Q6_K = 14, - GGML_TYPE_Q8_K = 15, - GGML_TYPE_I8, - GGML_TYPE_I16, - GGML_TYPE_I32, - GGML_TYPE_COUNT, + BARK_GGML_TYPE_Q2_K = 10, + BARK_GGML_TYPE_Q3_K = 11, + BARK_GGML_TYPE_Q4_K = 12, + BARK_GGML_TYPE_Q5_K = 13, + BARK_GGML_TYPE_Q6_K = 14, + BARK_GGML_TYPE_Q8_K = 15, + BARK_GGML_TYPE_I8, + BARK_GGML_TYPE_I16, + BARK_GGML_TYPE_I32, + BARK_GGML_TYPE_COUNT, }; - enum ggml_backend_type { - GGML_BACKEND_CPU = 0, - GGML_BACKEND_GPU = 10, - GGML_BACKEND_GPU_SPLIT = 20, + enum bark_ggml_backend_type { + BARK_GGML_BACKEND_CPU = 0, + BARK_GGML_BACKEND_GPU = 10, + BARK_GGML_BACKEND_GPU_SPLIT = 20, }; // model file types - enum ggml_ftype { - GGML_FTYPE_UNKNOWN = -1, - GGML_FTYPE_ALL_F32 = 0, - GGML_FTYPE_MOSTLY_F16 = 1, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 - GGML_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors - GGML_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors - GGML_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors - GGML_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors - GGML_FTYPE_MOSTLY_Q3_K = 11, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors - GGML_FTYPE_MOSTLY_Q5_K = 13, // except 1d tensors - GGML_FTYPE_MOSTLY_Q6_K = 14, // except 1d tensors + enum bark_ggml_ftype { + BARK_GGML_FTYPE_UNKNOWN = -1, + BARK_GGML_FTYPE_ALL_F32 = 0, + BARK_GGML_FTYPE_MOSTLY_F16 = 1, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 + BARK_GGML_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q3_K = 11, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q4_K = 12, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q5_K = 13, // except 1d tensors + BARK_GGML_FTYPE_MOSTLY_Q6_K = 14, // except 1d tensors }; // available tensor operations: - enum ggml_op { - GGML_OP_NONE = 0, - - GGML_OP_DUP, - GGML_OP_ADD, - GGML_OP_ADD1, - GGML_OP_ACC, - GGML_OP_SUB, - GGML_OP_MUL, - GGML_OP_DIV, - GGML_OP_SQR, - GGML_OP_SQRT, - GGML_OP_LOG, - GGML_OP_SUM, - GGML_OP_SUM_ROWS, - GGML_OP_MEAN, - GGML_OP_ARGMAX, - GGML_OP_REPEAT, - GGML_OP_REPEAT_BACK, - GGML_OP_CONCAT, - GGML_OP_SILU_BACK, - GGML_OP_NORM, // normalize - GGML_OP_RMS_NORM, - GGML_OP_RMS_NORM_BACK, - GGML_OP_GROUP_NORM, - - GGML_OP_MUL_MAT, - GGML_OP_OUT_PROD, - - GGML_OP_SCALE, - GGML_OP_SET, - GGML_OP_CPY, - GGML_OP_CONT, - GGML_OP_RESHAPE, - GGML_OP_VIEW, - GGML_OP_PERMUTE, - GGML_OP_TRANSPOSE, - GGML_OP_GET_ROWS, - GGML_OP_GET_ROWS_BACK, - GGML_OP_DIAG, - GGML_OP_DIAG_MASK_INF, - GGML_OP_DIAG_MASK_ZERO, - GGML_OP_SOFT_MAX, - GGML_OP_SOFT_MAX_BACK, - GGML_OP_ROPE, - GGML_OP_ROPE_BACK, - GGML_OP_ALIBI, - GGML_OP_CLAMP, - GGML_OP_CONV_1D, - GGML_OP_CONV_1D_STAGE_0, // internal - GGML_OP_CONV_1D_STAGE_1, // internal - GGML_OP_CONV_TRANSPOSE_1D, - GGML_OP_CONV_2D, - GGML_OP_CONV_2D_STAGE_0, // internal - GGML_OP_CONV_2D_STAGE_1, // internal - 
GGML_OP_CONV_TRANSPOSE_2D, - GGML_OP_POOL_1D, - GGML_OP_POOL_2D, - GGML_OP_PAD_REFLEC_1D, - - GGML_OP_UPSCALE, // nearest interpolate - - GGML_OP_FLASH_ATTN, - GGML_OP_FLASH_FF, - GGML_OP_FLASH_ATTN_BACK, - GGML_OP_WIN_PART, - GGML_OP_WIN_UNPART, - GGML_OP_GET_REL_POS, - GGML_OP_ADD_REL_POS, - - GGML_OP_UNARY, - - GGML_OP_MAP_UNARY, - GGML_OP_MAP_BINARY, - - GGML_OP_MAP_CUSTOM1_F32, - GGML_OP_MAP_CUSTOM2_F32, - GGML_OP_MAP_CUSTOM3_F32, - - GGML_OP_MAP_CUSTOM1, - GGML_OP_MAP_CUSTOM2, - GGML_OP_MAP_CUSTOM3, - - GGML_OP_CROSS_ENTROPY_LOSS, - GGML_OP_CROSS_ENTROPY_LOSS_BACK, - - GGML_OP_COUNT, + enum bark_ggml_op { + BARK_GGML_OP_NONE = 0, + + BARK_GGML_OP_DUP, + BARK_GGML_OP_ADD, + BARK_GGML_OP_ADD1, + BARK_GGML_OP_ACC, + BARK_GGML_OP_SUB, + BARK_GGML_OP_MUL, + BARK_GGML_OP_DIV, + BARK_GGML_OP_SQR, + BARK_GGML_OP_SQRT, + BARK_GGML_OP_LOG, + BARK_GGML_OP_SUM, + BARK_GGML_OP_SUM_ROWS, + BARK_GGML_OP_MEAN, + BARK_GGML_OP_ARGMAX, + BARK_GGML_OP_REPEAT, + BARK_GGML_OP_REPEAT_BACK, + BARK_GGML_OP_CONCAT, + BARK_GGML_OP_SILU_BACK, + BARK_GGML_OP_NORM, // normalize + BARK_GGML_OP_RMS_NORM, + BARK_GGML_OP_RMS_NORM_BACK, + BARK_GGML_OP_GROUP_NORM, + + BARK_GGML_OP_MUL_MAT, + BARK_GGML_OP_OUT_PROD, + + BARK_GGML_OP_SCALE, + BARK_GGML_OP_SET, + BARK_GGML_OP_CPY, + BARK_GGML_OP_CONT, + BARK_GGML_OP_RESHAPE, + BARK_GGML_OP_VIEW, + BARK_GGML_OP_PERMUTE, + BARK_GGML_OP_TRANSPOSE, + BARK_GGML_OP_GET_ROWS, + BARK_GGML_OP_GET_ROWS_BACK, + BARK_GGML_OP_DIAG, + BARK_GGML_OP_DIAG_MASK_INF, + BARK_GGML_OP_DIAG_MASK_ZERO, + BARK_GGML_OP_SOFT_MAX, + BARK_GGML_OP_SOFT_MAX_BACK, + BARK_GGML_OP_ROPE, + BARK_GGML_OP_ROPE_BACK, + BARK_GGML_OP_ALIBI, + BARK_GGML_OP_CLAMP, + BARK_GGML_OP_CONV_1D, + BARK_GGML_OP_CONV_1D_STAGE_0, // internal + BARK_GGML_OP_CONV_1D_STAGE_1, // internal + BARK_GGML_OP_CONV_TRANSPOSE_1D, + BARK_GGML_OP_CONV_2D, + BARK_GGML_OP_CONV_2D_STAGE_0, // internal + BARK_GGML_OP_CONV_2D_STAGE_1, // internal + BARK_GGML_OP_CONV_TRANSPOSE_2D, + BARK_GGML_OP_POOL_1D, + BARK_GGML_OP_POOL_2D, + BARK_GGML_OP_PAD_REFLEC_1D, + + BARK_GGML_OP_UPSCALE, // nearest interpolate + + BARK_GGML_OP_FLASH_ATTN, + BARK_GGML_OP_FLASH_FF, + BARK_GGML_OP_FLASH_ATTN_BACK, + BARK_GGML_OP_WIN_PART, + BARK_GGML_OP_WIN_UNPART, + BARK_GGML_OP_GET_REL_POS, + BARK_GGML_OP_ADD_REL_POS, + + BARK_GGML_OP_UNARY, + + BARK_GGML_OP_MAP_UNARY, + BARK_GGML_OP_MAP_BINARY, + + BARK_GGML_OP_MAP_CUSTOM1_F32, + BARK_GGML_OP_MAP_CUSTOM2_F32, + BARK_GGML_OP_MAP_CUSTOM3_F32, + + BARK_GGML_OP_MAP_CUSTOM1, + BARK_GGML_OP_MAP_CUSTOM2, + BARK_GGML_OP_MAP_CUSTOM3, + + BARK_GGML_OP_CROSS_ENTROPY_LOSS, + BARK_GGML_OP_CROSS_ENTROPY_LOSS_BACK, + + BARK_GGML_OP_COUNT, }; - enum ggml_unary_op { - GGML_UNARY_OP_ABS, - GGML_UNARY_OP_SGN, - GGML_UNARY_OP_NEG, - GGML_UNARY_OP_STEP, - GGML_UNARY_OP_TANH, - GGML_UNARY_OP_ELU, - GGML_UNARY_OP_RELU, - GGML_UNARY_OP_GELU, - GGML_UNARY_OP_GELU_QUICK, - GGML_UNARY_OP_SILU, + enum bark_ggml_unary_op { + BARK_GGML_UNARY_OP_ABS, + BARK_GGML_UNARY_OP_SGN, + BARK_GGML_UNARY_OP_NEG, + BARK_GGML_UNARY_OP_STEP, + BARK_GGML_UNARY_OP_TANH, + BARK_GGML_UNARY_OP_ELU, + BARK_GGML_UNARY_OP_RELU, + BARK_GGML_UNARY_OP_GELU, + BARK_GGML_UNARY_OP_GELU_QUICK, + BARK_GGML_UNARY_OP_SILU, }; - enum ggml_object_type { - GGML_OBJECT_TENSOR, - GGML_OBJECT_GRAPH, - GGML_OBJECT_WORK_BUFFER + enum bark_ggml_object_type { + BARK_GGML_OBJECT_TENSOR, + BARK_GGML_OBJECT_GRAPH, + BARK_GGML_OBJECT_WORK_BUFFER }; - enum ggml_log_level { - GGML_LOG_LEVEL_ERROR = 2, - GGML_LOG_LEVEL_WARN = 3, - GGML_LOG_LEVEL_INFO = 4 + enum bark_ggml_log_level { + 
BARK_GGML_LOG_LEVEL_ERROR = 2, + BARK_GGML_LOG_LEVEL_WARN = 3, + BARK_GGML_LOG_LEVEL_INFO = 4 }; // ggml object - struct ggml_object { + struct bark_ggml_object { size_t offs; size_t size; - struct ggml_object * next; + struct bark_ggml_object * next; - enum ggml_object_type type; + enum bark_ggml_object_type type; char padding[4]; }; - static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object); + static const size_t BARK_GGML_OBJECT_SIZE = sizeof(struct bark_ggml_object); // n-dimensional tensor - struct ggml_tensor { - enum ggml_type type; - enum ggml_backend_type backend; + struct bark_ggml_tensor { + enum bark_ggml_type type; + enum bark_ggml_backend_type backend; - struct ggml_backend_buffer * buffer; + struct bark_ggml_backend_buffer * buffer; int n_dims; - int64_t ne[GGML_MAX_DIMS]; // number of elements - size_t nb[GGML_MAX_DIMS]; // stride in bytes: - // nb[0] = ggml_type_size(type) - // nb[1] = nb[0] * (ne[0] / ggml_blck_size(type)) + padding + int64_t ne[BARK_GGML_MAX_DIMS]; // number of elements + size_t nb[BARK_GGML_MAX_DIMS]; // stride in bytes: + // nb[0] = bark_ggml_type_size(type) + // nb[1] = nb[0] * (ne[0] / bark_ggml_blck_size(type)) + padding // nb[i] = nb[i-1] * ne[i-1] // compute data - enum ggml_op op; + enum bark_ggml_op op; // op params - allocated as int32_t for alignment - int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; + int32_t op_params[BARK_GGML_MAX_OP_PARAMS / sizeof(int32_t)]; bool is_param; - struct ggml_tensor * grad; - struct ggml_tensor * src[GGML_MAX_SRC]; + struct bark_ggml_tensor * grad; + struct bark_ggml_tensor * src[BARK_GGML_MAX_SRC]; // performance int perf_runs; int64_t perf_cycles; int64_t perf_time_us; - struct ggml_tensor * view_src; + struct bark_ggml_tensor * view_src; size_t view_offs; void * data; - char name[GGML_MAX_NAME]; + char name[BARK_GGML_MAX_NAME]; void * extra; // extra things e.g. 
for ggml-cuda.cu char padding[12]; }; - static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor); + static const size_t BARK_GGML_TENSOR_SIZE = sizeof(struct bark_ggml_tensor); - // the compute plan that needs to be prepared for ggml_graph_compute() + // the compute plan that needs to be prepared for bark_ggml_graph_compute() // since https://github.com/ggerganov/ggml/issues/287 - struct ggml_cplan { - size_t work_size; // size of work buffer, calculated by `ggml_graph_plan()` - uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()` + struct bark_ggml_cplan { + size_t work_size; // size of work buffer, calculated by `bark_ggml_graph_plan()` + uint8_t * work_data; // work buffer, to be allocated by caller before calling to `bark_ggml_graph_compute()` int n_threads; // the `n_tasks` of nodes, 1:1 mapping to cgraph nodes - int n_tasks[GGML_MAX_NODES]; + int n_tasks[BARK_GGML_MAX_NODES]; - // abort ggml_graph_compute when true + // abort bark_ggml_graph_compute when true bool (*abort_callback)(void * data); void * abort_callback_data; }; - // next prime after GGML_MAX_NODES - // #define GGML_GRAPH_HASHTABLE_SIZE 4099 - // next prime after GGML_MAX_NODES * 2 (nodes + leafs) - // #define GGML_GRAPH_HASHTABLE_SIZE 8273 - // #define GGML_GRAPH_HASHTABLE_SIZE 16411 - #define GGML_GRAPH_HASHTABLE_SIZE 200003 - - enum ggml_cgraph_eval_order { - GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0, - GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT, - GGML_CGRAPH_EVAL_ORDER_COUNT + // next prime after BARK_GGML_MAX_NODES + // #define BARK_GGML_GRAPH_HASHTABLE_SIZE 4099 + // next prime after BARK_GGML_MAX_NODES * 2 (nodes + leafs) + // #define BARK_GGML_GRAPH_HASHTABLE_SIZE 8273 + // #define BARK_GGML_GRAPH_HASHTABLE_SIZE 16411 + #define BARK_GGML_GRAPH_HASHTABLE_SIZE 200003 + + enum bark_ggml_cgraph_eval_order { + BARK_GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0, + BARK_GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT, + BARK_GGML_CGRAPH_EVAL_ORDER_COUNT }; // computation graph - struct ggml_cgraph { + struct bark_ggml_cgraph { int n_nodes; int n_leafs; - struct ggml_tensor * nodes[GGML_MAX_NODES]; - struct ggml_tensor * grads[GGML_MAX_NODES]; - struct ggml_tensor * leafs[GGML_MAX_NODES]; + struct bark_ggml_tensor * nodes[BARK_GGML_MAX_NODES]; + struct bark_ggml_tensor * grads[BARK_GGML_MAX_NODES]; + struct bark_ggml_tensor * leafs[BARK_GGML_MAX_NODES]; - void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE]; + void * visited_hash_table[BARK_GGML_GRAPH_HASHTABLE_SIZE]; - enum ggml_cgraph_eval_order order; + enum bark_ggml_cgraph_eval_order order; // performance int perf_runs; @@ -571,16 +571,16 @@ extern "C" { int64_t perf_time_us; }; - static const size_t GGML_GRAPH_SIZE = sizeof(struct ggml_cgraph); + static const size_t BARK_GGML_GRAPH_SIZE = sizeof(struct bark_ggml_cgraph); // scratch buffer - struct ggml_scratch { + struct bark_ggml_scratch { size_t offs; size_t size; void * data; }; - struct ggml_init_params { + struct bark_ggml_init_params { // memory pool size_t mem_size; // bytes void * mem_buffer; // if NULL, memory will be allocated internally @@ -592,14 +592,14 @@ extern "C" { // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled. // This behavior was changed since https://github.com/ggerganov/llama.cpp/pull/1995. 
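The bark_ggml_cplan comments above describe a two-step protocol: the caller first asks for a plan (which reports work_size), allocates the work buffer itself, and only then runs the graph. The sketch below shows that calling sequence, assuming the usual bark_ggml_graph_plan()/bark_ggml_graph_compute() signatures of this ggml generation — they are only referenced in the comments here, not declared in this hunk:

#include <stdint.h>
#include <stdlib.h>
#include "ggml.h"

// Runs an already-built graph with an explicitly managed work buffer.
// Returns BARK_GGML_EXIT_SUCCESS, or BARK_GGML_EXIT_ABORTED if an abort
// callback (none installed here) stopped the computation.
static int run_graph(struct bark_ggml_cgraph * gf, int n_threads) {
    struct bark_ggml_cplan plan = bark_ggml_graph_plan(gf, n_threads);

    uint8_t * work = NULL;
    if (plan.work_size > 0) {
        work = malloc(plan.work_size);   // work buffer is owned by the caller
        plan.work_data = work;
    }

    plan.abort_callback      = NULL;     // optional cancellation hook
    plan.abort_callback_data = NULL;

    const int status = bark_ggml_graph_compute(gf, &plan);

    free(work);
    return status;
}

The convenience wrapper bark_ggml_graph_compute_with_ctx(), used in the earlier f(x) example, follows the same pattern but takes the work buffer from the context instead of the caller — which is presumably what the BARK_GGML_OBJECT_WORK_BUFFER object type above is for.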
- enum ggml_task_type { - GGML_TASK_INIT = 0, - GGML_TASK_COMPUTE, - GGML_TASK_FINALIZE, + enum bark_ggml_task_type { + BARK_GGML_TASK_INIT = 0, + BARK_GGML_TASK_COMPUTE, + BARK_GGML_TASK_FINALIZE, }; - struct ggml_compute_params { - enum ggml_task_type type; + struct bark_ggml_compute_params { + enum bark_ggml_task_type type; // ith = thread index, nth = number of threads int ith, nth; @@ -611,536 +611,536 @@ extern "C" { // misc - GGML_API void ggml_time_init(void); // call this once at the beginning of the program - GGML_API int64_t ggml_time_ms(void); - GGML_API int64_t ggml_time_us(void); - GGML_API int64_t ggml_cycles(void); - GGML_API int64_t ggml_cycles_per_ms(void); + BARK_GGML_API void bark_ggml_time_init(void); // call this once at the beginning of the program + BARK_GGML_API int64_t bark_ggml_time_ms(void); + BARK_GGML_API int64_t bark_ggml_time_us(void); + BARK_GGML_API int64_t bark_ggml_cycles(void); + BARK_GGML_API int64_t bark_ggml_cycles_per_ms(void); - GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems - GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node + BARK_GGML_API void bark_ggml_numa_init(void); // call once for better performance on NUMA systems + BARK_GGML_API bool bark_ggml_is_numa(void); // true if init detected that system has >1 NUMA node - GGML_API void ggml_print_object (const struct ggml_object * obj); - GGML_API void ggml_print_objects(const struct ggml_context * ctx); + BARK_GGML_API void bark_ggml_print_object (const struct bark_ggml_object * obj); + BARK_GGML_API void bark_ggml_print_objects(const struct bark_ggml_context * ctx); - GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor); - GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor); - GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor); - GGML_API size_t ggml_nbytes_pad (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN - GGML_API size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split); + BARK_GGML_API int64_t bark_ggml_nelements (const struct bark_ggml_tensor * tensor); + BARK_GGML_API int64_t bark_ggml_nrows (const struct bark_ggml_tensor * tensor); + BARK_GGML_API size_t bark_ggml_nbytes (const struct bark_ggml_tensor * tensor); + BARK_GGML_API size_t bark_ggml_nbytes_pad (const struct bark_ggml_tensor * tensor); // same as bark_ggml_nbytes() but padded to BARK_GGML_MEM_ALIGN + BARK_GGML_API size_t bark_ggml_nbytes_split(const struct bark_ggml_tensor * tensor, int nrows_split); - GGML_API int ggml_blck_size (enum ggml_type type); - GGML_API size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block - GGML_API float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float + BARK_GGML_API int bark_ggml_blck_size (enum bark_ggml_type type); + BARK_GGML_API size_t bark_ggml_type_size (enum bark_ggml_type type); // size in bytes for all elements in a block + BARK_GGML_API float bark_ggml_type_sizef(enum bark_ggml_type type); // bark_ggml_type_size()/bark_ggml_blck_size() as float - GGML_API const char * ggml_type_name(enum ggml_type type); - GGML_API const char * ggml_op_name (enum ggml_op op); - GGML_API const char * ggml_op_symbol(enum ggml_op op); + BARK_GGML_API const char * bark_ggml_type_name(enum bark_ggml_type type); + BARK_GGML_API const char * bark_ggml_op_name (enum bark_ggml_op op); + BARK_GGML_API const char * bark_ggml_op_symbol(enum bark_ggml_op op); - 
GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor); + BARK_GGML_API size_t bark_ggml_element_size(const struct bark_ggml_tensor * tensor); - GGML_API bool ggml_is_quantized(enum ggml_type type); + BARK_GGML_API bool bark_ggml_is_quantized(enum bark_ggml_type type); // TODO: temporary until model loading of ggml examples is refactored - GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype); + BARK_GGML_API enum bark_ggml_type bark_ggml_ftype_to_bark_ggml_type(enum bark_ggml_ftype ftype); - GGML_API bool ggml_is_transposed(const struct ggml_tensor * tensor); - GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor); - GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor); + BARK_GGML_API bool bark_ggml_is_transposed(const struct bark_ggml_tensor * tensor); + BARK_GGML_API bool bark_ggml_is_contiguous(const struct bark_ggml_tensor * tensor); + BARK_GGML_API bool bark_ggml_is_permuted (const struct bark_ggml_tensor * tensor); - GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1); + BARK_GGML_API bool bark_ggml_are_same_shape(const struct bark_ggml_tensor * t0, const struct bark_ggml_tensor * t1); // use this to compute the memory overhead of a tensor - GGML_API size_t ggml_tensor_overhead(void); + BARK_GGML_API size_t bark_ggml_tensor_overhead(void); // main - GGML_API struct ggml_context * ggml_init(struct ggml_init_params params); - GGML_API void ggml_free(struct ggml_context * ctx); + BARK_GGML_API struct bark_ggml_context * bark_ggml_init(struct bark_ggml_init_params params); + BARK_GGML_API void bark_ggml_free(struct bark_ggml_context * ctx); - GGML_API size_t ggml_used_mem(const struct ggml_context * ctx); + BARK_GGML_API size_t bark_ggml_used_mem(const struct bark_ggml_context * ctx); - GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch); - GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx); - GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc); + BARK_GGML_API size_t bark_ggml_set_scratch (struct bark_ggml_context * ctx, struct bark_ggml_scratch scratch); + BARK_GGML_API bool bark_ggml_get_no_alloc(struct bark_ggml_context * ctx); + BARK_GGML_API void bark_ggml_set_no_alloc(struct bark_ggml_context * ctx, bool no_alloc); - GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx); - GGML_API size_t ggml_get_mem_size (const struct ggml_context * ctx); - GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx); + BARK_GGML_API void * bark_ggml_get_mem_buffer (const struct bark_ggml_context * ctx); + BARK_GGML_API size_t bark_ggml_get_mem_size (const struct bark_ggml_context * ctx); + BARK_GGML_API size_t bark_ggml_get_max_tensor_size(const struct bark_ggml_context * ctx); - GGML_API struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_tensor( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int n_dims, const int64_t *ne); - GGML_API struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_tensor_1d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0); - GGML_API struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_tensor_2d( + struct bark_ggml_context * ctx, + enum bark_ggml_type 
type, int64_t ne0, int64_t ne1); - GGML_API struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_tensor_3d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2); - GGML_API struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_tensor_4d( + struct bark_ggml_context * ctx, + enum bark_ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); - GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); - GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_i32(struct bark_ggml_context * ctx, int32_t value); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_new_f32(struct bark_ggml_context * ctx, float value); - GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); - GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_dup_tensor (struct bark_ggml_context * ctx, const struct bark_ggml_tensor * src); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_view_tensor(struct bark_ggml_context * ctx, struct bark_ggml_tensor * src); - GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_get_tensor(struct bark_ggml_context * ctx, const char * name); - GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); - GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); - GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_zero(struct bark_ggml_tensor * tensor); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_i32 (struct bark_ggml_tensor * tensor, int32_t value); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_f32 (struct bark_ggml_tensor * tensor, float value); // Converts a flat index into coordinates - GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); + BARK_GGML_API void bark_ggml_unravel_index(const struct bark_ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); - GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); - GGML_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); + BARK_GGML_API int32_t bark_ggml_get_i32_1d(const struct bark_ggml_tensor * tensor, int i); + BARK_GGML_API void bark_ggml_set_i32_1d(const struct bark_ggml_tensor * tensor, int i, int32_t value); - GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3); - GGML_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value); + BARK_GGML_API int32_t bark_ggml_get_i32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3); + BARK_GGML_API void bark_ggml_set_i32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value); - GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); - GGML_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, 
float value); + BARK_GGML_API float bark_ggml_get_f32_1d(const struct bark_ggml_tensor * tensor, int i); + BARK_GGML_API void bark_ggml_set_f32_1d(const struct bark_ggml_tensor * tensor, int i, float value); - GGML_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3); - GGML_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value); + BARK_GGML_API float bark_ggml_get_f32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3); + BARK_GGML_API void bark_ggml_set_f32_nd(const struct bark_ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value); - GGML_API void * ggml_get_data (const struct ggml_tensor * tensor); - GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor); + BARK_GGML_API void * bark_ggml_get_data (const struct bark_ggml_tensor * tensor); + BARK_GGML_API float * bark_ggml_get_data_f32(const struct bark_ggml_tensor * tensor); - GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor); + BARK_GGML_API enum bark_ggml_unary_op bark_ggml_get_unary_op(const struct bark_ggml_tensor * tensor); - GGML_API const char * ggml_get_name (const struct ggml_tensor * tensor); - GGML_API struct ggml_tensor * ggml_set_name ( struct ggml_tensor * tensor, const char * name); - GGML_ATTRIBUTE_FORMAT(2, 3) - GGML_API struct ggml_tensor * ggml_format_name( struct ggml_tensor * tensor, const char * fmt, ...); + BARK_GGML_API const char * bark_ggml_get_name (const struct bark_ggml_tensor * tensor); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_name ( struct bark_ggml_tensor * tensor, const char * name); + BARK_GGML_ATTRIBUTE_FORMAT(2, 3) + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_format_name( struct bark_ggml_tensor * tensor, const char * fmt, ...); // // operations on tensors with backpropagation // - GGML_API struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_dup( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_dup_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); - - GGML_API struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - GGML_API struct ggml_tensor * ggml_add_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - GGML_API struct ggml_tensor * ggml_add_cast( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - enum ggml_type type); - - GGML_API struct ggml_tensor * ggml_add1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - GGML_API struct ggml_tensor * ggml_add1_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - GGML_API struct ggml_tensor * ggml_acc( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_dup_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); + + BARK_GGML_API struct bark_ggml_tensor * 
bark_ggml_add_cast( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + enum bark_ggml_type type); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add1_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_acc( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); - GGML_API struct ggml_tensor * ggml_acc_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_acc_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); - GGML_API struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sub( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_sub_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sub_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_mul( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_mul_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_mul_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_div( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_div_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_div_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sqr( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_sqr_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sqr_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sqrt( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_sqrt_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct 
bark_ggml_tensor * bark_ggml_sqrt_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_log( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_log( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_log_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_log_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // return scalar - GGML_API struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sum( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // sums along rows, with input shape [a,b,c,d] return shape [1,b,c,d] - GGML_API struct ggml_tensor * ggml_sum_rows( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sum_rows( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // mean along rows - GGML_API struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_mean( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // argmax along rows - GGML_API struct ggml_tensor * ggml_argmax( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_argmax( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // if a is the same shape as b, and a is not parameter, return a // otherwise, return a new tensor: repeat(a) to fit in b - GGML_API struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_repeat( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // sums repetitions in a into shape of b - GGML_API struct ggml_tensor * ggml_repeat_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_repeat_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // concat a and b on dim 2 // used in stable-diffusion - GGML_API struct ggml_tensor * ggml_concat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_concat( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_abs( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_abs_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_abs_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sgn( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_sgn_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_sgn_inplace( + struct bark_ggml_context * 
ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_neg( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_neg_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_neg_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_step( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_step_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_step_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_tanh( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_tanh( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_tanh_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_tanh_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_elu( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_elu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_elu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_elu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_relu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_relu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_relu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // TODO: double-check this computation is correct - GGML_API struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_gelu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_gelu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_gelu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_gelu_quick( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_gelu_quick( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_gelu_quick_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_gelu_quick_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_silu( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_silu( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor 
* a); - GGML_API struct ggml_tensor * ggml_silu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_silu_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // a - x // b - dy - GGML_API struct ggml_tensor * ggml_silu_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_silu_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // normalize along rows - GGML_API struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps); - GGML_API struct ggml_tensor * ggml_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps); - GGML_API struct ggml_tensor * ggml_rms_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rms_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps); - GGML_API struct ggml_tensor * ggml_rms_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rms_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float eps); // group normalize along ne0*ne1*n_groups // used in stable-diffusion // TODO: eps is hardcoded to 1e-6 for now - GGML_API struct ggml_tensor * ggml_group_norm( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_group_norm( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_groups); - GGML_API struct ggml_tensor * ggml_group_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_group_norm_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_groups); // a - x // b - dy - GGML_API struct ggml_tensor * ggml_rms_norm_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rms_norm_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, float eps); // A: k columns, n rows => [ne03, ne02, n, k] // B: k columns, m rows (i.e. 
we transpose it internally) => [ne03 * x, ne02 * y, m, k] // result is n columns, m rows => [ne03 * x, ne02 * y, m, n] - GGML_API struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_mul_mat( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // A: m columns, n rows, // B: p columns, n rows, // result is m columns, p rows - GGML_API struct ggml_tensor * ggml_out_prod( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_out_prod( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // // operations on tensors without backpropagation // - GGML_API struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_scale( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_scale_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_scale_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // b -> view(a,offset,nb1,nb2,3), return modified a - GGML_API struct ggml_tensor * ggml_set( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); // b -> view(a,offset,nb1,nb2,3), return view(a) - GGML_API struct ggml_tensor * ggml_set_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset); - GGML_API struct ggml_tensor * ggml_set_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t offset); - GGML_API struct ggml_tensor * ggml_set_1d_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_1d_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t offset); // b -> view(a,offset,nb1,nb2,3), return modified a - GGML_API struct ggml_tensor * ggml_set_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t offset); // b -> view(a,offset,nb1,nb2,3), return view(a) - GGML_API struct ggml_tensor * ggml_set_2d_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_set_2d_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, size_t nb1, size_t 
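Illustrative aside (not part of the diff): a minimal sketch of how the renamed bark_ggml_mul_mat() above is typically driven, following the shape comment in the hunk. bark_ggml_init(), bark_ggml_new_tensor_2d() and BARK_GGML_TYPE_F32 do not appear in this hunk; they are assumed to carry the same bark_ prefix as the rest of the renamed API, and evaluation of the resulting node is shown further down with the bark_ggml_graph_* calls.

#include "ggml.h"   // renamed bark_ggml_* declarations (header path assumed)

static struct bark_ggml_tensor * tiny_matmul(struct bark_ggml_context * ctx) {
    // A: k = 4 columns, n = 3 rows; B: k = 4 columns, m = 2 rows
    // (bark_ggml_new_tensor_2d / BARK_GGML_TYPE_F32 assumed renamed like the rest)
    struct bark_ggml_tensor * a = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, 4, 3);
    struct bark_ggml_tensor * b = bark_ggml_new_tensor_2d(ctx, BARK_GGML_TYPE_F32, 4, 2);
    // per the comment above, B is transposed internally; the result is n columns, m rows
    return bark_ggml_mul_mat(ctx, a, b);
}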
offset); // a -> b, return view(b) - GGML_API struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cpy( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // a -> b, in-place, return view(b) - GGML_API struct ggml_tensor * ggml_cpy_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cpy_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // make contiguous - GGML_API struct ggml_tensor * ggml_cont( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // make contiguous, in-place - GGML_API struct ggml_tensor * ggml_cont_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // make contiguous, with new shape - GGML_API struct ggml_tensor * ggml_cont_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0); - GGML_API struct ggml_tensor * ggml_cont_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1); - GGML_API struct ggml_tensor * ggml_cont_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_3d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2); - GGML_API struct ggml_tensor * ggml_cont_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cont_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, @@ -1148,59 +1148,59 @@ extern "C" { // return view(a), b specifies the new shape // TODO: when we start computing gradient, make a copy instead of view - GGML_API struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_reshape( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // return view(a) // TODO: when we start computing gradient, make a copy instead of view - GGML_API struct ggml_tensor * ggml_reshape_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_reshape_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0); - GGML_API struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_reshape_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1); // return view(a) // TODO: when we start computing gradient, make a copy instead of view - GGML_API struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_reshape_3d( + struct bark_ggml_context * ctx, + struct 
bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2); - GGML_API struct ggml_tensor * ggml_reshape_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_reshape_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3); // offset in bytes - GGML_API struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_view_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, size_t offset); - GGML_API struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_view_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, size_t nb1, // row stride in bytes size_t offset); - GGML_API struct ggml_tensor * ggml_view_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_view_3d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, @@ -1208,9 +1208,9 @@ extern "C" { size_t nb2, // slice stride in bytes size_t offset); - GGML_API struct ggml_tensor * ggml_view_4d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_view_4d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int64_t ne0, int64_t ne1, int64_t ne2, @@ -1220,77 +1220,77 @@ extern "C" { size_t nb3, size_t offset); - GGML_API struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_permute( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int axis0, int axis1, int axis2, int axis3); - // alias for ggml_permute(ctx, a, 1, 0, 2, 3) - GGML_API struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a); + // alias for bark_ggml_permute(ctx, a, 1, 0, 2, 3) + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_transpose( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_get_rows( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_get_rows_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_get_rows_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c); - GGML_API struct ggml_tensor * ggml_diag( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_diag( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // set elements above the diagonal to -INF - GGML_API struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_diag_mask_inf( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct 
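Illustrative aside (not part of the diff): carving a column block out of a 2-D tensor with the renamed bark_ggml_view_2d()/bark_ggml_cont() declared above. Strides and offsets are in bytes, as the declarations note; the ne/nb tensor fields and bark_ggml_element_size() are not in this hunk and are assumed to match upstream ggml with the same bark_ prefix.

#include "ggml.h"   // renamed bark_ggml_* declarations (header path assumed)

static struct bark_ggml_tensor * take_columns(
        struct bark_ggml_context * ctx,
        struct bark_ggml_tensor  * x,      // shape [ne0, ne1]
        int64_t first, int64_t count) {
    // element size and ne/nb layout assumed to follow upstream ggml
    const size_t esz = bark_ggml_element_size(x);
    struct bark_ggml_tensor * v = bark_ggml_view_2d(
        ctx, x,
        count, x->ne[1],   // keep `count` elements of each row
        x->nb[1],          // row stride in bytes, unchanged
        first * esz);      // byte offset of the first kept element
    return bark_ggml_cont(ctx, v);  // make the strided view contiguous
}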
bark_ggml_tensor * bark_ggml_diag_mask_inf_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past); // set elements above the diagonal to 0 - GGML_API struct ggml_tensor * ggml_diag_mask_zero( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_diag_mask_zero( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_diag_mask_zero_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past); - GGML_API struct ggml_tensor * ggml_soft_max( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_soft_max( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_soft_max_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_soft_max_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a); - GGML_API struct ggml_tensor * ggml_soft_max_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_soft_max_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_soft_max_back_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_soft_max_back_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // rotary position embedding // if mode & 1 == 1, skip n_past elements (DEPRECATED) @@ -1298,28 +1298,28 @@ extern "C" { // if mode & 4 == 1, ChatGLM style // // b is an int32 vector with size a->ne[2], it contains the positions - GGML_API struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_rope_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx); // custom RoPE - GGML_API struct ggml_tensor * ggml_rope_custom( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope_custom( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, @@ -1327,10 +1327,10 @@ extern "C" { float freq_scale); // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_rope_custom_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope_custom_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, @@ -1338,20 
+1338,20 @@ extern "C" { float freq_scale); // xPos RoPE, in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_rope_xpos_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope_xpos_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, float base, bool down); // rotary position embedding backward, i.e compute dx from dy // a - dy - GGML_API struct ggml_tensor * ggml_rope_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_rope_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int n_dims, int mode, int n_ctx, @@ -1362,56 +1362,56 @@ extern "C" { // alibi position embedding // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_alibi( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_alibi( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int n_past, int n_head, float bias_max); // clamp // in-place, returns view(a) - GGML_API struct ggml_tensor * ggml_clamp( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_clamp( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, float min, float max); - GGML_API struct ggml_tensor * ggml_conv_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, // stride int p0, // padding int d0); // dilation // conv_1d with padding = half - // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d) - GGML_API struct ggml_tensor* ggml_conv_1d_ph( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + // alias for bark_ggml_conv_1d(a, b, s, a->ne[0]/2, d) + BARK_GGML_API struct bark_ggml_tensor* bark_ggml_conv_1d_ph( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s, int d); - GGML_API struct ggml_tensor * ggml_conv_transpose_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_transpose_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int p0, int d0); - GGML_API struct ggml_tensor * ggml_pad_reflec_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_pad_reflec_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int p0, int p1); - GGML_API struct ggml_tensor * ggml_conv_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int s0, int s1, int p0, @@ -1427,10 +1427,10 @@ extern "C" { // b: 1024 1024 3 1 // res: 64 64 768 1 // used in sam - GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_2d_sk_p0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); // kernel size is a->ne[0] 
x a->ne[1] // stride is 1 @@ -1440,35 +1440,35 @@ extern "C" { // b: 64 64 256 1 // res: 64 64 256 1 // used in sam - GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_2d_s1_ph( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); + + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_conv_transpose_2d_p0( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, int stride); - enum ggml_op_pool { - GGML_OP_POOL_MAX, - GGML_OP_POOL_AVG, - GGML_OP_POOL_COUNT, + enum bark_ggml_op_pool { + BARK_GGML_OP_POOL_MAX, + BARK_GGML_OP_POOL_AVG, + BARK_GGML_OP_POOL_COUNT, }; - GGML_API struct ggml_tensor * ggml_pool_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_op_pool op, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_pool_1d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_op_pool op, int k0, // kernel size int s0, // stride int p0); // padding - GGML_API struct ggml_tensor * ggml_pool_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_op_pool op, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_pool_2d( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_op_pool op, int k0, int k1, int s0, @@ -1478,33 +1478,33 @@ extern "C" { // nearest interpolate // used in stable-diffusion - GGML_API struct ggml_tensor * ggml_upscale( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_upscale( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int scale_factor); - GGML_API struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_flash_attn( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * q, + struct bark_ggml_tensor * k, + struct bark_ggml_tensor * v, bool masked); - GGML_API struct ggml_tensor * ggml_flash_attn_back( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, - struct ggml_tensor * d, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_flash_attn_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * q, + struct bark_ggml_tensor * k, + struct bark_ggml_tensor * v, + struct bark_ggml_tensor * d, bool masked); - GGML_API struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_flash_ff( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b0, + struct bark_ggml_tensor * b1, + struct bark_ggml_tensor * c0, + struct bark_ggml_tensor * c1); // partition into non-overlapping windows with padding if needed // example: @@ -1512,292 +1512,292 @@ extern "C" { // w: 14 // res: 768 14 14 25 // used in sam - GGML_API struct ggml_tensor * ggml_win_part( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_win_part( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * 
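Illustrative aside (not part of the diff): a sketch combining the renamed 1-D convolution and pooling helpers declared above, in the order an EnCodec-style block would use them. The kernel/signal shapes follow upstream ggml conventions, and p0/p1 of bark_ggml_pad_reflec_1d() being left/right padding is an assumption based on the parameter names, not something this hunk states.

#include "ggml.h"   // renamed bark_ggml_* declarations (header path assumed)

static struct bark_ggml_tensor * conv_downsample(
        struct bark_ggml_context * ctx,
        struct bark_ggml_tensor  * kernel,   // assumed [kernel_w, in_ch, out_ch], as in upstream ggml
        struct bark_ggml_tensor  * signal) { // assumed [length, in_ch]
    // reflective padding of half the kernel width on each side
    // (p0 = left pad, p1 = right pad is an assumption from the parameter names)
    const int pad = (int) kernel->ne[0] / 2;
    struct bark_ggml_tensor * cur = bark_ggml_pad_reflec_1d(ctx, signal, pad, pad);
    // stride 1, no extra zero padding, dilation 1
    cur = bark_ggml_conv_1d(ctx, kernel, cur, /*s0=*/1, /*p0=*/0, /*d0=*/1);
    // halve the temporal resolution: average pool with kernel 2, stride 2, no padding
    return bark_ggml_pool_1d(ctx, cur, BARK_GGML_OP_POOL_AVG, 2, 2, 0);
}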
a, int w); - // reverse of ggml_win_part + // reverse of bark_ggml_win_part // used in sam - GGML_API struct ggml_tensor * ggml_win_unpart( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_win_unpart( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int w0, int h0, int w); - GGML_API struct ggml_tensor * ggml_unary( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_unary_op op); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_unary( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_unary_op op); - GGML_API struct ggml_tensor * ggml_unary_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - enum ggml_unary_op op); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_unary_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + enum bark_ggml_unary_op op); // used in sam - GGML_API struct ggml_tensor * ggml_get_rel_pos( - struct ggml_context * ctx, - struct ggml_tensor * a, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_get_rel_pos( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, int qh, int kh); // used in sam - GGML_API struct ggml_tensor * ggml_add_rel_pos( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * pw, - struct ggml_tensor * ph); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add_rel_pos( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * pw, + struct bark_ggml_tensor * ph); - GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * pw, - struct ggml_tensor * ph); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_add_rel_pos_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * pw, + struct bark_ggml_tensor * ph); // custom operators - typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *); - typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *); - - typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *); - typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *); - typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_unary_op_f32_t fun), - "use ggml_map_custom1 instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_unary_op_f32_t fun), - "use ggml_map_custom1_inplace instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_binary_op_f32_t fun), - "use ggml_map_custom2 instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_binary_op_f32_t fun), - "use ggml_map_custom2_inplace instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_custom1_op_f32_t fun), - "use ggml_map_custom1 instead"); - - GGML_DEPRECATED(GGML_API struct 
ggml_tensor * ggml_map_custom1_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_custom1_op_f32_t fun), - "use ggml_map_custom1_inplace instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_custom2_op_f32_t fun), - "use ggml_map_custom2 instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_custom2_op_f32_t fun), - "use ggml_map_custom2_inplace instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - ggml_custom3_op_f32_t fun), - "use ggml_map_custom3 instead"); - - GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - ggml_custom3_op_f32_t fun), - "use ggml_map_custom3_inplace instead"); + typedef void (*bark_ggml_unary_op_f32_t) (const int, float *, const float *); + typedef void (*bark_ggml_binary_op_f32_t)(const int, float *, const float *, const float *); + + typedef void (*bark_ggml_custom1_op_f32_t)(struct bark_ggml_tensor *, const struct bark_ggml_tensor *); + typedef void (*bark_ggml_custom2_op_f32_t)(struct bark_ggml_tensor *, const struct bark_ggml_tensor *, const struct bark_ggml_tensor *); + typedef void (*bark_ggml_custom3_op_f32_t)(struct bark_ggml_tensor *, const struct bark_ggml_tensor *, const struct bark_ggml_tensor *, const struct bark_ggml_tensor *); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_unary_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_unary_op_f32_t fun), + "use bark_ggml_map_custom1 instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_unary_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_unary_op_f32_t fun), + "use bark_ggml_map_custom1_inplace instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_binary_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_binary_op_f32_t fun), + "use bark_ggml_map_custom2 instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_binary_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_binary_op_f32_t fun), + "use bark_ggml_map_custom2_inplace instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom1_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_custom1_op_f32_t fun), + "use bark_ggml_map_custom1 instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom1_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_custom1_op_f32_t fun), + "use bark_ggml_map_custom1_inplace instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom2_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_custom2_op_f32_t fun), + "use bark_ggml_map_custom2 instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * 
bark_ggml_map_custom2_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_custom2_op_f32_t fun), + "use bark_ggml_map_custom2_inplace instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom3_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + bark_ggml_custom3_op_f32_t fun), + "use bark_ggml_map_custom3 instead"); + + BARK_GGML_DEPRECATED(BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom3_inplace_f32( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + bark_ggml_custom3_op_f32_t fun), + "use bark_ggml_map_custom3_inplace instead"); // custom operators v2 - typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, int ith, int nth, void * userdata); - typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata); - typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata); + typedef void (*bark_ggml_custom1_op_t)(struct bark_ggml_tensor * dst , const struct bark_ggml_tensor * a, int ith, int nth, void * userdata); + typedef void (*bark_ggml_custom2_op_t)(struct bark_ggml_tensor * dst , const struct bark_ggml_tensor * a, const struct bark_ggml_tensor * b, int ith, int nth, void * userdata); + typedef void (*bark_ggml_custom3_op_t)(struct bark_ggml_tensor * dst , const struct bark_ggml_tensor * a, const struct bark_ggml_tensor * b, const struct bark_ggml_tensor * c, int ith, int nth, void * userdata); - #define GGML_N_TASKS_MAX -1 + #define BARK_GGML_N_TASKS_MAX -1 - GGML_API struct ggml_tensor * ggml_map_custom1( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_custom1_op_t fun, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom1( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_custom1_op_t fun, int n_tasks, void * userdata); - GGML_API struct ggml_tensor * ggml_map_custom1_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - ggml_custom1_op_t fun, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom1_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + bark_ggml_custom1_op_t fun, int n_tasks, void * userdata); - GGML_API struct ggml_tensor * ggml_map_custom2( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_custom2_op_t fun, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom2( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_custom2_op_t fun, int n_tasks, void * userdata); - GGML_API struct ggml_tensor * ggml_map_custom2_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - ggml_custom2_op_t fun, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom2_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + bark_ggml_custom2_op_t fun, int n_tasks, void * userdata); - GGML_API struct ggml_tensor * ggml_map_custom3( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - ggml_custom3_op_t fun, + BARK_GGML_API struct 
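Illustrative aside (not part of the diff): using the renamed "custom operators v2" API declared above instead of the deprecated *_f32 variants. The callback matches the bark_ggml_custom1_op_t typedef from this hunk; bark_ggml_nelements() and the ->data field are not in this hunk and are assumed to follow the same rename and layout as upstream ggml.

#include "ggml.h"    // renamed bark_ggml_* declarations (header path assumed)
#include <stdint.h>

// clamps every element of `src` into [-1, 1]; signature matches bark_ggml_custom1_op_t
static void clip_to_unit(struct bark_ggml_tensor * dst,
                         const struct bark_ggml_tensor * src,
                         int ith, int nth, void * userdata) {
    (void) userdata;
    const int64_t n = bark_ggml_nelements(dst);   // assumed renamed like the rest of the API
    // worker `ith` of `nth` handles an interleaved slice of the elements
    for (int64_t i = ith; i < n; i += nth) {
        const float v = ((const float *) src->data)[i];
        ((float *) dst->data)[i] = v < -1.0f ? -1.0f : (v > 1.0f ? 1.0f : v);
    }
}

// later, when building a graph:
//   cur = bark_ggml_map_custom1(ctx, cur, clip_to_unit, BARK_GGML_N_TASKS_MAX, NULL);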
bark_ggml_tensor * bark_ggml_map_custom3( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + bark_ggml_custom3_op_t fun, int n_tasks, void * userdata); - GGML_API struct ggml_tensor * ggml_map_custom3_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c, - ggml_custom3_op_t fun, + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_map_custom3_inplace( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c, + bark_ggml_custom3_op_t fun, int n_tasks, void * userdata); // loss function - GGML_API struct ggml_tensor * ggml_cross_entropy_loss( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cross_entropy_loss( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b); - GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - struct ggml_tensor * c); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_cross_entropy_loss_back( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * a, + struct bark_ggml_tensor * b, + struct bark_ggml_tensor * c); // // automatic differentiation // - GGML_API void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor); + BARK_GGML_API void bark_ggml_set_param( + struct bark_ggml_context * ctx, + struct bark_ggml_tensor * tensor); - GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); - GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep); + BARK_GGML_API void bark_ggml_build_forward_expand (struct bark_ggml_cgraph * cgraph, struct bark_ggml_tensor * tensor); + BARK_GGML_API void bark_ggml_build_backward_expand(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * gf, struct bark_ggml_cgraph * gb, bool keep); - GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); - GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); + BARK_GGML_API struct bark_ggml_cgraph bark_ggml_build_forward (struct bark_ggml_tensor * tensor); + BARK_GGML_API struct bark_ggml_cgraph bark_ggml_build_backward(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * gf, bool keep); // graph allocation in a context - GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); - GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor); - GGML_API size_t ggml_graph_overhead(void); + BARK_GGML_API struct bark_ggml_cgraph * bark_ggml_new_graph (struct bark_ggml_context * ctx); + BARK_GGML_API struct bark_ggml_cgraph * bark_ggml_build_forward_ctx(struct bark_ggml_context * ctx, struct bark_ggml_tensor * tensor); + BARK_GGML_API size_t bark_ggml_graph_overhead(void); - // ggml_graph_plan() has to be called before ggml_graph_compute() + // bark_ggml_graph_plan() has to be called before bark_ggml_graph_compute() // when plan.work_size > 0, caller must allocate memory for plan.work_data - GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/); - GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan); 
- GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); + BARK_GGML_API struct bark_ggml_cplan bark_ggml_graph_plan (struct bark_ggml_cgraph * cgraph, int n_threads /*= BARK_GGML_DEFAULT_N_THREADS*/); + BARK_GGML_API int bark_ggml_graph_compute(struct bark_ggml_cgraph * cgraph, struct bark_ggml_cplan * cplan); + BARK_GGML_API void bark_ggml_graph_reset (struct bark_ggml_cgraph * cgraph); - // same as ggml_graph_compute() but the work data is allocated as a part of the context + // same as bark_ggml_graph_compute() but the work data is allocated as a part of the context // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data - GGML_API void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads); + BARK_GGML_API void bark_ggml_graph_compute_with_ctx(struct bark_ggml_context * ctx, struct bark_ggml_cgraph * cgraph, int n_threads); - GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name); + BARK_GGML_API struct bark_ggml_tensor * bark_ggml_graph_get_tensor(struct bark_ggml_cgraph * cgraph, const char * name); - GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname); - GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval); + BARK_GGML_API void bark_ggml_graph_export(const struct bark_ggml_cgraph * cgraph, const char * fname); + BARK_GGML_API struct bark_ggml_cgraph bark_ggml_graph_import(const char * fname, struct bark_ggml_context ** ctx_data, struct bark_ggml_context ** ctx_eval); // print info and performance information for the graph - GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph); + BARK_GGML_API void bark_ggml_graph_print(const struct bark_ggml_cgraph * cgraph); // dump the graph into a file using the dot format - GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); + BARK_GGML_API void bark_ggml_graph_dump_dot(const struct bark_ggml_cgraph * gb, const struct bark_ggml_cgraph * gf, const char * filename); // build gradient checkpointing backward graph gb for gf using provided checkpoints // gb_tmp will contain original backward graph with rewritten backward process nodes, // but without the second forward pass nodes. 
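Illustrative aside (not part of the diff): the plan-then-compute flow described by the comments above. bark_ggml_graph_plan() is called first; when plan.work_size > 0 the caller supplies plan.work_data before bark_ggml_graph_compute(). malloc()/free() are just one way to satisfy that contract; bark_ggml_graph_compute_with_ctx(), also declared above, lets the context own the work buffer instead. The "0 on success" return convention is taken from upstream ggml and is an assumption here.

#include "ggml.h"    // renamed bark_ggml_* declarations (header path assumed)
#include <stdlib.h>

static int run_graph(struct bark_ggml_tensor * result, int n_threads) {
    struct bark_ggml_cgraph gf = bark_ggml_build_forward(result);

    struct bark_ggml_cplan plan = bark_ggml_graph_plan(&gf, n_threads);
    if (plan.work_size > 0) {
        // caller must allocate the work buffer, per the comment in the header
        plan.work_data = malloc(plan.work_size);
    }

    const int rc = bark_ggml_graph_compute(&gf, &plan);

    free(plan.work_data);
    return rc;   // 0 on success (upstream ggml convention, assumed unchanged)
}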
- GGML_API void ggml_build_backward_gradient_checkpointing( - struct ggml_context * ctx, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - struct ggml_cgraph * gb_tmp, - struct ggml_tensor * * checkpoints, + BARK_GGML_API void bark_ggml_build_backward_gradient_checkpointing( + struct bark_ggml_context * ctx, + struct bark_ggml_cgraph * gf, + struct bark_ggml_cgraph * gb, + struct bark_ggml_cgraph * gb_tmp, + struct bark_ggml_tensor * * checkpoints, int n_checkpoints); // // optimization // // optimization methods - enum ggml_opt_type { - GGML_OPT_ADAM, - GGML_OPT_LBFGS, + enum bark_ggml_opt_type { + BARK_GGML_OPT_ADAM, + BARK_GGML_OPT_LBFGS, }; // linesearch methods - enum ggml_linesearch { - GGML_LINESEARCH_DEFAULT = 1, + enum bark_ggml_linesearch { + BARK_GGML_LINESEARCH_DEFAULT = 1, - GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0, - GGML_LINESEARCH_BACKTRACKING_WOLFE = 1, - GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, + BARK_GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0, + BARK_GGML_LINESEARCH_BACKTRACKING_WOLFE = 1, + BARK_GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, }; // optimization return values - enum ggml_opt_result { - GGML_OPT_OK = 0, - GGML_OPT_DID_NOT_CONVERGE, - GGML_OPT_NO_CONTEXT, - GGML_OPT_INVALID_WOLFE, - GGML_OPT_FAIL, - GGML_OPT_CANCEL, - - GGML_LINESEARCH_FAIL = -128, - GGML_LINESEARCH_MINIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_ITERATIONS, - GGML_LINESEARCH_INVALID_PARAMETERS, + enum bark_ggml_opt_result { + BARK_GGML_OPT_OK = 0, + BARK_GGML_OPT_DID_NOT_CONVERGE, + BARK_GGML_OPT_NO_CONTEXT, + BARK_GGML_OPT_INVALID_WOLFE, + BARK_GGML_OPT_FAIL, + BARK_GGML_OPT_CANCEL, + + BARK_GGML_LINESEARCH_FAIL = -128, + BARK_GGML_LINESEARCH_MINIMUM_STEP, + BARK_GGML_LINESEARCH_MAXIMUM_STEP, + BARK_GGML_LINESEARCH_MAXIMUM_ITERATIONS, + BARK_GGML_LINESEARCH_INVALID_PARAMETERS, }; - typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel); - typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data); + typedef void (*bark_ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel); + typedef void (*bark_ggml_log_callback)(enum bark_ggml_log_level level, const char * text, void * user_data); // optimization parameters // - // see ggml.c (ggml_opt_default_params) for default values + // see ggml.c (bark_ggml_opt_default_params) for default values // - struct ggml_opt_params { - enum ggml_opt_type type; + struct bark_ggml_opt_params { + enum bark_ggml_opt_type type; int n_threads; @@ -1851,13 +1851,13 @@ extern "C" { float min_step; float max_step; - enum ggml_linesearch linesearch; + enum bark_ggml_linesearch linesearch; } lbfgs; }; - struct ggml_opt_context { - struct ggml_context * ctx; - struct ggml_opt_params params; + struct bark_ggml_opt_context { + struct bark_ggml_context * ctx; + struct bark_ggml_opt_params params; int iter; int64_t nx; // number of parameter elements @@ -1868,26 +1868,26 @@ extern "C" { float loss_after; struct { - struct ggml_tensor * g; // current gradient - struct ggml_tensor * m; // first moment - struct ggml_tensor * v; // second moment - struct ggml_tensor * pf; // past function values + struct bark_ggml_tensor * g; // current gradient + struct bark_ggml_tensor * m; // first moment + struct bark_ggml_tensor * v; // second moment + struct bark_ggml_tensor * pf; // past function values float fx_best; float fx_prev; int n_no_improvement; } adam; struct { - struct ggml_tensor * x; // current parameters - struct ggml_tensor * xp; // previous 
parameters - struct ggml_tensor * g; // current gradient - struct ggml_tensor * gp; // previous gradient - struct ggml_tensor * d; // search direction - struct ggml_tensor * pf; // past function values - struct ggml_tensor * lmal; // the L-BFGS memory alpha - struct ggml_tensor * lmys; // the L-BFGS memory ys - struct ggml_tensor * lms; // the L-BFGS memory s - struct ggml_tensor * lmy; // the L-BFGS memory y + struct bark_ggml_tensor * x; // current parameters + struct bark_ggml_tensor * xp; // previous parameters + struct bark_ggml_tensor * g; // current gradient + struct bark_ggml_tensor * gp; // previous gradient + struct bark_ggml_tensor * d; // search direction + struct bark_ggml_tensor * pf; // past function values + struct bark_ggml_tensor * lmal; // the L-BFGS memory alpha + struct bark_ggml_tensor * lmys; // the L-BFGS memory ys + struct bark_ggml_tensor * lms; // the L-BFGS memory s + struct bark_ggml_tensor * lmy; // the L-BFGS memory y float fx_best; float step; int j; @@ -1897,193 +1897,193 @@ extern "C" { } lbfgs; }; - GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); + BARK_GGML_API struct bark_ggml_opt_params bark_ggml_opt_default_params(enum bark_ggml_opt_type type); // optimize the function defined by the tensor f - GGML_API enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f); + BARK_GGML_API enum bark_ggml_opt_result bark_ggml_opt( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_params params, + struct bark_ggml_tensor * f); // initialize optimizer context - GGML_API void ggml_opt_init( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_opt_params params, + BARK_GGML_API void bark_ggml_opt_init( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_opt_params params, int64_t nx); // continue optimizing the function defined by the tensor f - GGML_API enum ggml_opt_result ggml_opt_resume( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_tensor * f); + BARK_GGML_API enum bark_ggml_opt_result bark_ggml_opt_resume( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_tensor * f); // continue optimizing the function defined by the tensor f - GGML_API enum ggml_opt_result ggml_opt_resume_g( - struct ggml_context * ctx, - struct ggml_opt_context * opt, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - ggml_opt_callback callback, + BARK_GGML_API enum bark_ggml_opt_result bark_ggml_opt_resume_g( + struct bark_ggml_context * ctx, + struct bark_ggml_opt_context * opt, + struct bark_ggml_tensor * f, + struct bark_ggml_cgraph * gf, + struct bark_ggml_cgraph * gb, + bark_ggml_opt_callback callback, void * callback_data); // // quantization // - GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist); - GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist); - GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist); - GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist); - GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_q4_1(const float * src, void * dst, int n, int 
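Illustrative aside (not part of the diff): minimizing a scalar loss tensor f with the renamed optimizer entry points declared above. Only bark_ggml_opt_default_params() and bark_ggml_opt() from this hunk are used; n_threads is a field shown in the bark_ggml_opt_params struct earlier in this hunk.

#include "ggml.h"   // renamed bark_ggml_* declarations (header path assumed)

static enum bark_ggml_opt_result fit(struct bark_ggml_context * ctx,
                                     struct bark_ggml_tensor  * f) {
    struct bark_ggml_opt_params params = bark_ggml_opt_default_params(BARK_GGML_OPT_ADAM);
    params.n_threads = 4;                   // field shown in the struct above
    return bark_ggml_opt(ctx, params, f);   // BARK_GGML_OPT_OK on convergence
}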
k, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist); - GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist); + BARK_GGML_API size_t bark_ggml_quantize_chunk(enum bark_ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist); // // gguf // - enum gguf_type { - GGUF_TYPE_UINT8 = 0, - GGUF_TYPE_INT8 = 1, - GGUF_TYPE_UINT16 = 2, - GGUF_TYPE_INT16 = 3, - GGUF_TYPE_UINT32 = 4, - GGUF_TYPE_INT32 = 5, - GGUF_TYPE_FLOAT32 = 6, - GGUF_TYPE_BOOL = 7, - GGUF_TYPE_STRING = 8, - GGUF_TYPE_ARRAY = 9, - GGUF_TYPE_UINT64 = 10, - GGUF_TYPE_INT64 = 11, - GGUF_TYPE_FLOAT64 = 12, - GGUF_TYPE_COUNT, // marks the end of the enum + enum bark_gguf_type { + BARK_GGUF_TYPE_UINT8 = 0, + BARK_GGUF_TYPE_INT8 = 1, + BARK_GGUF_TYPE_UINT16 = 2, + BARK_GGUF_TYPE_INT16 = 3, + BARK_GGUF_TYPE_UINT32 = 4, + BARK_GGUF_TYPE_INT32 = 5, + BARK_GGUF_TYPE_FLOAT32 = 6, + BARK_GGUF_TYPE_BOOL = 7, + BARK_GGUF_TYPE_STRING = 8, + BARK_GGUF_TYPE_ARRAY = 9, + BARK_GGUF_TYPE_UINT64 = 10, + BARK_GGUF_TYPE_INT64 = 11, + BARK_GGUF_TYPE_FLOAT64 = 12, + BARK_GGUF_TYPE_COUNT, // marks the end of the enum }; - struct gguf_context; + struct bark_gguf_context; - struct gguf_init_params { + struct bark_gguf_init_params { bool no_alloc; - // if not NULL, create a ggml_context and allocate the tensor data in it - struct ggml_context ** ctx; + // if not NULL, create a bark_ggml_context and allocate the tensor data in it + struct bark_ggml_context ** ctx; }; - GGML_API struct gguf_context * gguf_init_empty(void); - GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); - //GGML_API struct gguf_context * gguf_init_from_buffer(..); + BARK_GGML_API struct bark_gguf_context * bark_gguf_init_empty(void); + BARK_GGML_API struct bark_gguf_context * bark_gguf_init_from_file(const char * fname, struct bark_gguf_init_params params); + //BARK_GGML_API struct bark_gguf_context * bark_gguf_init_from_buffer(..); - GGML_API void gguf_free(struct gguf_context * ctx); + BARK_GGML_API void bark_gguf_free(struct bark_gguf_context * ctx); - GGML_API const char * gguf_type_name(enum gguf_type type); + BARK_GGML_API const char * bark_gguf_type_name(enum bark_gguf_type type); - GGML_API int gguf_get_version (const struct gguf_context * ctx); - GGML_API size_t gguf_get_alignment (const struct gguf_context * ctx); - GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx); - GGML_API void * gguf_get_data (const struct gguf_context * ctx); + BARK_GGML_API int bark_gguf_get_version (const struct bark_gguf_context * ctx); + BARK_GGML_API size_t bark_gguf_get_alignment (const struct bark_gguf_context * ctx); + BARK_GGML_API size_t bark_gguf_get_data_offset(const struct bark_gguf_context * ctx); + BARK_GGML_API void * bark_gguf_get_data (const struct bark_gguf_context * ctx); - GGML_API int gguf_get_n_kv(const struct gguf_context * ctx); - GGML_API int gguf_find_key(const struct gguf_context * ctx, const char * key); - GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id); + BARK_GGML_API int bark_gguf_get_n_kv(const struct bark_gguf_context * ctx); + BARK_GGML_API int bark_gguf_find_key(const struct 
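Illustrative aside (not part of the diff): quantizing a row-major F32 buffer with the renamed bark_ggml_quantize_q4_0() declared above. The 16-entry histogram (one bucket per 4-bit quant value) mirrors how upstream ggml callers size it; that size is an assumption, not something this hunk states.

#include "ggml.h"    // renamed bark_ggml_* declarations (header path assumed)
#include <stdint.h>

static size_t quantize_rows_q4_0(const float * src, void * dst,
                                 int n_elements, int row_size) {
    int64_t hist[16] = {0};   // per-quant-value histogram, filled by the call (size assumed)
    // returns the number of bytes written into dst
    return bark_ggml_quantize_q4_0(src, dst, n_elements, row_size, hist);
}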
bark_gguf_context * ctx, const char * key); + BARK_GGML_API const char * bark_gguf_get_key (const struct bark_gguf_context * ctx, int key_id); - GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id); - GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id); + BARK_GGML_API enum bark_gguf_type bark_gguf_get_kv_type (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API enum bark_gguf_type bark_gguf_get_arr_type(const struct bark_gguf_context * ctx, int key_id); // will abort if the wrong type is used for the key - GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int key_id); - GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int key_id); - GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int key_id); - GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int key_id); - GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int key_id); - GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int key_id); - GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int key_id); - GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int key_id); - GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int key_id); - GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int key_id); - GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id); - GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id); - GGML_API int gguf_get_arr_n (const struct gguf_context * ctx, int key_id); - GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id); - GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i); - - GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx); - GGML_API int gguf_find_tensor (const struct gguf_context * ctx, const char * name); - GGML_API size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i); - GGML_API char * gguf_get_tensor_name (const struct gguf_context * ctx, int i); + BARK_GGML_API uint8_t bark_gguf_get_val_u8 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API int8_t bark_gguf_get_val_i8 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API uint16_t bark_gguf_get_val_u16 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API int16_t bark_gguf_get_val_i16 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API uint32_t bark_gguf_get_val_u32 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API int32_t bark_gguf_get_val_i32 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API float bark_gguf_get_val_f32 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API uint64_t bark_gguf_get_val_u64 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API int64_t bark_gguf_get_val_i64 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API double bark_gguf_get_val_f64 (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API bool bark_gguf_get_val_bool(const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API const char * bark_gguf_get_val_str (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API int bark_gguf_get_arr_n (const struct bark_gguf_context * ctx, int key_id); + BARK_GGML_API const void * bark_gguf_get_arr_data(const struct bark_gguf_context * ctx, int key_id); + 
BARK_GGML_API const char * bark_gguf_get_arr_str (const struct bark_gguf_context * ctx, int key_id, int i); + + BARK_GGML_API int bark_gguf_get_n_tensors (const struct bark_gguf_context * ctx); + BARK_GGML_API int bark_gguf_find_tensor (const struct bark_gguf_context * ctx, const char * name); + BARK_GGML_API size_t bark_gguf_get_tensor_offset(const struct bark_gguf_context * ctx, int i); + BARK_GGML_API char * bark_gguf_get_tensor_name (const struct bark_gguf_context * ctx, int i); // overrides existing values or adds a new one - GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); - GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val); - GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val); - GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val); - GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val); - GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val); - GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val); - GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val); - GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t val); - GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double val); - GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val); - GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val); - GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n); - GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n); + BARK_GGML_API void bark_gguf_set_val_u8 (struct bark_gguf_context * ctx, const char * key, uint8_t val); + BARK_GGML_API void bark_gguf_set_val_i8 (struct bark_gguf_context * ctx, const char * key, int8_t val); + BARK_GGML_API void bark_gguf_set_val_u16 (struct bark_gguf_context * ctx, const char * key, uint16_t val); + BARK_GGML_API void bark_gguf_set_val_i16 (struct bark_gguf_context * ctx, const char * key, int16_t val); + BARK_GGML_API void bark_gguf_set_val_u32 (struct bark_gguf_context * ctx, const char * key, uint32_t val); + BARK_GGML_API void bark_gguf_set_val_i32 (struct bark_gguf_context * ctx, const char * key, int32_t val); + BARK_GGML_API void bark_gguf_set_val_f32 (struct bark_gguf_context * ctx, const char * key, float val); + BARK_GGML_API void bark_gguf_set_val_u64 (struct bark_gguf_context * ctx, const char * key, uint64_t val); + BARK_GGML_API void bark_gguf_set_val_i64 (struct bark_gguf_context * ctx, const char * key, int64_t val); + BARK_GGML_API void bark_gguf_set_val_f64 (struct bark_gguf_context * ctx, const char * key, double val); + BARK_GGML_API void bark_gguf_set_val_bool(struct bark_gguf_context * ctx, const char * key, bool val); + BARK_GGML_API void bark_gguf_set_val_str (struct bark_gguf_context * ctx, const char * key, const char * val); + BARK_GGML_API void bark_gguf_set_arr_data(struct bark_gguf_context * ctx, const char * key, enum bark_gguf_type type, const void * data, int n); + BARK_GGML_API void bark_gguf_set_arr_str (struct bark_gguf_context * ctx, const char * key, const char ** data, int n); // set or add KV pairs from another context - GGML_API void gguf_set_kv(struct gguf_context * ctx, struct 
gguf_context * src); + BARK_GGML_API void bark_gguf_set_kv(struct bark_gguf_context * ctx, struct bark_gguf_context * src); // manage tensor info - GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); - GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type); - GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size); + BARK_GGML_API void bark_gguf_add_tensor(struct bark_gguf_context * ctx, const struct bark_ggml_tensor * tensor); + BARK_GGML_API void bark_gguf_set_tensor_type(struct bark_gguf_context * ctx, const char * name, enum bark_ggml_type type); + BARK_GGML_API void bark_gguf_set_tensor_data(struct bark_gguf_context * ctx, const char * name, const void * data, size_t size); // writing gguf files can be done in 2 ways: // - // - write the entire gguf_context to a binary file in a single pass: + // - write the entire bark_gguf_context to a binary file in a single pass: // - // gguf_write_to_file(ctx, fname); + // bark_gguf_write_to_file(ctx, fname); // // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data: // // FILE * f = fopen(fname, "wb"); - // fseek(f, gguf_get_meta_size(ctx), SEEK_SET); + // fseek(f, bark_gguf_get_meta_size(ctx), SEEK_SET); // fwrite(f, ...); - // void * data = gguf_meta_get_meta_data(ctx); + // void * data = bark_gguf_meta_get_meta_data(ctx); // fseek(f, 0, SEEK_SET); - // fwrite(f, data, gguf_get_meta_size(ctx)); + // fwrite(f, data, bark_gguf_get_meta_size(ctx)); // free(data); // fclose(f); // // write the entire context to a binary file - GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta); + BARK_GGML_API void bark_gguf_write_to_file(const struct bark_gguf_context * ctx, const char * fname, bool only_meta); // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding - GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx); - GGML_API void gguf_get_meta_data(const struct gguf_context * ctx, void * data); + BARK_GGML_API size_t bark_gguf_get_meta_size(const struct bark_gguf_context * ctx); + BARK_GGML_API void bark_gguf_get_meta_data(const struct bark_gguf_context * ctx, void * data); // // system info // - GGML_API int ggml_cpu_has_avx (void); - GGML_API int ggml_cpu_has_avx2 (void); - GGML_API int ggml_cpu_has_avx512 (void); - GGML_API int ggml_cpu_has_avx512_vbmi(void); - GGML_API int ggml_cpu_has_avx512_vnni(void); - GGML_API int ggml_cpu_has_fma (void); - GGML_API int ggml_cpu_has_neon (void); - GGML_API int ggml_cpu_has_arm_fma (void); - GGML_API int ggml_cpu_has_metal (void); - GGML_API int ggml_cpu_has_f16c (void); - GGML_API int ggml_cpu_has_fp16_va (void); - GGML_API int ggml_cpu_has_wasm_simd (void); - GGML_API int ggml_cpu_has_blas (void); - GGML_API int ggml_cpu_has_cublas (void); - GGML_API int ggml_cpu_has_clblast (void); - GGML_API int ggml_cpu_has_gpublas (void); - GGML_API int ggml_cpu_has_sse3 (void); - GGML_API int ggml_cpu_has_ssse3 (void); - GGML_API int ggml_cpu_has_vsx (void); + BARK_GGML_API int bark_ggml_cpu_has_avx (void); + BARK_GGML_API int bark_ggml_cpu_has_avx2 (void); + BARK_GGML_API int bark_ggml_cpu_has_avx512 (void); + BARK_GGML_API int bark_ggml_cpu_has_avx512_vbmi(void); + BARK_GGML_API int bark_ggml_cpu_has_avx512_vnni(void); + BARK_GGML_API int bark_ggml_cpu_has_fma (void); + BARK_GGML_API int bark_ggml_cpu_has_neon (void); + 
BARK_GGML_API int bark_ggml_cpu_has_arm_fma (void); + BARK_GGML_API int bark_ggml_cpu_has_metal (void); + BARK_GGML_API int bark_ggml_cpu_has_f16c (void); + BARK_GGML_API int bark_ggml_cpu_has_fp16_va (void); + BARK_GGML_API int bark_ggml_cpu_has_wasm_simd (void); + BARK_GGML_API int bark_ggml_cpu_has_blas (void); + BARK_GGML_API int bark_ggml_cpu_has_cublas (void); + BARK_GGML_API int bark_ggml_cpu_has_clblast (void); + BARK_GGML_API int bark_ggml_cpu_has_gpublas (void); + BARK_GGML_API int bark_ggml_cpu_has_sse3 (void); + BARK_GGML_API int bark_ggml_cpu_has_ssse3 (void); + BARK_GGML_API int bark_ggml_cpu_has_vsx (void); // // Internal types and functions exposed for tests and benchmarks @@ -2091,27 +2091,27 @@ extern "C" { #ifdef __cplusplus // restrict not standard in C++ -#define GGML_RESTRICT +#define BARK_GGML_RESTRICT #else -#define GGML_RESTRICT restrict +#define BARK_GGML_RESTRICT restrict #endif - typedef void (*ggml_to_float_t) (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k); - typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k); - typedef void (*ggml_vec_dot_t) (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y); + typedef void (*bark_ggml_to_float_t) (const void * BARK_GGML_RESTRICT x, float * BARK_GGML_RESTRICT y, int k); + typedef void (*bark_ggml_from_float_t)(const float * BARK_GGML_RESTRICT x, void * BARK_GGML_RESTRICT y, int k); + typedef void (*bark_ggml_vec_dot_t) (const int n, float * BARK_GGML_RESTRICT s, const void * BARK_GGML_RESTRICT x, const void * BARK_GGML_RESTRICT y); typedef struct { const char * type_name; int blck_size; size_t type_size; bool is_quantized; - ggml_to_float_t to_float; - ggml_from_float_t from_float; - ggml_from_float_t from_float_reference; - ggml_vec_dot_t vec_dot; - enum ggml_type vec_dot_type; - } ggml_type_traits_t; - - GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type); + bark_ggml_to_float_t to_float; + bark_ggml_from_float_t from_float; + bark_ggml_from_float_t from_float_reference; + bark_ggml_vec_dot_t vec_dot; + enum bark_ggml_type vec_dot_type; + } bark_ggml_type_traits_t; + + BARK_GGML_API bark_ggml_type_traits_t bark_ggml_internal_get_type_traits(enum bark_ggml_type type); #ifdef __cplusplus } diff --git a/cpp/log.h b/cpp/log.h new file mode 100644 index 0000000..b469f6c --- /dev/null +++ b/cpp/log.h @@ -0,0 +1,16 @@ +#pragma once + +#ifdef __ANDROID__ +#include <android/log.h> +#define LOG_TAG "BarkRN" +#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__) +#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) +#elif __APPLE__ +#define LOGI(...) printf("[BarkRN] INFO: "); printf(__VA_ARGS__); printf("\n") +#define LOGD(...) printf("[BarkRN] DEBUG: "); printf(__VA_ARGS__); printf("\n") +#define LOGW(...) printf("[BarkRN] WARN: "); printf(__VA_ARGS__); printf("\n") +#define LOGE(...) printf("[BarkRN] ERROR: "); printf(__VA_ARGS__); printf("\n") +#else +#define LOGI(...) fprintf(stderr, __VA_ARGS__) +#define LOGE(...) 
fprintf(stderr, __VA_ARGS__) +#endif diff --git a/cpp/utils.cpp b/cpp/utils.cpp index 15b819c..10f3dc9 100644 --- a/cpp/utils.cpp +++ b/cpp/utils.cpp @@ -4,8 +4,8 @@ namespace barkrn { -void pcmToWav(const std::vector &data, const int sample_rate, - const std::string dest_path) { +void pcmToWav(float *data, int size, const int sample_rate, + const char *dest_path) { drwav_data_format format; format.bitsPerSample = 32; format.sampleRate = sample_rate; @@ -14,8 +14,8 @@ void pcmToWav(const std::vector &data, const int sample_rate, format.format = DR_WAVE_FORMAT_IEEE_FLOAT; drwav wav; - drwav_init_file_write(&wav, dest_path.c_str(), &format, NULL); - drwav_uint64 frames = drwav_write_pcm_frames(&wav, data.size(), data.data()); + drwav_init_file_write(&wav, dest_path, &format, NULL); + drwav_uint64 frames = drwav_write_pcm_frames(&wav, size, data); drwav_uninit(&wav); } diff --git a/cpp/utils.h b/cpp/utils.h index d8c7a8c..976883a 100644 --- a/cpp/utils.h +++ b/cpp/utils.h @@ -1,11 +1,11 @@ +#include + #ifndef BARKRN_H #define BARKRN_H -#include -#include namespace barkrn { -void pcmToWav(const std::vector &data, const int sample_rate, - const std::string dest_path); +void pcmToWav(float *data, int size, const int sample_rate, + const char *dest_path); } #endif /* BARKRN_H */ diff --git a/ios/BarkContext.mm b/ios/BarkContext.mm index 7e44da8..bfb3487 100644 --- a/ios/BarkContext.mm +++ b/ios/BarkContext.mm @@ -22,7 +22,7 @@ - (instancetype)initWithModelPath:(NSString *)model_path params:(NSDictionary *) if (ns_params && ns_params[@"sample_rate"]) sample_rate = [ns_params[@"sample_rate"] intValue]; n_threads = -1; if (ns_params && ns_params[@"n_threads"]) n_threads = [ns_params[@"n_threads"] intValue]; - if (n_threads < 0) n_threads = std::thread::hardware_concurrency() << 1; + if (n_threads < 0) n_threads = std::thread::hardware_concurrency() >> 1; if (n_threads == 0) n_threads = 1; bark_context_params params = [Convert convert_params:ns_params]; try { @@ -44,15 +44,16 @@ - (NSDictionary *)generate:(NSString *)text out_path:(NSString *)out_path { } bool success = false; try { - success = bark_generate_audio(context.get(), [text UTF8String], n_threads); + const char *c_text = [text UTF8String]; + NSLog(@"Generating %s with %d threads", c_text, n_threads); + success = bark_generate_audio(context.get(), c_text, n_threads); } catch (const std::exception &e) { @throw [NSException exceptionWithName:@"BarkContext" reason:[NSString stringWithUTF8String:e.what()] userInfo:nil]; } if (success) { int audio_samples = bark_get_audio_data_size(context.get()); - const float *audio_data = bark_get_audio_data(context.get()); - std::vector audio_data_vec(audio_data, audio_data + audio_samples); - barkrn::pcmToWav(audio_data_vec, sample_rate, [out_path UTF8String]); + float *audio_data = bark_get_audio_data(context.get()); + barkrn::pcmToWav(audio_data, audio_samples, sample_rate, [out_path UTF8String]); } return @{ @"success": @(success), diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh index b8a5837..412578b 100755 --- a/scripts/bootstrap.sh +++ b/scripts/bootstrap.sh @@ -23,5 +23,37 @@ FILES=( ) for file in "${FILES[@]}"; do - cp "$file" "cpp" + cp "$file" "cpp/" +done + +patch -p0 < ./scripts/ggml-alloc.c.patch +patch -p0 < ./scripts/ggml.c.patch + +if [ "$(uname)" == "Darwin" ]; then + SED="sed -i ''" +else + SED="sed -i" +fi + +PATCH_LOG_FILES=( + cpp/encodec.h + cpp/encodec.cpp + cpp/bark.h + cpp/bark.cpp +) + +for file in "${PATCH_LOG_FILES[@]}"; do + $SED 's/fprintf(stderr, /LOGE(/g' "$file" + 
$SED 's/printf(/LOGI(/g' "$file" + $SED '/#pragma once/a #include "log.h"' "$file" +done + +for file in "${FILES[@]}"; do + filename=$(basename "$file") + # Add prefix to avoid redefinition with other libraries using ggml like whisper.rn + $SED 's/GGML_/BARK_GGML_/g' "cpp/$filename" + $SED 's/ggml_/bark_ggml_/g' "cpp/$filename" + $SED 's/GGUF_/BARK_GGUF_/g' "cpp/$filename" + $SED 's/gguf_/bark_gguf_/g' "cpp/$filename" + $SED 's/GGMLMetalClass/BARKGGMLMetalClass/g' "cpp/$filename" done diff --git a/scripts/ggml-alloc.c.patch b/scripts/ggml-alloc.c.patch new file mode 100644 index 0000000..2373e45 --- /dev/null +++ b/scripts/ggml-alloc.c.patch @@ -0,0 +1,72 @@ +--- cpp/ggml-alloc.c 2024-10-24 14:42:04.320406093 +0800 ++++ cpp/ggml-alloc.c 2024-10-24 15:49:02.307240277 +0800 +@@ -272,22 +272,20 @@ + + struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); + +- *alloc = (struct ggml_allocr){ +- /*.buffer = */ buffer, +- /*.buffer_owned = */ true, +- /*.base = */ ggml_backend_buffer_get_base(buffer), +- /*.alignment = */ alignment, +- /*.n_free_blocks = */ 0, +- /*.free_blocks = */ {{0}}, +- /*.hash_table = */ {{0}}, +- /*.max_size = */ 0, +- /*.measure = */ false, +- /*.parse_seq = */ {0}, +- /*.parse_seq_len = */ 0, ++ alloc->buffer = buffer; ++ alloc->buffer_owned = true; ++ alloc->data = ggml_backend_buffer_get_base(buffer); ++ alloc->alignment = alignment; ++ alloc->n_free_blocks = 0; ++ memset(alloc->free_blocks, 0, sizeof(alloc->free_blocks)); ++ memset(alloc->hash_table, 0, sizeof(alloc->hash_table)); ++ alloc->max_size = 0; ++ alloc->measure = false; ++ memset(alloc->parse_seq, 0, sizeof(alloc->parse_seq)); ++ alloc->parse_seq_len = 0; + #ifdef GGML_ALLOCATOR_DEBUG +- /*.allocated_tensors = */ {0}, ++ memset(alloc->allocated_tensors, 0, sizeof(alloc->allocated_tensors)); + #endif +- }; + + ggml_allocr_reset(alloc); + +@@ -304,22 +302,20 @@ + struct ggml_allocr * ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) { + struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); + +- *alloc = (struct ggml_allocr){ +- /*.buffer = */ buffer, +- /*.buffer_owned = */ false, +- /*.base = */ ggml_backend_buffer_get_base(buffer), +- /*.alignment = */ ggml_backend_buffer_get_alignment(buffer), +- /*.n_free_blocks = */ 0, +- /*.free_blocks = */ {{0}}, +- /*.hash_table = */ {{0}}, +- /*.max_size = */ 0, +- /*.measure = */ false, +- /*.parse_seq = */ {0}, +- /*.parse_seq_len = */ 0, ++ alloc->buffer = buffer; ++ alloc->buffer_owned = false; ++ alloc->data = ggml_backend_buffer_get_base(buffer); ++ alloc->alignment = ggml_backend_buffer_get_alignment(buffer); ++ alloc->n_free_blocks = 0; ++ memset(alloc->free_blocks, 0, sizeof(alloc->free_blocks)); ++ memset(alloc->hash_table, 0, sizeof(alloc->hash_table)); ++ alloc->max_size = 0; ++ alloc->measure = false; ++ memset(alloc->parse_seq, 0, sizeof(alloc->parse_seq)); ++ alloc->parse_seq_len = 0; + #ifdef GGML_ALLOCATOR_DEBUG +- /*.allocated_tensors = */ {0}, ++ memset(alloc->allocated_tensors, 0, sizeof(alloc->allocated_tensors)); + #endif +- }; + + ggml_allocr_reset(alloc); + diff --git a/scripts/ggml.c.patch b/scripts/ggml.c.patch new file mode 100644 index 0000000..a6e689b --- /dev/null +++ b/scripts/ggml.c.patch @@ -0,0 +1,60 @@ +--- cpp/ggml.c 2024-10-24 14:42:04.330406051 +0800 ++++ cpp/ggml.c 2024-10-24 15:55:29.873849304 +0800 +@@ -4712,18 +4712,16 @@ + + const size_t mem_size = params.mem_buffer ? 
params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN); + +- *ctx = (struct ggml_context) { +- /*.mem_size =*/ mem_size, +- /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size), +- /*.mem_buffer_owned =*/ params.mem_buffer ? false : true, +- /*.no_alloc =*/ params.no_alloc, +- /*.no_alloc_save =*/ params.no_alloc, +- /*.n_objects =*/ 0, +- /*.objects_begin =*/ NULL, +- /*.objects_end =*/ NULL, +- /*.scratch =*/ { 0, 0, NULL, }, +- /*.scratch_save =*/ { 0, 0, NULL, }, +- }; ++ ctx->mem_size = mem_size; ++ ctx->mem_buffer = params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size); ++ ctx->mem_buffer_owned = params.mem_buffer ? false : true; ++ ctx->no_alloc = params.no_alloc; ++ ctx->no_alloc_save = params.no_alloc; ++ ctx->n_objects = 0; ++ ctx->objects_begin = NULL; ++ ctx->objects_end = NULL; ++ ctx->scratch = (struct ggml_scratch) { 0, 0, NULL }; ++ ctx->scratch_save = (struct ggml_scratch) { 0, 0, NULL }; + + GGML_ASSERT(ctx->mem_buffer != NULL); + +@@ -18481,18 +18479,16 @@ + struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE); + struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs); + +- *cgraph = (struct ggml_cgraph) { +- /*.n_nodes =*/ 0, +- /*.n_leafs =*/ 0, +- /*.nodes =*/ { NULL }, +- /*.grads =*/ { NULL }, +- /*.leafs =*/ { NULL }, +- /*.hash_table =*/ { NULL }, +- /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, +- /*.perf_runs =*/ 0, +- /*.perf_cycles =*/ 0, +- /*.perf_time_us =*/ 0, +- }; ++ cgraph->n_nodes = 0; ++ cgraph->n_leafs = 0; ++ memset(cgraph->nodes, 0, sizeof(cgraph->nodes)); ++ memset(cgraph->grads, 0, sizeof(cgraph->grads)); ++ memset(cgraph->leafs, 0, sizeof(cgraph->leafs)); ++ memset(cgraph->visited_hash_table, 0, sizeof(cgraph->visited_hash_table)); ++ cgraph->order = GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT; ++ cgraph->perf_runs = 0; ++ cgraph->perf_cycles = 0; ++ cgraph->perf_time_us = 0; + + return cgraph; + }
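The renaming done by scripts/bootstrap.sh and the new pointer-based `barkrn::pcmToWav` can be exercised outside the React Native bindings. Below is a minimal sketch, not part of the diff: the file name, the `"ggml.h"` include path, the output path, and the 24 kHz sample rate are assumptions, while the `bark_ggml_cpu_has_*` prototypes and the `pcmToWav(float *, int, const int, const char *)` signature come from the headers patched above. It assumes the renamed sources in `cpp/` (including `cpp/utils.cpp` and whatever provides the dr_wav implementation) are compiled into the same target.

```cpp
// sanity_check.cpp -- hypothetical file name, not part of this diff.
// Builds against the prefixed headers produced by scripts/bootstrap.sh.
#include <cmath>
#include <cstdio>
#include <vector>

#include "ggml.h"   // assumed include path; declares the BARK_GGML_-prefixed API after renaming
#include "utils.h"  // declares barkrn::pcmToWav(float *, int, const int, const char *)

int main() {
  // The prefixed system-info helpers behave like upstream ggml_cpu_has_*.
  std::printf("NEON: %d, Metal: %d, FMA: %d\n",
              bark_ggml_cpu_has_neon(),
              bark_ggml_cpu_has_metal(),
              bark_ggml_cpu_has_fma());

  // Write one second of a 440 Hz sine through the new pointer-based pcmToWav,
  // mirroring how the Android and iOS adapters now pass bark's audio buffer.
  const int sample_rate = 24000;  // assumed output rate
  std::vector<float> pcm(sample_rate);
  for (int i = 0; i < sample_rate; ++i) {
    pcm[i] = 0.5f * std::sin(2.0f * 3.14159265f * 440.0f * i / sample_rate);
  }
  barkrn::pcmToWav(pcm.data(), static_cast<int>(pcm.size()), sample_rate,
                   "/tmp/barkrn_check.wav");
  return 0;
}
```

Exactly which renamed sources need to be on the compile line depends on the FILES list in scripts/bootstrap.sh; for the WAV-writing half alone, cpp/utils.cpp plus its headers should be sufficient.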