From f0ec5242d554b418ccf9e0112f9a0edca28da4fa Mon Sep 17 00:00:00 2001 From: quzard <1191890118@qq.com> Date: Mon, 23 Dec 2024 11:51:00 +0800 Subject: [PATCH 01/12] set the logtail_mode flag to true for enterprise builds (#1976) --- core/app_config/AppConfig.cpp | 56 +++++++---------------------------- core/app_config/AppConfig.h | 1 - 2 files changed, 11 insertions(+), 46 deletions(-) diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp index 87a9a1ac4c..27aa6116cf 100644 --- a/core/app_config/AppConfig.cpp +++ b/core/app_config/AppConfig.cpp @@ -40,11 +40,13 @@ using namespace std; -#define ILOGTAIL_PREFIX "ilogtail_" -#define ILOGTAIL_PIDFILE_SUFFIX ".pid" #define LOONGCOLLECTOR_PREFIX "loongcollector_" +#ifdef __ENTERPRISE__ +DEFINE_FLAG_BOOL(logtail_mode, "logtail mode", true); +#else DEFINE_FLAG_BOOL(logtail_mode, "logtail mode", false); +#endif DEFINE_FLAG_INT32(max_buffer_num, "max size", 40); DEFINE_FLAG_INT32(pub_max_buffer_num, "max size", 8); DEFINE_FLAG_INT32(pub_max_send_byte_per_sec, "the max send speed per sec, realtime thread", 20 * 1024 * 1024); @@ -423,11 +425,7 @@ string GetAgentLoggersPrefix() { } string GetAgentLogName() { - if (BOOL_FLAG(logtail_mode)) { - return "ilogtail.LOG"; - } else { - return "loongcollector.LOG"; - } + return "loongcollector.LOG"; } string GetObserverEbpfHostPath() { @@ -481,19 +479,11 @@ string GetContinuousPipelineConfigDir() { } string GetPluginLogName() { - if (BOOL_FLAG(logtail_mode)) { - return "logtail_plugin.LOG"; - } else { - return "go_plugin.LOG"; - } + return "go_plugin.LOG"; } std::string GetVersionTag() { - if (BOOL_FLAG(logtail_mode)) { - return "logtail_version"; - } else { - return "loongcollector_version"; - } + return "loongcollector_version"; } std::string GetGoPluginCheckpoint() { @@ -505,43 +495,19 @@ std::string GetGoPluginCheckpoint() { } std::string GetAgentName() { - if (BOOL_FLAG(logtail_mode)) { - return "ilogtail"; - } else { - return "loongcollector"; - } + return 
"loongcollector"; } std::string GetMonitorInfoFileName() { - if (BOOL_FLAG(logtail_mode)) { - return "logtail_monitor_info"; - } else { - return "loongcollector_monitor_info"; - } + return "loongcollector_monitor_info"; } std::string GetSymLinkName() { - if (BOOL_FLAG(logtail_mode)) { - return GetProcessExecutionDir() + "ilogtail"; - } else { - return GetProcessExecutionDir() + "loongcollector"; - } -} - -std::string GetPidFileName() { - if (BOOL_FLAG(logtail_mode)) { - return GetProcessExecutionDir() + ILOGTAIL_PREFIX + ILOGTAIL_VERSION + ILOGTAIL_PIDFILE_SUFFIX; - } else { - return GetAgentRunDir() + "loongcollector.pid"; - } + return GetProcessExecutionDir() + "loongcollector"; } std::string GetAgentPrefix() { - if (BOOL_FLAG(logtail_mode)) { - return ILOGTAIL_PREFIX; - } else { - return LOONGCOLLECTOR_PREFIX; - } + return LOONGCOLLECTOR_PREFIX; } AppConfig::AppConfig() { diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h index a1f0af7ec5..609c4379f6 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -60,7 +60,6 @@ std::string GetGoPluginCheckpoint(); std::string GetAgentName(); std::string GetMonitorInfoFileName(); std::string GetSymLinkName(); -std::string GetPidFileName(); std::string GetAgentPrefix(); template From cacbf206cf66307819992b8fe393f8c36086ac0a Mon Sep 17 00:00:00 2001 From: Tom Yu Date: Mon, 23 Dec 2024 17:11:51 +0800 Subject: [PATCH 02/12] fix: add check future object valid before wait on it (#1986) --- core/common/http/AsynCurlRunner.cpp | 3 +++ core/common/timer/Timer.cpp | 3 +++ core/monitor/SelfMonitorServer.cpp | 3 +++ core/unittest/polling/CMakeLists.txt | 6 +++--- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/core/common/http/AsynCurlRunner.cpp b/core/common/http/AsynCurlRunner.cpp index dccfa4c2e8..ee9e514137 100644 --- a/core/common/http/AsynCurlRunner.cpp +++ b/core/common/http/AsynCurlRunner.cpp @@ -39,6 +39,9 @@ bool AsynCurlRunner::Init() { void AsynCurlRunner::Stop() { 
mIsFlush = true; + if (!mThreadRes.valid()) { + return; + } future_status s = mThreadRes.wait_for(chrono::seconds(1)); if (s == future_status::ready) { LOG_INFO(sLogger, ("async curl runner", "stopped successfully")); } else { diff --git a/core/common/timer/Timer.cpp b/core/common/timer/Timer.cpp index 08ed7ed12c..8a99fc4348 100644 --- a/core/common/timer/Timer.cpp +++ b/core/common/timer/Timer.cpp @@ -34,6 +34,9 @@ void Timer::Stop() { mIsThreadRunning = false; } mCV.notify_one(); + if (!mThreadRes.valid()) { + return; + } future_status s = mThreadRes.wait_for(chrono::seconds(1)); if (s == future_status::ready) { LOG_INFO(sLogger, ("timer", "stopped successfully")); diff --git a/core/monitor/SelfMonitorServer.cpp b/core/monitor/SelfMonitorServer.cpp index d4a75e141a..42433dd928 100644 --- a/core/monitor/SelfMonitorServer.cpp +++ b/core/monitor/SelfMonitorServer.cpp @@ -64,6 +64,9 @@ void SelfMonitorServer::Stop() { mIsThreadRunning = false; } mStopCV.notify_one(); + if (!mThreadRes.valid()) { + return; + } future_status s = mThreadRes.wait_for(chrono::seconds(1)); if (s == future_status::ready) { LOG_INFO(sLogger, ("self-monitor", "stopped successfully")); diff --git a/core/unittest/polling/CMakeLists.txt b/core/unittest/polling/CMakeLists.txt index 80fe2a46c1..23843d1891 100644 --- a/core/unittest/polling/CMakeLists.txt +++ b/core/unittest/polling/CMakeLists.txt @@ -18,8 +18,8 @@ project(polling_unittest) # add_executable(polling_unittest PollingUnittest.cpp) # target_link_libraries(polling_unittest ${UT_BASE_TARGET}) -# add_executable(polling_preserved_dir_depth_unittest PollingPreservedDirDepthUnittest.cpp) -# target_link_libraries(polling_preserved_dir_depth_unittest ${UT_BASE_TARGET}) +add_executable(polling_preserved_dir_depth_unittest PollingPreservedDirDepthUnittest.cpp) +target_link_libraries(polling_preserved_dir_depth_unittest ${UT_BASE_TARGET}) include(GoogleTest) -# gtest_discover_tests(polling_preserved_dir_depth_unittest) \ No newline at end of file
+gtest_discover_tests(polling_preserved_dir_depth_unittest) \ No newline at end of file From 82aed458b10bcf6dbc923509e42f0dabeb7f8d95 Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Thu, 26 Dec 2024 18:45:12 +0800 Subject: [PATCH 03/12] init (#1993) --- docs/cn/SUMMARY.md | 12 +- docs/cn/configuration/collection-config.md | 8 +- docs/cn/developer-guide/data-model-cpp.md | 3 + .../{data-model.md => data-model-golang.md} | 5 +- .../plugin-debug/plugin-self-monitor-guide.md | 2 +- .../metrics/how-to-add-internal-metrics.md | 9 + .../how-to-collect-internal-metrics.md | 66 ++++++ .../metrics/internal-metrics-description.md | 196 ++++++++++++++++++ .../cn/plugins/flusher/native/flusher-file.md | 73 +++++++ docs/cn/plugins/input/inputs.md | 53 ++--- .../input/native/input-internal-metrics.md | 87 ++++++++ docs/cn/plugins/overview.md | 2 + 12 files changed, 481 insertions(+), 35 deletions(-) create mode 100644 docs/cn/developer-guide/data-model-cpp.md rename docs/cn/developer-guide/{data-model.md => data-model-golang.md} (97%) create mode 100644 docs/cn/developer-guide/self-monitor/metrics/how-to-add-internal-metrics.md create mode 100644 docs/cn/developer-guide/self-monitor/metrics/how-to-collect-internal-metrics.md create mode 100644 docs/cn/developer-guide/self-monitor/metrics/internal-metrics-description.md create mode 100644 docs/cn/plugins/flusher/native/flusher-file.md create mode 100644 docs/cn/plugins/input/native/input-internal-metrics.md diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md index 5bd39bf545..6fa512a6ca 100644 --- a/docs/cn/SUMMARY.md +++ b/docs/cn/SUMMARY.md @@ -47,6 +47,7 @@ * [eBPF网络安全数据](plugins/input/native/input-network-security.md) * [eBPF文件安全数据](plugins/input/native/input-file-security.md) * [eBPF网络调用数据](plugins/input/native/metric-observer.md) + * [自监控指标数据](plugins/input/native/input-internal-metrics.md) * 扩展输入插件 * [容器标准输出](plugins/input/extended/service-docker-stdout.md) * 
[脚本执行数据](plugins/input/extended/input-command.md) @@ -111,6 +112,7 @@ * [什么是输出插件](plugins/flusher/flushers.md) * 原生输出插件 * [SLS](plugins/flusher/native/flusher-sls.md) + * [本地文件](plugins/flusher/native/flusher-file.md) * [【Debug】Blackhole](plugins/flusher/native/flusher-blackhole.md) * [多Flusher路由](plugins/flusher/native/router.md) * 扩展输出插件 @@ -140,7 +142,8 @@ * [开发环境](developer-guide/development-environment.md) * [代码风格](developer-guide/codestyle.md) -* [数据模型](developer-guide/data-model.md) +* [数据模型(C++)](developer-guide/data-model-cpp.md) +* [数据模型(Golang)](developer-guide/data-model-golang.md) * 日志协议 * [什么是日志协议](developer-guide/log-protocol/log-protocol.md) * [协议转换](developer-guide/log-protocol/converter.md) @@ -149,6 +152,11 @@ * [sls协议](developer-guide/log-protocol/protocol-spec/sls.md) * [单条协议](developer-guide/log-protocol/protocol-spec/custom-single.md) * [raw协议](developer-guide/log-protocol/protocol-spec/raw.md) +* 自监控 + * 指标 + * [自监控指标说明](developer-guide/self-monitor/metrics/internal-metrics-description.md) + * [如何收集自监控指标](developer-guide/self-monitor/metrics/how-to-collect-internal-metrics.md) + * [如何添加自监控指标](developer-guide/self-monitor/metrics/how-to-add-internal-metrics.md) * 插件开发 * [开源插件开发引导](developer-guide/plugin-development/plugin-development-guide.md) * 原生插件开发 @@ -168,7 +176,7 @@ * [插件文档规范](developer-guide/plugin-development/plugin-docs/plugin-doc-templete.md) * 插件调试 * [Logger接口](developer-guide/plugin-development/plugin-debug/logger-api.md) - * [自监控指标接口](developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md) + * [Golang 自监控指标接口](developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md) * [纯插件模式启动](developer-guide/plugin-development/plugin-debug/pure-plugin-start.md) * 测试 * [单元测试](developer-guide/test/unit-test.md) diff --git a/docs/cn/configuration/collection-config.md b/docs/cn/configuration/collection-config.md index b6b26190a2..14f90fc05a 100644 --- a/docs/cn/configuration/collection-config.md +++ 
b/docs/cn/configuration/collection-config.md @@ -1,6 +1,6 @@ # 采集配置 -`iLogtail`流水线是通过采集配置文件来定义的,每一个采集配置文件对应一条流水线。 +`LoongCollector`流水线是通过采集配置文件来定义的,每一个采集配置文件对应一条流水线。 ## 格式 @@ -24,11 +24,11 @@ ## 组织形式 -本地的采集配置文件默认均存放在`./config/local`目录下,每个采集配置一个文件,文件名即为采集配置的名称。 +本地的采集配置文件默认均存放在`./conf/continuous_pipeline_config/local`目录下,每个采集配置一个文件,文件名即为采集配置的名称。 ## 热加载 -采集配置文件支持热加载,当您在`./config/local`目录下新增或修改已有配置文件,iLogtail将自动感知并重新加载配置。生效等待时间最长默认为10秒,可通过启动参数`config_scan_interval`进行调整。 +采集配置文件支持热加载,当您在`./conf/continuous_pipeline_config/local`目录下新增或修改已有配置文件,LoongCollector 将自动感知并重新加载配置。生效等待时间最长默认为10秒,可通过启动参数`config_scan_interval`进行调整。 ## 示例 @@ -52,4 +52,4 @@ flushers: OnlyStdout: true ``` -其它常见的采集配置可参考[`example_config`](../../../example_config/)目录. +其它常见的采集配置可参考源代码中的[`example_config`](https://github.com/alibaba/loongcollector/tree/main/example_config)目录. diff --git a/docs/cn/developer-guide/data-model-cpp.md b/docs/cn/developer-guide/data-model-cpp.md new file mode 100644 index 0000000000..224bca7112 --- /dev/null +++ b/docs/cn/developer-guide/data-model-cpp.md @@ -0,0 +1,3 @@ +# 数据模型(C++) + +【文档施工中,请耐心等待】 diff --git a/docs/cn/developer-guide/data-model.md b/docs/cn/developer-guide/data-model-golang.md similarity index 97% rename from docs/cn/developer-guide/data-model.md rename to docs/cn/developer-guide/data-model-golang.md index 3e2185835d..d115ed30c2 100644 --- a/docs/cn/developer-guide/data-model.md +++ b/docs/cn/developer-guide/data-model-golang.md @@ -1,6 +1,7 @@ -# 数据模型 +# 数据模型(Golang) + +LoongCollector Golang 部分目前支持 `SLS Log Protocol` 和 `Pipeline Event` 两种数据模型,两种模型的描述和对比如下: -LoongCollector 目前支持 `SLS Log Protocol` 和 `Pipeline Event` 两种数据模型,两种模型的描述和对比如下: | | SLS Log Protocol | Pipeline Event | | ---- | ---- | ---- | | 描述 | SLS 日志的专用处理结构 | 可扩展的可观测性数据模型,支持Metrics、Trace、Logging、Bytes、Profile等 | diff --git a/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md b/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md index 
6b3f56c98f..ca054f9f90 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md +++ b/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md @@ -1,4 +1,4 @@ -# 插件自监控接口 +# Golang 插件自监控接口 LoongCollector 提供了指标接口,可以方便地为插件增加一些自监控指标,目前支持Counter,Gauge,String,Latency等类型。 diff --git a/docs/cn/developer-guide/self-monitor/metrics/how-to-add-internal-metrics.md b/docs/cn/developer-guide/self-monitor/metrics/how-to-add-internal-metrics.md new file mode 100644 index 0000000000..1d8ef78f24 --- /dev/null +++ b/docs/cn/developer-guide/self-monitor/metrics/how-to-add-internal-metrics.md @@ -0,0 +1,9 @@ +# 如何添加自监控指标 + +## C++ 添加自监控指标 + +【文档施工中,请耐心等待】 + +## Golang 添加自监控指标 + +参见[Golang 自监控指标接口](../../plugin-development/plugin-debug/plugin-self-monitor-guide.md) \ No newline at end of file diff --git a/docs/cn/developer-guide/self-monitor/metrics/how-to-collect-internal-metrics.md b/docs/cn/developer-guide/self-monitor/metrics/how-to-collect-internal-metrics.md new file mode 100644 index 0000000000..739c4cc35a --- /dev/null +++ b/docs/cn/developer-guide/self-monitor/metrics/how-to-collect-internal-metrics.md @@ -0,0 +1,66 @@ +# 如何收集自监控指标 + +LoongCollector目前提供了暴露自监控指标数据的Input插件,可以通过配置包含该插件的Pipeline,实现自监控数据的收集。 + +## 创建采集配置Pipeline + +1. 选择输入插件[自监控指标数据](../../../plugins/input/native/input-internal-metrics.md)。这里需要注意一点,就是`input_internal_metrics`插件输出的数据格式是 C++ 的多值 MetricEvent 格式(UntypedMultiDoubleValues),需要确保数据的下游支持这种格式数据的处理。 +2. 选择输出插件[本地文件](../../../plugins/flusher/native/flusher-file.md)。该插件为原生输出插件,使用的`Serializer`支持 [C++ 的多值 MetricEvent 格式(UntypedMultiDoubleValues)的解析](https://github.com/alibaba/loongcollector/blob/cacbf206cf66307819992b8fe393f8c36086ac0a/core/pipeline/serializer/JsonSerializer.cpp#L84),所以可以直接使用来输出自监控指标数据。我们输出到`self_monitor/self_metrics.log`文件,方便查看与分析。原生输出插件与`Serializer`的关系请参见[如何开发原生Flusher插件](../../plugin-development/native-plugins/how-to-write-native-flusher-plugins.md)。 +3. 
最终的yaml如下。我们将其保存到 LoongCollector 的运行目录下的 `conf/continuous_pipeline_config/local`目录, LoongCollector 会自动加载该配置。 + + ```yaml + enable: true + inputs: + - Type: input_internal_metrics + Agent: + Enable: true + Interval: 1 + Runner: + Enable: true + Interval: 1 + Pipeline: + Enable: true + Interval: 1 + Plugin: + Enable: true + Interval: 10 + Component: + Enable: true + Interval: 10 + PluginSource: + Enable: true + Interval: 10 + flushers: + - Type: flusher_file + FilePath: self_monitor/self_metrics.log + ``` + +## 查看自监控指标数据 + +采集配置生效后,大约一分钟,可以看到自监控指标数据输出到`self_monitor/self_metrics.log`文件。文件中,每行均为一条json格式的指标。下面是其中一行 agent 级指标展开后的参考样例。`__name__`是指标类型,`__labels__`是标识该条指标对应的对象的标签,`__time__`是指标输出的时间戳,`__value__`是具体指标的值的map。 + +```json +{ + "__labels__":{ + "hostname":"xxx", + "instance_id":"xxx", + "os":"Linux", + "os_detail":"xxx", + "project":"", + "start_time":"2024-12-26 06:20:25", + "uuid":"xxx", + "version":"0.0.1" + }, + "__name__":"agent", + "__source__":"xxx.xxx.xxx.xxx", + "__time__":1735194085, + "__value__":{ + "cpu":0.002, + "go_memory_used_mb":0.0, + "go_routines_total":0.0, + "memory_used_mb":25.0, + "open_fd_total":0.0, + "pipeline_config_total":1.0 + } +} +``` diff --git a/docs/cn/developer-guide/self-monitor/metrics/internal-metrics-description.md b/docs/cn/developer-guide/self-monitor/metrics/internal-metrics-description.md new file mode 100644 index 0000000000..a3cea4dd72 --- /dev/null +++ b/docs/cn/developer-guide/self-monitor/metrics/internal-metrics-description.md @@ -0,0 +1,196 @@ +# 自监控指标说明 + +## 指标类型 + +在 LoongCollector 中,有如下几种指标类型: + +* 进程级(agent):LoongCollector的整体状态,包含一些Cpu、Mem等信息。 +* Runner级(runner):LoongCollector内部的独立线程的单例,通常是一整个功能模块,例如file_server、processor_runner等。runner级指标记录的就是这些单例的状态。 +* 配置级(pipeline):每个采集配置的整体状态,例如一条采集配置的总输入、输出、延迟。 +* 组件级(component):每个采集配置运行过程中,会伴随一些组件的使用,例如Batcher、Compressor等。component级指标记录的就是这些组件的状态。 +* 插件级(plugin):每个配置中单个插件的详细指标,例如某个Processor插件的输入、输出、解析失败率。 +* 
数据源级(plugin_source):每个配置的数据源的指标,例如文件采集时,每个源文件会有对应的数据,包含文件大小、读取的offset等。 + +## 指标格式 + +LoongCollector的指标为多值Metric结构。具体来说,对于某一个确定的对象(例如一条Pipeline、一个插件、一个数据源),它会存在一条指标记录,里面会以多个label来唯一标识它,并记录多个与它相关的指标值。 + +下面是一条指标的样例。`__name__`是指标类型,`__labels__`是标识该条指标对应的对象的标签,`__time__`是指标输出的时间戳,`__value__`是具体指标的值的map。 + +```json +{ + "__labels__":{ + "component_name":"process_queue", + "pipeline_name":"pipeline-demo", + "project":"", + "queue_type":"bounded" + }, + "__name__":"component", + "__time__":1735127390, + "__value__":{ + "fetch_times_total":6000.0, + "in_items_total":0.0, + "in_size_bytes":0.0, + "out_items_total":0.0, + "queue_size":0.0, + "queue_size_bytes":0.0, + "total_delay_ms":0.0, + "valid_fetch_times_total":0.0, + "valid_to_push_status":1.0 + } +} +``` + +## 指标解释 + +LoongCollector的指标较多,这里仅列举一部分重要指标作为说明,未涉及的指标有些是过于细节的内部实现监控,有些是存在变化的可能,可以参考[源代码](https://github.com/alibaba/loongcollector/tree/main/core/monitor/metric_constants)查看用途。 + +### Agent级指标 + +Agent级的指标记录了LoongCollector的整体状态,例如Cpu、Mem等,全局唯一。 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| instance_id | LoongCollector 的唯一标识 | | +| start_time | LoongCollector 的启动时间 | | +| hostname | LoongCollector 所在的机器名 | | +| os | LoongCollector 所处的操作系统 | | +| os_detail | LoongCollector 的系统详情 | | +| version | LoongCollector 的版本 | | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| cpu | LoongCollector 的cpu使用核数 | | +| memory_used_mb | LoongCollector 的内存使用情况,单位为mb | | +| go_routines_total | LoongCollector Go 部分启动的go routine数量 | k8s场景或使用扩展插件时会启动 LoongCollector Go 部分 | +| go_memory_used_mb | LoongCollector Go 部分占用的内存,单位为mb | k8s场景或使用扩展插件时会启动 LoongCollector Go 部分 | +| open_fd_total | LoongCollector 打开的文件描述符数量 | | +| pipeline_config_total | LoongCollector 应用的采集配置数量 | | + +### Runner级指标 + +Runner 是 LoongCollector 内部的独立线程的单例,通常是一整个功能模块,例如file_server、processor_runner等。 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| runner_name | Runner 的名称 | 
常见的runner有:file_server、processor_runner、flusher_runner、http_sink等 | +| thread_no | Runner 的线程序号 | | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| in_events_total | 当前统计周期内,进入 Runner 的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| in_size_bytes | 当前统计周期内,进入 Runner 的数据大小,单位为字节 | 这里统计的是进入 Runner 的数据的大小,该数据可能是压缩过的,不能完全等价于 event 的数据大小 | +| last_run_time | Runner 上次执行任务的时间,格式为秒级时间戳 | | +| total_delay_ms | Runner 执行任务的总延迟,单位为毫秒 | | + +### Pipeline级指标 + +Pipeline 是 LoongCollector 的[采集配置](../../../configuration/collection-config.md),它的指标包含Pipeline的基础信息和吞吐量。 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| pipeline_name | 采集配置流水线名称 | | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| processor_in_events_total | 当前统计周期内,进入 Processor 的 event 总数 | | +| processor_in_size_bytes | 当前统计周期内,进入 Processor 的数据大小,单位为字节 | | +| processor_total_process_time_ms | 当前统计周期内,Processor 处理 event 总耗时,单位为毫秒 | | +| flusher_in_events_total | 当前统计周期内,进入 Flusher 的 event 总数 | | +| flusher_in_size_bytes | 当前统计周期内,进入 Flusher 的数据大小,单位为字节 | | +| flusher_total_package_time_ms | 当前统计周期内,Flusher 处理 event 总耗时,单位为毫秒 | | +| start_time | Pipeline 启动时间,格式为秒级时间戳 | Pipeline更新时,会重新启动,所以该指标可以用于判断 Pipeline 是否成功更新 | + +### Component级指标 + +组件是用于辅助Pipeline运行的对象,它们归属于Pipeline,却对外部不可见(外部可见、可配置的是Plugin)。组件的指标根据组件类型而不同,这里只列举一些重要的。 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| component_name | 组件名称 | 有:batcher,compressor,process_queue,router,sender_queue,serializer等。 | +| pipeline_name | 组件关联的采集配置流水线名称 | | +| flusher_plugin_id | 组件关联的Flusher插件ID | 部分组件会与Pipeline中的Flusher插件关联,例如 FlusherQueue、Bacther、Compressor等,他们的关系可以参考[如何开发原生Flusher插件](../../plugin-development/native-plugins/how-to-write-native-flusher-plugins.md)。 | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| in_events_total | 当前统计周期内,进入组件的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| out_events_total | 
当前统计周期内,流出组件的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| discarded_events_total | 当前统计周期内,被丢弃的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| in_items_total | 当前统计周期内,进入组件的 item 总数 | item 是一些数据结构的统称,需要根据具体组件判断,不一定对应一条日志 | +| out_items_total | 当前统计周期内,流出组件的 item 总数 | item 是一些数据结构的统称,需要根据具体组件判断,不一定对应一条日志 | +| discarded_items_total | 当前统计周期内,被丢弃的 item 总数 | item 是一些数据结构的统称,需要根据具体组件判断,不一定对应一条日志 | +| in_size_bytes | 当前统计周期内,进入组件的数据大小,单位为字节 | 这里统计的是进入 Runner 的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| out_size_bytes | 当前统计周期内,流出组件的数据大小,单位为字节 | 这里统计的是流出 Runner 的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| discarded_size_bytes | 当前统计周期内,被丢弃的数据大小,单位为字节 | 这里统计的是 Runner 丢弃的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| total_delay_ms | 当前统计周期内,组件聚合/发送等的延时,单位为毫秒 | | +| total_process_time_ms | 当前统计周期内,组件处理总耗时,单位为毫秒 | | + +### Plugin级指标 + +一条采集配置Pipeline会包含一些[插件](../../../plugins/overview.md),每个插件在运行过程中都会产生一些指标。 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| plugin_type | 插件名 | | +| plugin_id | 插件id | 此ID按Pipeline内插件顺序生成,暂时只用于标识插件,没有其他含义 | +| pipeline_name | 插件所属的采集配置流水线名称 | | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| in_events_total | 当前统计周期内,进入插件的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| out_events_total | 当前统计周期内,流出插件的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| discarded_events_total | 当前统计周期内,被丢弃的 event 总数 | event 即 PipelineEvent 数据结构,基本可以认为是一条日志 | +| in_size_bytes | 当前统计周期内,进入插件的数据大小,单位为字节 | 这里统计的是进入 Runner 的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| out_size_bytes | 当前统计周期内,流出插件的数据大小,单位为字节 | 这里统计的是流出 Runner 的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| discarded_size_bytes | 当前统计周期内,被丢弃的数据大小,单位为字节 | 这里统计的是 Runner 丢弃的数据的大小,该数据可能是压缩或特殊处理过的,不能完全等价于 event 的数据大小 | +| total_delay_ms | 当前统计周期内,插件聚合/发送等的延时,单位为毫秒 | | +| total_process_time_ms | 当前统计周期内,插件处理总耗时,单位为毫秒 | | +| monitor_file_total | 当前统计周期内,插件监控的文件总数 | 仅限文件采集场景 | +| | | | + +### PluginSource级指标 + 
+这一级指标是标记数据源信息的,例如对于文件采集,被采集的文件的信息就会记录到PluginSource级指标中 + +常见Labels: + +| **Label名** | **含义** | **备注** | +| --- | --- | --- | +| file_dev | 被采集的文件设备号 | 仅限文件采集 | +| file_inode | 被采集的文件inode号 | 仅限文件采集 | +| file_name | 被采集的文件路径 | 仅限文件采集 | + +常见Metric Key: + +| **Metric Key** | **含义** | **备注** | +| --- | --- | --- | +| read_offset_bytes | 当前读取的文件读到的位置 | 仅限文件采集 | +| size_bytes | 当前读取的文件的大小 | 仅限文件采集 | + +## 获取自监控指标 + +请参见[如何收集自监控指标](how-to-collect-internal-metrics.md)。 + +## 添加自监控指标 + +请参见[如何添加自监控指标](how-to-add-internal-metrics.md)。 diff --git a/docs/cn/plugins/flusher/native/flusher-file.md b/docs/cn/plugins/flusher/native/flusher-file.md new file mode 100644 index 0000000000..46d9825e05 --- /dev/null +++ b/docs/cn/plugins/flusher/native/flusher-file.md @@ -0,0 +1,73 @@ +# 本地文件 + +## 简介 + +`flusher_file` `flusher`插件将采集到的数据写入本地文件中。flusher\_file插件使用[spdlog](https://github.com/gabime/spdlog)库实现,所以写入的文件具有部分日志文件的特征,例如存在大小限制、会自动轮转。 + +## 版本 + +[Alpha](../../stability-level.md) + +## 配置参数 + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Type | string | 是 | / | 插件类型。固定为flusher\_file。 | +| FilePath | string | 是 | / | 目标文件路径。写入的文件大小超过10M时会触发轮转,最多同时存在10个文件。 | + +## 样例 + +采集LoongCollector所有自监控指标,并将采集结果写到本地文件。 + +``` yaml +enable: true +inputs: + - Type: input_internal_metrics + Agent: + Enable: true + Interval: 1 + Runner: + Enable: true + Interval: 1 + Pipeline: + Enable: true + Interval: 1 + Plugin: + Enable: true + Interval: 10 + Component: + Enable: true + Interval: 10 + PluginSource: + Enable: true + Interval: 10 +flushers: + - Type: flusher_file + FilePath: self_monitor/self_metrics.log +``` + +输出到 LoongCollector 的 `self_monitor/self_metrics.log` 文件中,每行均为一条json格式的指标。下面是其中一行展开后的参考样例: + +```json +{ + "__labels__":{ + "component_name":"process_queue", + "pipeline_name":"pipeline-demo", + "project":"", + "queue_type":"bounded" + }, + "__name__":"component", + "__time__":1735127390, + "__value__":{ + "fetch_times_total":6000.0, + 
"in_items_total":0.0, + "in_size_bytes":0.0, + "out_items_total":0.0, + "queue_size":0.0, + "queue_size_bytes":0.0, + "total_delay_ms":0.0, + "valid_fetch_times_total":0.0, + "valid_to_push_status":1.0 + } +} +``` diff --git a/docs/cn/plugins/input/inputs.md b/docs/cn/plugins/input/inputs.md index 636684270d..324a92a510 100644 --- a/docs/cn/plugins/input/inputs.md +++ b/docs/cn/plugins/input/inputs.md @@ -17,13 +17,14 @@ | 名称 | 提供方 | 功能简介 | |------|--------|----------| -| [`input_file`](native/input-file.md)
文本日志 | SLS官方 | 文本采集。 | -| [`input_container_stdio`](native/input-container-stdio.md)
容器标准输出(原生插件) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | -| [`input_observer_network`](native/metric-observer.md)
eBPF网络调用数据 | SLS官方 | 支持从网络系统调用中收集四层网络调用,并借助网络解析模块,可以观测七层网络调用细节。 | -| [`input_file_security`](native/input-file-security.md)
文件安全数据 | SLS官方 | 文件安全数据采集。 | -| [`input_network_observer`](native/input-network-observer.md)
网络可观测数据 | SLS官方 | 网络可观测数据采集。 | -| [`input_network_security`](native/input-network-security.md)
网络安全数据 | SLS官方 | 网络安全数据采集。 | -| [`input_process_security`](native/input-process-security.md)
进程安全数据 | SLS官方 | 进程安全数据采集。 | +| `input_file`
[文本日志](native/input-file.md) | SLS官方 | 文本采集。 | +| `input_container_stdio`
[容器标准输出](native/input-container-stdio.md) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | +| `input_observer_network`
[eBPF网络调用数据](native/metric-observer.md) | SLS官方 | 支持从网络系统调用中收集四层网络调用,并借助网络解析模块,可以观测七层网络调用细节。 | +| `input_file_security`
[文件安全数据](native/input-file-security.md) | SLS官方 | 文件安全数据采集。 | +| `input_network_observer`
[网络可观测数据](native/input-network-observer.md) | SLS官方 | 网络可观测数据采集。 | +| `input_network_security`
[网络安全数据](native/input-network-security.md) | SLS官方 | 网络安全数据采集。 | +| `input_process_security`
[进程安全数据](native/input-process-security.md) | SLS官方 | 进程安全数据采集。 | +| `input_internal_metrics`
[自监控指标数据](native/input-internal-metrics.md) | SLS官方 | 导出自监控指标数据。 | ### 扩展插件 @@ -36,25 +37,25 @@ | 名称 | 提供方 | 功能简介 | |------|--------|----------| -| [`input_command`](extended/input-command.md)
脚本执行数据 | 社区
[`didachuxing`](https://github.com/didachuxing) | 采集脚本执行数据。 | -| [`input_docker_stdout`](extended/service-docker-stdout.md)
容器标准输出 | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | -| [`metric_debug_file`](extended/metric-debug-file.md)
文本日志(debug) | SLS官方 | 用于调试的读取文件内容的插件。 | -| [`metric_input_example`](extended/metric-input-example.md)
MetricInput示例插件 | SLS官方 | MetricInput示例插件。 | -| [`metric_meta_host`](extended/metric-meta-host.md)
主机Meta数据 | SLS官方 | 主机Meta数据。 | -| [`metric_mock`](extended/metric-mock.md)
Mock数据-Metric | SLS官方 | 生成metric模拟数据的插件。 | -| [`metric_system_v2`](extended/metric-system.md)
主机监控数据 | SLS官方 | 主机监控数据。 | -| [`service_canal`](extended/service-canal.md)
MySQL Binlog | SLS官方 | 将MySQL Binlog输入到iLogtail。 | -| [`service_go_profile`](extended/service-goprofile.md)
GO Profile | SLS官方 | 采集Golang pprof 性能数据。 | -| [`service_gpu_metric`](extended/service-gpu.md)
GPU数据 | SLS官方 | 支持收集英伟达GPU指标。 | -| [`service_http_server`](extended/service-http-server.md)
HTTP数据 | SLS官方 | 接收来自unix socket、http/https、tcp的请求,并支持sls协议、otlp等多种协议。 | -| [`service_input_example`](extended/service-input-example.md)
ServiceInput示例插件 | SLS官方 | ServiceInput示例插件。 | -| [`service_journal`](extended/service-journal.md)
Journal数据 | SLS官方 | 从原始的二进制文件中采集Linux系统的Journal(systemd)日志。 | -| [`service_kafka`](extended/service-kafka.md)
Kafka | SLS官方 | 将Kafka数据输入到iLogtail。 | -| [`service_mock`](extended/service-mock.md)
Mock数据-Service | SLS官方 | 生成service模拟数据的插件。 | -| [`service_mssql`](extended/service-mssql.md)
SqlServer查询数据 | SLS官方 | 将Sql Server数据输入到iLogtail。 | -| [`service_otlp`](extended/service-otlp.md)
OTLP数据 | 社区
[`Zhu Shunjia`](https://github.com/shunjiazhu) | 通过http/grpc协议,接收OTLP数据。 | -| [`service_pgsql`](extended/service-pgsql.md)
PostgreSQL查询数据 | SLS官方 | 将PostgresSQL数据输入到iLogtail。 | -| [`service_syslog`](extended/service-syslog.md)
Syslog数据 | SLS官方 | 采集syslog数据。 | +| `input_command`
[脚本执行数据](extended/input-command.md) | 社区
[didachuxing](https://github.com/didachuxing) | 采集脚本执行数据。 | +| `input_docker_stdout`
[容器标准输出](extended/service-docker-stdout.md) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | +| `metric_debug_file`
[文本日志(debug)](extended/metric-debug-file.md) | SLS官方 | 用于调试的读取文件内容的插件。 | +| `metric_input_example`
[MetricInput示例插件](extended/metric-input-example.md) | SLS官方 | MetricInput示例插件。 | +| `metric_meta_host`
[主机Meta数据](extended/metric-meta-host.md) | SLS官方 | 主机Meta数据。 | +| `metric_mock`
[Mock数据-Metric](extended/metric-mock.md) | SLS官方 | 生成metric模拟数据的插件。 | +| `metric_system_v2`
[主机监控数据](extended/metric-system.md) | SLS官方 | 主机监控数据。 | +| `service_canal`
[MySQL Binlog](extended/service-canal.md) | SLS官方 | 将MySQL Binlog输入到iLogtail。 | +| `service_go_profile`
[GO Profile](extended/service-goprofile.md) | SLS官方 | 采集Golang pprof 性能数据。 | +| `service_gpu_metric`
[GPU数据](extended/service-gpu.md) | SLS官方 | 支持收集英伟达GPU指标。 | +| `service_http_server`
[HTTP数据](extended/service-http-server.md) | SLS官方 | 接收来自unix socket、http/https、tcp的请求,并支持sls协议、otlp等多种协议。 | +| `service_input_example`
[ServiceInput示例插件](extended/service-input-example.md) | SLS官方 | ServiceInput示例插件。 | +| `service_journal`
[Journal数据](extended/service-journal.md) | SLS官方 | 从原始的二进制文件中采集Linux系统的Journal(systemd)日志。 | +| `service_kafka`
[Kafka](extended/service-kafka.md) | SLS官方 | 将Kafka数据输入到iLogtail。 | +| `service_mock`
[Mock数据-Service](extended/service-mock.md) | SLS官方 | 生成service模拟数据的插件。 | +| `service_mssql`
[SqlServer查询数据](extended/service-mssql.md) | SLS官方 | 将Sql Server数据输入到iLogtail。 | +| `service_otlp`
[OTLP数据](extended/service-otlp.md) | 社区
[Zhu Shunjia](https://github.com/shunjiazhu) | 通过http/grpc协议,接收OTLP数据。 | +| `service_pgsql`
[PostgreSQL查询数据](extended/service-pgsql.md) | SLS官方 | 将PostgresSQL数据输入到iLogtail。 | +| `service_syslog`
[Syslog数据](extended/service-syslog.md) | SLS官方 | 采集syslog数据。 | ## 插件特性对比 diff --git a/docs/cn/plugins/input/native/input-internal-metrics.md b/docs/cn/plugins/input/native/input-internal-metrics.md new file mode 100644 index 0000000000..10b4495b01 --- /dev/null +++ b/docs/cn/plugins/input/native/input-internal-metrics.md @@ -0,0 +1,87 @@ +# 自监控指标数据 + +## 简介 + +`input_internal_metrics` 插件收集 LoongCollector 自身运行时的指标数据,并以[多值MetricEvent](../../../developer-guide/data-model-cpp.md)的格式暴露出去。 + +## 版本 + +[Beta](../../stability-level.md) + +## 配置参数 + +关于具体指标的详情,请参见[自监控指标说明](../../../developer-guide/self-monitor/metrics/internal-metrics-description.md)。 + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Type | string | 是 | / | 插件类型。固定为input\_internal\_metrics。 | +| Agent | InternalMetricRule | 否 | / | 进程级指标(LoongCollector的基本信息、资源占用率等进程级别信息)的采集规则 | +| Runner | InternalMetricRule | 否 | / | Runner级指标(LoongCollector内重要单例的运行状态)的采集规则 | +| Pipeline | InternalMetricRule | 否 | / | Pipeline级指标(单个采集配置流水线的状态)的采集规则 | +| PluginSource | InternalMetricRule | 否 | / | 数据源级(例如被采集的文件的信息)的采集规则 | +| Plugin | InternalMetricRule | 否 | / | 插件级指标(单个插件的状态、吞吐量等信息)的采集规则 | +| Component | InternalMetricRule | 否 | / | 组件级指标(为了辅助Pipeline等运行的组件的状态)的采集规则 | + +InternalMetricRule 的结构如下: + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Enable | bool | 否 | true | 是否开启。默认开启。 | +| Interval | int | 否 | 10 | 统计间隔,单位为分钟,表示每隔指定时间输出一次该类型的指标。 | + +## 样例 + +采集LoongCollector所有自监控指标,并将采集结果写到本地文件。 + +``` yaml +enable: true +inputs: + - Type: input_internal_metrics + Agent: + Enable: true + Interval: 1 + Runner: + Enable: true + Interval: 1 + Pipeline: + Enable: true + Interval: 1 + Plugin: + Enable: true + Interval: 10 + Component: + Enable: true + Interval: 10 + PluginSource: + Enable: true + Interval: 10 +flushers: + - Type: flusher_file + FilePath: self_monitor/self_metrics.log +``` + +输出到 LoongCollector 的 `self_monitor/self_metrics.log` 
文件中,每行均为一条json格式的指标。下面是其中一行展开后的参考样例: + +```json +{ + "__labels__":{ + "component_name":"process_queue", + "pipeline_name":"pipeline-demo", + "project":"", + "queue_type":"bounded" + }, + "__name__":"component", + "__time__":1735127390, + "__value__":{ + "fetch_times_total":6000.0, + "in_items_total":0.0, + "in_size_bytes":0.0, + "out_items_total":0.0, + "queue_size":0.0, + "queue_size_bytes":0.0, + "total_delay_ms":0.0, + "valid_fetch_times_total":0.0, + "valid_to_push_status":1.0 + } +} +``` diff --git a/docs/cn/plugins/overview.md b/docs/cn/plugins/overview.md index a9a1f1838d..7348b18fe3 100644 --- a/docs/cn/plugins/overview.md +++ b/docs/cn/plugins/overview.md @@ -13,6 +13,7 @@ | `input_ebpf_network_security`
[eBPF网络安全数据](input/native/input-network-security.md) | SLS官方 | eBPF网络安全数据采集。 | | `input_ebpf_process_security`
[eBPF进程安全数据](input/native/input-process-security.md) | SLS官方 | eBPF进程安全数据采集。 | | `input_observer_network`
[eBPF网络调用数据](input/native/metric-observer.md) | SLS官方 | 支持从网络系统调用中收集四层网络调用,并借助网络解析模块,可以观测七层网络调用细节。 | +| `input_internal_metrics`
[自监控指标数据](input/native/input-internal-metrics.md) | SLS官方 | 导出自监控指标数据。 | ### 扩展插件 @@ -97,6 +98,7 @@ | 名称 | 提供方 | 简介 | | --- | --- | --- | | `flusher_sls`
[SLS](flusher/native/flusher-sls.md) | SLS官方 | 将采集到的数据输出到SLS。 | +| `flusher_file`
[本地文件](flusher/native/flusher-file.md) | SLS官方 | 将采集到的数据写到本地文件。 | | `flusher_blackhole`
[原生Flusher测试](flusher/native/flusher-blackhole.md) | SLS官方 | 直接丢弃采集的事件,属于原生输出插件,主要用于测试。 | ### 扩展插件 From 2dc76058bbbba7062c17ac3befa8a152678acc95 Mon Sep 17 00:00:00 2001 From: linrunqi08 <90741255+linrunqi08@users.noreply.github.com> Date: Fri, 27 Dec 2024 16:49:39 +0800 Subject: [PATCH 04/12] Optimize the limiter code to meet better isolation and recovery scenarios (#1985) --- core/app_config/AppConfig.cpp | 25 ++++ core/app_config/AppConfig.h | 17 ++- core/pipeline/limiter/ConcurrencyLimiter.cpp | 92 ++++++++----- core/pipeline/limiter/ConcurrencyLimiter.h | 42 +++--- .../queue/BoundedSenderQueueInterface.cpp | 7 - .../queue/BoundedSenderQueueInterface.h | 1 - core/plugin/flusher/sls/FlusherSLS.cpp | 36 ++--- core/runner/FlusherRunner.cpp | 4 +- core/runner/sink/http/HttpSink.cpp | 2 +- core/runner/sink/http/HttpSinkRequest.h | 7 +- core/sdk/Client.cpp | 4 +- core/sdk/Common.h | 2 + .../pipeline/ConcurrencyLimiterUnittest.cpp | 125 +++++++++++++----- .../unittest/sender/FlusherRunnerUnittest.cpp | 2 + 14 files changed, 246 insertions(+), 120 deletions(-) diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp index 27aa6116cf..1410714e3a 100644 --- a/core/app_config/AppConfig.cpp +++ b/core/app_config/AppConfig.cpp @@ -175,6 +175,7 @@ DEFINE_FLAG_STRING(logtail_snapshot_dir, "snapshot dir on local disk", "snapshot DEFINE_FLAG_STRING(logtail_profile_snapshot, "reader profile on local disk", "logtail_profile_snapshot"); DEFINE_FLAG_STRING(ilogtail_config_env_name, "config file path", "ALIYUN_LOGTAIL_CONFIG"); + #if defined(__linux__) DEFINE_FLAG_STRING(adhoc_check_point_file_dir, "", "/tmp/logtail_adhoc_checkpoint"); #elif defined(_MSC_VER) @@ -194,6 +195,21 @@ DEFINE_FLAG_STRING(sls_observer_ebpf_host_path, namespace logtail { constexpr int32_t kDefaultMaxSendBytePerSec = 25 * 1024 * 1024; // the max send speed per sec, realtime thread + +// 全局并发度保留余量百分比 +const double GLOBAL_CONCURRENCY_FREE_PERCENTAGE_FOR_ONE_REGION = 0.5; +// 单地域并发度最小值 +const 
int32_t MIN_SEND_REQUEST_CONCURRENCY = 15; +// 单地域并发度最大值 +const int32_t MAX_SEND_REQUEST_CONCURRENCY = 80; +// 并发度统计数量&&时间间隔 +const uint32_t CONCURRENCY_STATISTIC_THRESHOLD = 10; +const uint32_t CONCURRENCY_STATISTIC_INTERVAL_THRESHOLD_SECONDS = 3; +// 并发度不回退百分比阈值 +const uint32_t NO_FALL_BACK_FAIL_PERCENTAGE = 10; +// 并发度慢回退百分比阈值 +const uint32_t SLOW_FALL_BACK_FAIL_PERCENTAGE = 40; + std::string AppConfig::sLocalConfigDir = "local"; void CreateAgentDir() { try { @@ -1161,6 +1177,15 @@ void AppConfig::LoadResourceConf(const Json::Value& confJson) { mBindInterface.clear(); LOG_INFO(sLogger, ("bind_interface", mBindInterface)); } + + // mSendRequestConcurrency was limited + if (mSendRequestConcurrency < MIN_SEND_REQUEST_CONCURRENCY) { + mSendRequestConcurrency = MIN_SEND_REQUEST_CONCURRENCY; + } + if (mSendRequestConcurrency > MAX_SEND_REQUEST_CONCURRENCY) { + mSendRequestConcurrency = MAX_SEND_REQUEST_CONCURRENCY; + } + mSendRequestGlobalConcurrency = mSendRequestConcurrency * (1 + GLOBAL_CONCURRENCY_FREE_PERCENTAGE_FOR_ONE_REGION); } bool AppConfig::CheckAndResetProxyEnv() { diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h index 609c4379f6..d8f756ce03 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -31,6 +31,14 @@ namespace logtail { extern const int32_t kDefaultMaxSendBytePerSec; +extern const double GLOBAL_CONCURRENCY_FREE_PERCENTAGE_FOR_ONE_REGION; +extern const int32_t MIN_SEND_REQUEST_CONCURRENCY; +extern const int32_t MAX_SEND_REQUEST_CONCURRENCY; +extern const uint32_t CONCURRENCY_STATISTIC_THRESHOLD; +extern const uint32_t CONCURRENCY_STATISTIC_INTERVAL_THRESHOLD_SECONDS; +extern const uint32_t NO_FALL_BACK_FAIL_PERCENTAGE; +extern const uint32_t SLOW_FALL_BACK_FAIL_PERCENTAGE; + void CreateAgentDir(); std::string GetAgentLogDir(); @@ -131,6 +139,7 @@ class AppConfig { int32_t mNumOfBufferFile; int32_t mLocalFileSize; int32_t mSendRequestConcurrency; + int32_t mSendRequestGlobalConcurrency; std::string 
mBufferFilePath; // checkpoint @@ -207,6 +216,8 @@ class AppConfig { std::string mBindInterface; + + // /** // * @brief Load ConfigServer, DataServer and network interface // * @@ -434,8 +445,12 @@ class AppConfig { int32_t GetLocalFileSize() const { return mLocalFileSize; } const std::string& GetBufferFilePath() const { return mBufferFilePath; } - + // 单地域并发度 int32_t GetSendRequestConcurrency() const { return mSendRequestConcurrency; } + // 全局并发度 + int32_t GetSendRequestGlobalConcurrency() const { return mSendRequestGlobalConcurrency; } + + double GetGlobalConcurrencyFreePercentageForOneRegion() const { return GLOBAL_CONCURRENCY_FREE_PERCENTAGE_FOR_ONE_REGION; } int32_t GetProcessThreadCount() const { return mProcessThreadCount; } diff --git a/core/pipeline/limiter/ConcurrencyLimiter.cpp b/core/pipeline/limiter/ConcurrencyLimiter.cpp index ac75c8de27..937004769b 100644 --- a/core/pipeline/limiter/ConcurrencyLimiter.cpp +++ b/core/pipeline/limiter/ConcurrencyLimiter.cpp @@ -20,17 +20,12 @@ using namespace std; namespace logtail { - #ifdef APSARA_UNIT_TEST_MAIN uint32_t ConcurrencyLimiter::GetCurrentLimit() const { lock_guard lock(mLimiterMux); return mCurrenctConcurrency; } -uint32_t ConcurrencyLimiter::GetCurrentInterval() const { - lock_guard lock(mLimiterMux); - return mRetryIntervalSecs; -} void ConcurrencyLimiter::SetCurrentLimit(uint32_t limit) { lock_guard lock(mLimiterMux); mCurrenctConcurrency = limit; @@ -42,19 +37,15 @@ void ConcurrencyLimiter::SetInSendingCount(uint32_t count) { uint32_t ConcurrencyLimiter::GetInSendingCount() const { return mInSendingCnt.load(); } + +uint32_t ConcurrencyLimiter::GetStatisticThreshold() const { + return CONCURRENCY_STATISTIC_THRESHOLD; +} + #endif bool ConcurrencyLimiter::IsValidToPop() { lock_guard lock(mLimiterMux); - if (mCurrenctConcurrency == 0) { - auto curTime = std::chrono::system_clock::now(); - if (chrono::duration_cast(curTime - mLastCheckTime).count() > mRetryIntervalSecs) { - mLastCheckTime = curTime; - 
return true; - } else { - return false; - } - } if (mCurrenctConcurrency > mInSendingCnt.load()) { return true; } @@ -69,16 +60,20 @@ void ConcurrencyLimiter::OnSendDone() { --mInSendingCnt; } -void ConcurrencyLimiter::OnSuccess() { +void ConcurrencyLimiter::OnSuccess(std::chrono::system_clock::time_point currentTime) { + AdjustConcurrency(true, currentTime); +} + +void ConcurrencyLimiter::OnFail(std::chrono::system_clock::time_point currentTime) { + AdjustConcurrency(false, currentTime); +} + +void ConcurrencyLimiter::Increase() { lock_guard lock(mLimiterMux); - if (mCurrenctConcurrency <= 0) { - mRetryIntervalSecs = mMinRetryIntervalSecs; - LOG_INFO(sLogger, ("reset send retry interval, type", mDescription)); - } if (mCurrenctConcurrency != mMaxConcurrency) { ++mCurrenctConcurrency; if (mCurrenctConcurrency == mMaxConcurrency) { - LOG_INFO(sLogger, + LOG_DEBUG(sLogger, ("increase send concurrency to maximum, type", mDescription)("concurrency", mCurrenctConcurrency)); } else { LOG_DEBUG(sLogger, @@ -88,22 +83,57 @@ void ConcurrencyLimiter::OnSuccess() { } } -void ConcurrencyLimiter::OnFail() { +void ConcurrencyLimiter::Decrease(double fallBackRatio) { lock_guard lock(mLimiterMux); - if (mCurrenctConcurrency != 0) { + if (mCurrenctConcurrency != mMinConcurrency) { auto old = mCurrenctConcurrency; - mCurrenctConcurrency = static_cast(mCurrenctConcurrency * mConcurrencyDownRatio); - LOG_INFO(sLogger, ("decrease send concurrency, type", mDescription)("from", old)("to", mCurrenctConcurrency)); + mCurrenctConcurrency = std::max(static_cast(mCurrenctConcurrency * fallBackRatio), mMinConcurrency); + LOG_DEBUG(sLogger, ("decrease send concurrency, type", mDescription)("from", old)("to", mCurrenctConcurrency)); } else { - if (mRetryIntervalSecs != mMaxRetryIntervalSecs) { - auto old = mRetryIntervalSecs; - mRetryIntervalSecs - = min(mMaxRetryIntervalSecs, static_cast(mRetryIntervalSecs * mRetryIntervalUpRatio)); - LOG_INFO(sLogger, - ("increase send retry interval, type", - 
mDescription)("from", ToString(old) + "s")("to", ToString(mRetryIntervalSecs) + "s")); + if (mMinConcurrency == 0) { + mCurrenctConcurrency = 1; + LOG_INFO(sLogger, ("decrease send concurrency to min, type", mDescription)("to", mCurrenctConcurrency)); } } } + +void ConcurrencyLimiter::AdjustConcurrency(bool success, std::chrono::system_clock::time_point currentTime) { + uint32_t failPercentage = 0; + bool finishStatistics = false; + { + lock_guard lock(mStatisticsMux); + mStatisticsTotal ++; + if (!success) { + mStatisticsFailTotal ++; + } + if (mLastStatisticsTime == std::chrono::system_clock::time_point()) { + mLastStatisticsTime = currentTime; + } + if (mStatisticsTotal == CONCURRENCY_STATISTIC_THRESHOLD || chrono::duration_cast(currentTime - mLastStatisticsTime).count() > CONCURRENCY_STATISTIC_INTERVAL_THRESHOLD_SECONDS) { + failPercentage = mStatisticsFailTotal*100/mStatisticsTotal; + LOG_DEBUG(sLogger,("AdjustConcurrency", mDescription)("mStatisticsFailTotal", mStatisticsFailTotal)("mStatisticsTotal", mStatisticsTotal)); + mStatisticsTotal = 0; + mStatisticsFailTotal = 0; + mLastStatisticsTime = currentTime; + finishStatistics = true; + } + } + if (finishStatistics) { + if (failPercentage == 0) { + // 成功 + Increase(); + } else if (failPercentage <= NO_FALL_BACK_FAIL_PERCENTAGE) { + // 不调整 + } else if (failPercentage <= SLOW_FALL_BACK_FAIL_PERCENTAGE) { + // 慢回退 + Decrease(mConcurrencySlowFallBackRatio); + } else { + // 快速回退 + Decrease(mConcurrencyFastFallBackRatio); + } + } +} + + } // namespace logtail diff --git a/core/pipeline/limiter/ConcurrencyLimiter.h b/core/pipeline/limiter/ConcurrencyLimiter.h index 1191326b04..513cea0988 100644 --- a/core/pipeline/limiter/ConcurrencyLimiter.h +++ b/core/pipeline/limiter/ConcurrencyLimiter.h @@ -22,33 +22,31 @@ #include #include +#include "app_config/AppConfig.h" #include "monitor/metric_constants/MetricConstants.h" namespace logtail { - class ConcurrencyLimiter { public: ConcurrencyLimiter(const std::string& 
description, uint32_t maxConcurrency, - uint32_t maxRetryIntervalSecs = 3600, - uint32_t minRetryIntervalSecs = 30, - double retryIntervalUpRatio = 1.5, - double concurrencyDownRatio = 0.5) + uint32_t minConcurrency = 1, + double concurrencyFastFallBackRatio = 0.5, + double concurrencySlowFallBackRatio = 0.8) : mDescription(description), mMaxConcurrency(maxConcurrency), + mMinConcurrency(minConcurrency), mCurrenctConcurrency(maxConcurrency), - mMaxRetryIntervalSecs(maxRetryIntervalSecs), - mMinRetryIntervalSecs(minRetryIntervalSecs), - mRetryIntervalSecs(minRetryIntervalSecs), - mRetryIntervalUpRatio(retryIntervalUpRatio), - mConcurrencyDownRatio(concurrencyDownRatio) {} + mConcurrencyFastFallBackRatio(concurrencyFastFallBackRatio), + mConcurrencySlowFallBackRatio(concurrencySlowFallBackRatio) {} bool IsValidToPop(); void PostPop(); void OnSendDone(); - void OnSuccess(); - void OnFail(); + void OnSuccess(std::chrono::system_clock::time_point currentTime); + void OnFail(std::chrono::system_clock::time_point currentTime); + static std::string GetLimiterMetricName(const std::string& limiter) { if (limiter == "region") { @@ -64,10 +62,10 @@ class ConcurrencyLimiter { #ifdef APSARA_UNIT_TEST_MAIN uint32_t GetCurrentLimit() const; - uint32_t GetCurrentInterval() const; void SetCurrentLimit(uint32_t limit); void SetInSendingCount(uint32_t count); uint32_t GetInSendingCount() const; + uint32_t GetStatisticThreshold() const; #endif @@ -77,19 +75,25 @@ class ConcurrencyLimiter { std::atomic_uint32_t mInSendingCnt = 0U; uint32_t mMaxConcurrency = 0; + uint32_t mMinConcurrency = 0; mutable std::mutex mLimiterMux; uint32_t mCurrenctConcurrency = 0; - uint32_t mMaxRetryIntervalSecs = 0; - uint32_t mMinRetryIntervalSecs = 0; + double mConcurrencyFastFallBackRatio = 0.0; + double mConcurrencySlowFallBackRatio = 0.0; - uint32_t mRetryIntervalSecs = 0; + std::chrono::system_clock::time_point mLastCheckTime; - double mRetryIntervalUpRatio = 0.0; - double mConcurrencyDownRatio = 0.0; 
+ mutable std::mutex mStatisticsMux; + std::chrono::system_clock::time_point mLastStatisticsTime; + uint32_t mStatisticsTotal = 0; + uint32_t mStatisticsFailTotal = 0; + + void Increase(); + void Decrease(double fallBackRatio); + void AdjustConcurrency(bool success, std::chrono::system_clock::time_point currentTime); - std::chrono::system_clock::time_point mLastCheckTime; }; } // namespace logtail diff --git a/core/pipeline/queue/BoundedSenderQueueInterface.cpp b/core/pipeline/queue/BoundedSenderQueueInterface.cpp index bbf258189d..4d9f0821a1 100644 --- a/core/pipeline/queue/BoundedSenderQueueInterface.cpp +++ b/core/pipeline/queue/BoundedSenderQueueInterface.cpp @@ -56,13 +56,6 @@ void BoundedSenderQueueInterface::SetConcurrencyLimiters(std::unordered_mapOnSuccess(); - } - } -} void BoundedSenderQueueInterface::DecreaseSendingCnt() { for (auto& limiter : mConcurrencyLimiters) { diff --git a/core/pipeline/queue/BoundedSenderQueueInterface.h b/core/pipeline/queue/BoundedSenderQueueInterface.h index 71a1f0c85e..826d574a5b 100644 --- a/core/pipeline/queue/BoundedSenderQueueInterface.h +++ b/core/pipeline/queue/BoundedSenderQueueInterface.h @@ -48,7 +48,6 @@ class BoundedSenderQueueInterface : public BoundedQueueInterface& items, int32_t limit) = 0; void DecreaseSendingCnt(); - void OnSendingSuccess(); void SetRateLimiter(uint32_t maxRate); void SetConcurrencyLimiters(std::unordered_map>&& concurrencyLimitersMap); virtual void SetPipelineForItems(const std::shared_ptr& p) const = 0; diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index e9c0e39861..4a5edeeec1 100644 --- a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -126,12 +126,12 @@ shared_ptr FlusherSLS::GetLogstoreConcurrencyLimiter(const s auto iter = sLogstoreConcurrencyLimiterMap.find(key); if (iter == sLogstoreConcurrencyLimiterMap.end()) { - auto limiter = GetConcurrencyLimiter(sName + "#quota#logstore#" + key); + auto 
limiter = make_shared(sName + "#quota#logstore#" + key, AppConfig::GetInstance()->GetSendRequestConcurrency()); sLogstoreConcurrencyLimiterMap.try_emplace(key, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = GetConcurrencyLimiter(sName + "#quota#logstore#" + key); + auto limiter = make_shared(sName + "#quota#logstore#" + key, AppConfig::GetInstance()->GetSendRequestConcurrency()); iter->second = limiter; return limiter; } @@ -142,12 +142,12 @@ shared_ptr FlusherSLS::GetProjectConcurrencyLimiter(const st lock_guard lock(sMux); auto iter = sProjectConcurrencyLimiterMap.find(project); if (iter == sProjectConcurrencyLimiterMap.end()) { - auto limiter = GetConcurrencyLimiter(sName + "#quota#project#" + project); + auto limiter = make_shared(sName + "#quota#project#" + project, AppConfig::GetInstance()->GetSendRequestConcurrency()); sProjectConcurrencyLimiterMap.try_emplace(project, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = GetConcurrencyLimiter(sName + "#quota#project#" + project); + auto limiter = make_shared(sName + "#quota#project#" + project, AppConfig::GetInstance()->GetSendRequestConcurrency()); iter->second = limiter; return limiter; } @@ -158,12 +158,12 @@ shared_ptr FlusherSLS::GetRegionConcurrencyLimiter(const str lock_guard lock(sMux); auto iter = sRegionConcurrencyLimiterMap.find(region); if (iter == sRegionConcurrencyLimiterMap.end()) { - auto limiter = GetConcurrencyLimiter(sName + "#network#region#" + region); + auto limiter = make_shared(sName + "#network#region#" + region, AppConfig::GetInstance()->GetSendRequestConcurrency(), AppConfig::GetInstance()->GetSendRequestConcurrency()*AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); sRegionConcurrencyLimiterMap.try_emplace(region, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = GetConcurrencyLimiter(sName + "#network#region#" + region); + auto limiter = make_shared(sName + "#network#region#" + 
region, AppConfig::GetInstance()->GetSendRequestConcurrency(), AppConfig::GetInstance()->GetSendRequestConcurrency()*AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); iter->second = limiter; return limiter; } @@ -693,9 +693,9 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) ToString(chrono::duration_cast(curSystemTime - item->mFirstEnqueTime).count()) + "ms")("try cnt", data->mTryCnt)("endpoint", data->mCurrentEndpoint)("is profile data", isProfileData)); - GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); - GetProjectConcurrencyLimiter(mProject)->OnSuccess(); - GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); + GetRegionConcurrencyLimiter(mRegion)->OnSuccess(curSystemTime); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(curSystemTime); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(curSystemTime); SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); if (mSuccessCnt) { mSuccessCnt->Add(1); @@ -736,17 +736,17 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) } } operation = data->mBufferOrNot ? OperationOnFail::RETRY_LATER : OperationOnFail::DISCARD; - GetRegionConcurrencyLimiter(mRegion)->OnFail(); - GetProjectConcurrencyLimiter(mProject)->OnSuccess(); - GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); + GetRegionConcurrencyLimiter(mRegion)->OnFail(curSystemTime); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(curSystemTime); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(curSystemTime); } else if (sendResult == SEND_QUOTA_EXCEED) { BOOL_FLAG(global_network_success) = true; if (slsResponse.mErrorCode == sdk::LOGE_SHARD_WRITE_QUOTA_EXCEED) { failDetail << "shard write quota exceed"; suggestion << "Split logstore shards. 
https://help.aliyun.com/zh/sls/user-guide/expansion-of-resources"; - GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnFail(); - GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); - GetProjectConcurrencyLimiter(mProject)->OnSuccess(); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnFail(curSystemTime); + GetRegionConcurrencyLimiter(mRegion)->OnSuccess(curSystemTime); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(curSystemTime); if (mShardWriteQuotaErrorCnt) { mShardWriteQuotaErrorCnt->Add(1); } @@ -754,9 +754,9 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) failDetail << "project write quota exceed"; suggestion << "Submit quota modification request. " "https://help.aliyun.com/zh/sls/user-guide/expansion-of-resources"; - GetProjectConcurrencyLimiter(mProject)->OnFail(); - GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); - GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); + GetProjectConcurrencyLimiter(mProject)->OnFail(curSystemTime); + GetRegionConcurrencyLimiter(mRegion)->OnSuccess(curSystemTime); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(curSystemTime); if (mProjectQuotaErrorCnt) { mProjectQuotaErrorCnt->Add(1); } diff --git a/core/runner/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp index 38b09d78de..3b77dc17f5 100644 --- a/core/runner/FlusherRunner.cpp +++ b/core/runner/FlusherRunner.cpp @@ -115,7 +115,7 @@ void FlusherRunner::DecreaseHttpSendingCnt() { void FlusherRunner::PushToHttpSink(SenderQueueItem* item, bool withLimit) { // TODO: use semaphore instead while (withLimit && !Application::GetInstance()->IsExiting() - && GetSendingBufferCount() >= AppConfig::GetInstance()->GetSendRequestConcurrency()) { + && GetSendingBufferCount() >= AppConfig::GetInstance()->GetSendRequestGlobalConcurrency()) { this_thread::sleep_for(chrono::milliseconds(10)); } @@ -155,7 +155,7 @@ void FlusherRunner::Run() { vector items; int32_t limit - = 
Application::GetInstance()->IsExiting() ? -1 : AppConfig::GetInstance()->GetSendRequestConcurrency(); + = Application::GetInstance()->IsExiting() ? -1 : AppConfig::GetInstance()->GetSendRequestGlobalConcurrency(); SenderQueueManager::GetInstance()->GetAvailableItems(items, limit); if (items.empty()) { SenderQueueManager::GetInstance()->Wait(1000); diff --git a/core/runner/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp index 6777e3d3b0..e9951d4237 100644 --- a/core/runner/sink/http/HttpSink.cpp +++ b/core/runner/sink/http/HttpSink.cpp @@ -54,7 +54,7 @@ bool HttpSink::Init() { mSendConcurrency = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_SINK_SEND_CONCURRENCY); // TODO: should be dynamic - mSendConcurrency->Set(AppConfig::GetInstance()->GetSendRequestConcurrency()); + mSendConcurrency->Set(AppConfig::GetInstance()->GetSendRequestGlobalConcurrency()); mThreadRes = async(launch::async, &HttpSink::Run, this); return true; diff --git a/core/runner/sink/http/HttpSinkRequest.h b/core/runner/sink/http/HttpSinkRequest.h index 1f936a8f99..f8220f7722 100644 --- a/core/runner/sink/http/HttpSinkRequest.h +++ b/core/runner/sink/http/HttpSinkRequest.h @@ -32,8 +32,11 @@ struct HttpSinkRequest : public AsynHttpRequest { const std::string& query, const std::map& header, const std::string& body, - SenderQueueItem* item) - : AsynHttpRequest(method, httpsFlag, host, port, url, query, header, body), mItem(item) {} + SenderQueueItem* item, + uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_secs)), + uint32_t maxTryCnt = static_cast(INT32_FLAG(default_http_request_max_try_cnt)) + ) + : AsynHttpRequest(method, httpsFlag, host, port, url, query, header, body, HttpResponse(), timeout, maxTryCnt), mItem(item) {} bool IsContextValid() const override { return true; } void OnSendDone(HttpResponse& response) override {} diff --git a/core/sdk/Client.cpp b/core/sdk/Client.cpp index acfc49f95c..053e6da40a 100644 --- a/core/sdk/Client.cpp +++ 
b/core/sdk/Client.cpp @@ -289,7 +289,7 @@ namespace sdk { SetCommonHeader(httpHeader, (int32_t)(body.length()), ""); string signature = GetUrlSignature(HTTP_POST, operation, httpHeader, parameterList, body, accessKeySecret); httpHeader[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; - return make_unique(HTTP_POST, mUsingHTTPS, host, mPort, operation, "", httpHeader, body, item); + return make_unique(HTTP_POST, mUsingHTTPS, host, mPort, operation, "", httpHeader, body, item, INT32_FLAG(default_http_request_timeout_secs), LOG_REQUEST_TRY_TIMES); } unique_ptr @@ -340,7 +340,7 @@ namespace sdk { GetQueryString(parameterList, queryString); return make_unique( - HTTP_POST, mUsingHTTPS, host, mPort, operation, queryString, httpHeader, body, item); + HTTP_POST, mUsingHTTPS, host, mPort, operation, queryString, httpHeader, body, item, INT32_FLAG(default_http_request_timeout_secs), LOG_REQUEST_TRY_TIMES); } PostLogStoreLogsResponse diff --git a/core/sdk/Common.h b/core/sdk/Common.h index 56ce512560..e37d2efa61 100644 --- a/core/sdk/Common.h +++ b/core/sdk/Common.h @@ -36,6 +36,8 @@ namespace sdk { const int64_t kFirstHashKeySeqID = 1; const uint32_t LOG_REQUEST_TIMEOUT = 20; + const uint32_t LOG_REQUEST_TRY_TIMES = 1; + const uint32_t MD5_BYTES = 16; #define DATE_FORMAT_RFC822 "%a, %d %b %Y %H:%M:%S GMT" ///< RFC822 date formate, GMT time. 
diff --git a/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp b/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp index e593e8db68..973b1b06b3 100644 --- a/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp +++ b/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp @@ -25,65 +25,118 @@ class ConcurrencyLimiterUnittest : public testing::Test { }; void ConcurrencyLimiterUnittest::TestLimiter() const { - shared_ptr sConcurrencyLimiter = make_shared("", 80); - // comcurrency = 10, count = 0 + auto curSystemTime = chrono::system_clock::now(); + int maxConcurrency = 80; + int minConcurrency = 20; + + shared_ptr sConcurrencyLimiter = make_shared("", maxConcurrency, minConcurrency); + // fastFallBack APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); - sConcurrencyLimiter->PostPop(); - APSARA_TEST_EQUAL(1U, sConcurrencyLimiter->GetInSendingCount()); - sConcurrencyLimiter->OnFail(); - sConcurrencyLimiter->OnSendDone(); + for (uint32_t i = 0; i < sConcurrencyLimiter->GetStatisticThreshold(); i++) { + sConcurrencyLimiter->PostPop(); + APSARA_TEST_EQUAL(1U, sConcurrencyLimiter->GetInSendingCount()); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnFail(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } APSARA_TEST_EQUAL(40U, sConcurrencyLimiter->GetCurrentLimit()); APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); - APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); - // count = 10, comcurrency = 10 + // success one time APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); - int num = 10; - for (int i = 0; i < num; i++) { + for (uint32_t i = 0; i < sConcurrencyLimiter->GetStatisticThreshold(); i++) { APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); sConcurrencyLimiter->PostPop(); } APSARA_TEST_EQUAL(10U, sConcurrencyLimiter->GetInSendingCount()); - for (int i = 0; i < num; i++) { - sConcurrencyLimiter->OnSuccess(); + for (uint32_t i = 0; i < 
sConcurrencyLimiter->GetStatisticThreshold(); i++) { + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnSuccess(curSystemTime); sConcurrencyLimiter->OnSendDone(); } + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); - APSARA_TEST_EQUAL(50U, sConcurrencyLimiter->GetCurrentLimit()); - APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); + APSARA_TEST_EQUAL(41U, sConcurrencyLimiter->GetCurrentLimit()); - // limit = 50/2/2/2/2/2/2/2 = 25/2/2/2/2/2/2 = 3/2/2/2 = 1/2/2 = 0 - // interval = 30 * 1.5 = 45 - num = 7; - for (int i = 0; i < num; i++) { - APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + // slowFallBack + for (uint32_t i = 0; i < sConcurrencyLimiter->GetStatisticThreshold() - 2; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnSuccess(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } + for (int i = 0; i < 2; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnFail(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } + uint32_t expect = 41*0.8; + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); + APSARA_TEST_EQUAL(expect, sConcurrencyLimiter->GetCurrentLimit()); + + // no FallBack + for (uint32_t i = 0; i < sConcurrencyLimiter->GetStatisticThreshold() - 1; i++) { sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnSuccess(curSystemTime); + sConcurrencyLimiter->OnSendDone(); } - APSARA_TEST_EQUAL(7U, sConcurrencyLimiter->GetInSendingCount()); - for (int i = 0; i < num; i++) { - sConcurrencyLimiter->OnFail(); + for (int i = 0; i < 1; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnFail(curSystemTime); sConcurrencyLimiter->OnSendDone(); } APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); - APSARA_TEST_EQUAL(0U, 
sConcurrencyLimiter->GetCurrentLimit()); - APSARA_TEST_EQUAL(45U, sConcurrencyLimiter->GetCurrentInterval()); - - num = 3; - for (int i = 0; i < num; i++) { - if (i == 0) { - APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); - } else { - APSARA_TEST_EQUAL(false, sConcurrencyLimiter->IsValidToPop()); + APSARA_TEST_EQUAL(expect, sConcurrencyLimiter->GetCurrentLimit()); + + // test FallBack to minConcurrency + for (int i = 0; i < 10; i++) { + for (uint32_t j = 0; j < sConcurrencyLimiter->GetStatisticThreshold(); j++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnFail(curSystemTime); + sConcurrencyLimiter->OnSendDone(); } } + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); + APSARA_TEST_EQUAL(minConcurrency, sConcurrencyLimiter->GetCurrentLimit()); - sConcurrencyLimiter->PostPop(); - sConcurrencyLimiter->OnSuccess(); - sConcurrencyLimiter->OnSendDone(); + // test limit by concurrency + for (int i = 0; i < minConcurrency; i++) { + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + sConcurrencyLimiter->PostPop(); + } + APSARA_TEST_EQUAL(false, sConcurrencyLimiter->IsValidToPop()); + for (int i = 0; i < minConcurrency; i++) { + sConcurrencyLimiter->OnSendDone(); + } + // test time exceed interval; 8 success, 1 fail, and last one timeout + sConcurrencyLimiter->SetCurrentLimit(40); + for (uint32_t i = 0; i < sConcurrencyLimiter->GetStatisticThreshold() - 3; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnSuccess(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } + for (int i = 0; i < 1; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + sConcurrencyLimiter->OnFail(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } + sleep(4); + for (int i = 0; i < 1; i++) { + sConcurrencyLimiter->PostPop(); + curSystemTime = chrono::system_clock::now(); + 
sConcurrencyLimiter->OnSuccess(curSystemTime); + sConcurrencyLimiter->OnSendDone(); + } + expect = 40*0.8; APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); - APSARA_TEST_EQUAL(1U, sConcurrencyLimiter->GetCurrentLimit()); - APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); + APSARA_TEST_EQUAL(expect, sConcurrencyLimiter->GetCurrentLimit()); } UNIT_TEST_CASE(ConcurrencyLimiterUnittest, TestLimiter) diff --git a/core/unittest/sender/FlusherRunnerUnittest.cpp b/core/unittest/sender/FlusherRunnerUnittest.cpp index 9bb1b6e1fa..076b51fb07 100644 --- a/core/unittest/sender/FlusherRunnerUnittest.cpp +++ b/core/unittest/sender/FlusherRunnerUnittest.cpp @@ -47,6 +47,8 @@ void FlusherRunnerUnittest::TestDispatch() { flusher->SetMetricsRecordRef("name", "1"); flusher->Init(Json::Value(), tmp); + AppConfig::GetInstance()->mSendRequestGlobalConcurrency = 10; + auto item = make_unique("content", 10, flusher.get(), flusher->GetQueueKey()); auto realItem = item.get(); flusher->PushToQueue(std::move(item)); From dd5b59e614a2212e13f7c7ee590dbbb4d7e12685 Mon Sep 17 00:00:00 2001 From: Zhu Shunjia Date: Mon, 30 Dec 2024 09:55:51 +0800 Subject: [PATCH 05/12] fix: flusher_otlp stop nil log/metric/trace client (#1994) * fix nil log/metric/trace otlp client stop issue Change-Id: I8948322d1e54a61c7b17968a5bcaf5590d9a437c * add ut Change-Id: Ib0003a387cb9d23592f28fd8097bc49ff3ae2351 --- plugins/flusher/opentelemetry/flusher_otlp.go | 6 +++--- plugins/flusher/opentelemetry/flusher_otlp_test.go | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/plugins/flusher/opentelemetry/flusher_otlp.go b/plugins/flusher/opentelemetry/flusher_otlp.go index fce944339e..ed747e9c25 100644 --- a/plugins/flusher/opentelemetry/flusher_otlp.go +++ b/plugins/flusher/opentelemetry/flusher_otlp.go @@ -160,21 +160,21 @@ func (f *FlusherOTLP) IsReady(projectName string, logstoreName string, logstoreK func (f *FlusherOTLP) Stop() error { var err error - if 
f.logClient.grpcConn != nil { + if f.logClient != nil && f.logClient.grpcConn != nil { err = f.logClient.grpcConn.Close() if err != nil { logger.Error(f.context.GetRuntimeContext(), "FLUSHER_STOP_ALARM", "stop otlp logs flusher fail, error", err) } } - if f.metricClient.grpcConn != nil { + if f.metricClient != nil && f.metricClient.grpcConn != nil { err = f.metricClient.grpcConn.Close() if err != nil { logger.Error(f.context.GetRuntimeContext(), "FLUSHER_STOP_ALARM", "stop otlp metrics flusher fail, error", err) } } - if f.traceClient.grpcConn != nil { + if f.traceClient != nil && f.traceClient.grpcConn != nil { err = f.traceClient.grpcConn.Close() if err != nil { logger.Error(f.context.GetRuntimeContext(), "FLUSHER_STOP_ALARM", "stop otlp traces flusher fail, error", err) diff --git a/plugins/flusher/opentelemetry/flusher_otlp_test.go b/plugins/flusher/opentelemetry/flusher_otlp_test.go index 45637a4085..24f70bd52f 100644 --- a/plugins/flusher/opentelemetry/flusher_otlp_test.go +++ b/plugins/flusher/opentelemetry/flusher_otlp_test.go @@ -50,6 +50,13 @@ func (t *TestOtlpLogService) Export(ctx context.Context, request *otlpv1.ExportL } func Test_Flusher_Init(t *testing.T) { + convey.Convey("When config is invalid", t, func() { + convey.Convey("When FlusherOTLP is not initialized", func() { + f := &FlusherOTLP{Version: v1, Logs: &helper.GrpcClientConfig{Endpoint: ":8080"}} + err := f.Stop() + convey.So(err, convey.ShouldBeNil) + }) + }) convey.Convey("When init grpc service", t, func() { _, server := newTestGrpcService(t, ":8080", time.Nanosecond) defer func() { @@ -61,6 +68,8 @@ func Test_Flusher_Init(t *testing.T) { f := &FlusherOTLP{Version: v1, Logs: &helper.GrpcClientConfig{Endpoint: ":8080"}} err := f.Init(logCtx) convey.So(err, convey.ShouldBeNil) + err = f.Stop() + convey.So(err, convey.ShouldBeNil) }) }) } From e8b9318320e06c09ee1e7ee147d4cd8601daf19c Mon Sep 17 00:00:00 2001 From: Huxing Zhang Date: Mon, 30 Dec 2024 14:08:58 +0800 Subject: [PATCH 06/12] 
Polish README (#1992) * Update README.md Polish quick start. --- README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 71beae8bf1..298552cc38 100644 --- a/README.md +++ b/README.md @@ -45,19 +45,17 @@ The core advantages of **LoongCollector**: ## Quick Start -For the complexity of C++ dependencies, the compilation of LoongCollector requires you have docker installed. If you aim to build LoongCollector from sources, you can go ahead and start with the following commands. - -1. Start with local +For the complexity of C++ dependencies, the compilation of LoongCollector requires you have docker and golang installed. If you aim to build LoongCollector from sources, you can go ahead and start with the following commands. ```bash make cp -r example_config/quick_start/* output cd output ./loongcollector -# Now, LoongCollector is collecting data from output/simple.log and outputing the result to stdout +# Now, LoongCollector is collecting data from output/simple.log, +# and outputting the result to stdout. 
``` - HEAD ## Documentation From 9800d9e1fd618ffc4693aca41b443dfaaf9fdd23 Mon Sep 17 00:00:00 2001 From: Bingchang Chen Date: Mon, 30 Dec 2024 15:14:34 +0800 Subject: [PATCH 07/12] test: pipeline update unittest (#1991) * test: pipeline update unittest * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix --- core/app_config/AppConfig.h | 1 + core/common/http/HttpResponse.h | 6 +- core/config/PipelineConfig.cpp | 11 +- core/file_server/event_handler/LogInput.cpp | 4 +- core/go_pipeline/LogtailPlugin.cpp | 28 + core/monitor/profile_sender/ProfileSender.cpp | 9 +- core/pipeline/Pipeline.h | 1 + core/pipeline/PipelineManager.h | 1 + .../plugin/instance/ProcessorInstance.h | 1 + core/pipeline/plugin/interface/Flusher.h | 1 + core/pipeline/queue/BoundedProcessQueue.h | 1 + core/pipeline/queue/ProcessQueueItem.h | 10 +- core/pipeline/queue/ProcessQueueManager.h | 1 + core/pipeline/queue/SenderQueueManager.h | 1 + core/plugin/flusher/sls/FlusherSLS.cpp | 4 +- core/runner/FlusherRunner.cpp | 9 +- core/runner/FlusherRunner.h | 2 + core/runner/ProcessorRunner.cpp | 4 +- core/runner/sink/Sink.h | 2 +- core/runner/sink/http/HttpSink.cpp | 12 + core/runner/sink/http/HttpSink.h | 9 +- core/unittest/config/PipelineManagerMock.h | 6 +- core/unittest/pipeline/HttpSinkMock.h | 99 + core/unittest/pipeline/LogtailPluginMock.h | 93 + core/unittest/pipeline/PipelineUnittest.cpp | 19 +- .../pipeline/PipelineUpdateUnittest.cpp | 2305 ++++++++++++++++- core/unittest/plugin/PluginMock.h | 27 +- .../unittest/sender/FlusherRunnerUnittest.cpp | 3 +- pluginmanager/config_update_test.go | 73 +- pluginmanager/plugin_runner_helper.go | 14 + .../test_config/update_mock_block.json | 54 +- .../test_config/update_mock_noblock.json | 5 +- .../update_mock_noblock_no_input.json | 52 + plugins/flusher/checker/flusher_checker.go | 3 +- plugins/input/mockd/input_mockd.go | 13 + 35 files changed, 2785 insertions(+), 99 deletions(-) create mode 100644 
core/unittest/pipeline/HttpSinkMock.h create mode 100644 core/unittest/pipeline/LogtailPluginMock.h create mode 100644 pluginmanager/test_config/update_mock_noblock_no_input.json diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h index d8f756ce03..71a8d2df49 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -533,6 +533,7 @@ class AppConfig { friend class InputPrometheusUnittest; friend class InputContainerStdioUnittest; friend class BatcherUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/common/http/HttpResponse.h b/core/common/http/HttpResponse.h index 228f491264..c4282df428 100644 --- a/core/common/http/HttpResponse.h +++ b/core/common/http/HttpResponse.h @@ -75,7 +75,7 @@ class HttpResponse { HttpResponse() : mHeader(compareHeader), mBody(new std::string(), [](void* p) { delete static_cast(p); }), - mWriteCallback(DefaultWriteCallback) {}; + mWriteCallback(DefaultWriteCallback){}; HttpResponse(void* body, const std::function& bodyDeleter, size_t (*callback)(char*, size_t, size_t, void*)) @@ -155,6 +155,10 @@ class HttpResponse { std::map mHeader; std::unique_ptr> mBody; size_t (*mWriteCallback)(char*, size_t, size_t, void*) = nullptr; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class HttpSinkMock; +#endif }; } // namespace logtail diff --git a/core/config/PipelineConfig.cpp b/core/config/PipelineConfig.cpp index 4aeea027ca..6cafcb6ed4 100644 --- a/core/config/PipelineConfig.cpp +++ b/core/config/PipelineConfig.cpp @@ -246,10 +246,17 @@ bool PipelineConfig::Parse() { } } mInputs.push_back(&plugin); +#ifndef APSARA_UNIT_TEST_MAIN // TODO: remove these special restrictions if (pluginType == "input_file" || pluginType == "input_container_stdio") { hasFileInput = true; } +#else + // TODO: remove these special restrictions after all C++ inputs support Go processors + if (pluginType.find("input_file") != string::npos || pluginType.find("input_container_stdio") != string::npos) { + hasFileInput = 
true; + } +#endif } // TODO: remove these special restrictions if (hasFileInput && (*mDetail)["inputs"].size() > 1) { @@ -530,7 +537,9 @@ bool PipelineConfig::Parse() { } mRouter.emplace_back(i, itr); } else { - mRouter.emplace_back(i, nullptr); + if (!IsFlushingThroughGoPipelineExisted()) { + mRouter.emplace_back(i, nullptr); + } } } diff --git a/core/file_server/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp index 340a6b6763..012d83967c 100644 --- a/core/file_server/event_handler/LogInput.cpp +++ b/core/file_server/event_handler/LogInput.cpp @@ -350,7 +350,8 @@ void LogInput::ProcessEvent(EventDispatcher* dispatcher, Event* ev) { void LogInput::UpdateCriticalMetric(int32_t curTime) { mLastRunTime->Set(mLastReadEventTime.load()); - LoongCollectorMonitor::GetInstance()->SetAgentOpenFdTotal(GloablFileDescriptorManager::GetInstance()->GetOpenedFilePtrSize()); + LoongCollectorMonitor::GetInstance()->SetAgentOpenFdTotal( + GloablFileDescriptorManager::GetInstance()->GetOpenedFilePtrSize()); mRegisterdHandlersTotal->Set(EventDispatcher::GetInstance()->GetHandlerCount()); mActiveReadersTotal->Set(CheckPointManager::Instance()->GetReaderCount()); mEventProcessCount = 0; @@ -529,6 +530,7 @@ Event* LogInput::PopEventQueue() { #ifdef APSARA_UNIT_TEST_MAIN void LogInput::CleanEnviroments() { mIdleFlag = true; + mInteruptFlag = true; usleep(100 * 1000); while (true) { Event* ev = PopEventQueue(); diff --git a/core/go_pipeline/LogtailPlugin.cpp b/core/go_pipeline/LogtailPlugin.cpp index 6e41963c82..c1899b13df 100644 --- a/core/go_pipeline/LogtailPlugin.cpp +++ b/core/go_pipeline/LogtailPlugin.cpp @@ -31,6 +31,9 @@ #include "pipeline/PipelineManager.h" #include "pipeline/queue/SenderQueueManager.h" #include "provider/Provider.h" +#ifdef APSARA_UNIT_TEST_MAIN +#include "unittest/pipeline/LogtailPluginMock.h" +#endif DEFINE_FLAG_BOOL(enable_sls_metrics_format, "if enable format metrics in SLS metricstore log pattern", false); 
DEFINE_FLAG_BOOL(enable_containerd_upper_dir_detect, @@ -86,6 +89,7 @@ bool LogtailPlugin::LoadPipeline(const std::string& pipelineName, const std::string& logstore, const std::string& region, logtail::QueueKey logstoreKey) { +#ifndef APSARA_UNIT_TEST_MAIN if (!mPluginValid) { LoadPluginBase(); } @@ -110,9 +114,14 @@ bool LogtailPlugin::LoadPipeline(const std::string& pipelineName, } return false; +#else + return LogtailPluginMock::GetInstance()->LoadPipeline( + pipelineName, pipeline, project, logstore, region, logstoreKey); +#endif } bool LogtailPlugin::UnloadPipeline(const std::string& pipelineName) { +#ifndef APSARA_UNIT_TEST_MAIN if (!mPluginValid) { LOG_ERROR(sLogger, ("UnloadPipeline", "plugin not valid")); return false; @@ -128,9 +137,13 @@ bool LogtailPlugin::UnloadPipeline(const std::string& pipelineName) { } return false; +#else + return LogtailPluginMock::GetInstance()->UnloadPipeline(pipelineName); +#endif } void LogtailPlugin::StopAllPipelines(bool withInputFlag) { +#ifndef APSARA_UNIT_TEST_MAIN if (mPluginValid && mStopAllPipelinesFun != NULL) { LOG_INFO(sLogger, ("Go pipelines stop all", "starts")); auto stopAllStart = GetCurrentTimeInMilliSeconds(); @@ -142,9 +155,13 @@ void LogtailPlugin::StopAllPipelines(bool withInputFlag) { "Stopping all Go pipelines took " + ToString(stopAllCost) + "ms"); } } +#else + LogtailPluginMock::GetInstance()->StopAllPipelines(withInputFlag); +#endif } void LogtailPlugin::Stop(const std::string& configName, bool removedFlag) { +#ifndef APSARA_UNIT_TEST_MAIN if (mPluginValid && mStopFun != NULL) { LOG_INFO(sLogger, ("Go pipelines stop", "starts")("config", configName)); auto stopStart = GetCurrentTimeInMilliSeconds(); @@ -159,6 +176,9 @@ void LogtailPlugin::Stop(const std::string& configName, bool removedFlag) { HOLD_ON_TOO_SLOW_ALARM, "Stopping Go pipeline " + configName + " took " + ToString(stopCost) + "ms"); } } +#else + LogtailPluginMock::GetInstance()->Stop(configName, removedFlag); +#endif } void 
LogtailPlugin::StopBuiltInModules() { @@ -170,6 +190,7 @@ void LogtailPlugin::StopBuiltInModules() { } void LogtailPlugin::Start(const std::string& configName) { +#ifndef APSARA_UNIT_TEST_MAIN if (mPluginValid && mStartFun != NULL) { LOG_INFO(sLogger, ("Go pipelines start", "starts")("config name", configName)); GoString goConfigName; @@ -178,6 +199,9 @@ void LogtailPlugin::Start(const std::string& configName) { mStartFun(goConfigName); LOG_INFO(sLogger, ("Go pipelines start", "succeeded")("config name", configName)); } +#else + LogtailPluginMock::GetInstance()->Start(configName); +#endif } int LogtailPlugin::IsValidToSend(long long logstoreKey) { @@ -503,6 +527,7 @@ void LogtailPlugin::ProcessLog(const std::string& configName, void LogtailPlugin::ProcessLogGroup(const std::string& configName, const std::string& logGroup, const std::string& packId) { +#ifndef APSARA_UNIT_TEST_MAIN if (logGroup.empty() || !(mPluginValid && mProcessLogsFun != NULL)) { return; } @@ -521,6 +546,9 @@ void LogtailPlugin::ProcessLogGroup(const std::string& configName, if (rst != (GoInt)0) { LOG_WARNING(sLogger, ("process loggroup error", configName)("result", rst)); } +#else + LogtailPluginMock::GetInstance()->ProcessLogGroup(configName, logGroup, packId); +#endif } void LogtailPlugin::GetGoMetrics(std::vector>& metircsList, diff --git a/core/monitor/profile_sender/ProfileSender.cpp b/core/monitor/profile_sender/ProfileSender.cpp index d899cbae00..2c6b942c67 100644 --- a/core/monitor/profile_sender/ProfileSender.cpp +++ b/core/monitor/profile_sender/ProfileSender.cpp @@ -25,9 +25,9 @@ #ifdef __ENTERPRISE__ #include "EnterpriseProfileSender.h" #endif -#include "sdk/Exception.h" -#include "plugin/flusher/sls/SLSClientManager.h" #include "app_config/AppConfig.h" +#include "plugin/flusher/sls/SLSClientManager.h" +#include "sdk/Exception.h" // TODO: temporarily used #include "common/compression/CompressorFactory.h" @@ -119,12 +119,17 @@ FlusherSLS* ProfileSender::GetFlusher(const string& 
region) { } bool ProfileSender::IsProfileData(const string& region, const string& project, const string& logstore) { +// TODO: temporarily used, profile should work in unit test +#ifndef APSARA_UNIT_TEST_MAIN if ((logstore == "shennong_log_profile" || logstore == "logtail_alarm" || logstore == "logtail_status_profile" || logstore == "logtail_suicide_profile") && (project == GetProfileProjectName(region) || region == "")) return true; else return false; +#else + return false; +#endif } void ProfileSender::SendToProfileProject(const string& region, sls_logs::LogGroup& logGroup) { diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index 29666c68c1..5009b3c445 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -128,6 +128,7 @@ class Pipeline { friend class InputProcessSecurityUnittest; friend class InputNetworkSecurityUnittest; friend class InputNetworkObserverUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/pipeline/PipelineManager.h b/core/pipeline/PipelineManager.h index 6c3cf5cbe0..255514c15d 100644 --- a/core/pipeline/PipelineManager.h +++ b/core/pipeline/PipelineManager.h @@ -76,6 +76,7 @@ class PipelineManager { friend class CircularProcessQueueUnittest; friend class CommonConfigProviderUnittest; friend class FlusherUnittest; + friend class PipelineUnittest; #endif }; diff --git a/core/pipeline/plugin/instance/ProcessorInstance.h b/core/pipeline/plugin/instance/ProcessorInstance.h index 298c99a78a..24373685c6 100644 --- a/core/pipeline/plugin/instance/ProcessorInstance.h +++ b/core/pipeline/plugin/instance/ProcessorInstance.h @@ -59,6 +59,7 @@ class ProcessorInstance : public PluginInstance { friend class InputFileUnittest; friend class InputPrometheusUnittest; friend class PipelineUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/pipeline/plugin/interface/Flusher.h b/core/pipeline/plugin/interface/Flusher.h index 232020df34..a59e73eb6d 100644 --- 
a/core/pipeline/plugin/interface/Flusher.h +++ b/core/pipeline/plugin/interface/Flusher.h @@ -62,6 +62,7 @@ class Flusher : public Plugin { friend class FlusherInstanceUnittest; friend class FlusherRunnerUnittest; friend class FlusherUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/pipeline/queue/BoundedProcessQueue.h b/core/pipeline/queue/BoundedProcessQueue.h index 728abe0bbe..77687ce0b9 100644 --- a/core/pipeline/queue/BoundedProcessQueue.h +++ b/core/pipeline/queue/BoundedProcessQueue.h @@ -53,6 +53,7 @@ class BoundedProcessQueue : public BoundedQueueInterfaceAddInProcessCnt(); - } else { - const auto& p = PipelineManager::GetInstance()->FindConfigByName(configName); - if (p) { - p->AddInProcessCnt(); - } + const auto& p = PipelineManager::GetInstance()->FindConfigByName(configName); + if (p) { + p->AddInProcessCnt(); } } }; diff --git a/core/pipeline/queue/ProcessQueueManager.h b/core/pipeline/queue/ProcessQueueManager.h index dbe47efeaa..289bafba5b 100644 --- a/core/pipeline/queue/ProcessQueueManager.h +++ b/core/pipeline/queue/ProcessQueueManager.h @@ -93,6 +93,7 @@ class ProcessQueueManager : public FeedbackInterface { void Clear(); friend class ProcessQueueManagerUnittest; friend class PipelineUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/pipeline/queue/SenderQueueManager.h b/core/pipeline/queue/SenderQueueManager.h index 6a8c6a1ad7..cc159daa86 100644 --- a/core/pipeline/queue/SenderQueueManager.h +++ b/core/pipeline/queue/SenderQueueManager.h @@ -98,6 +98,7 @@ class SenderQueueManager : public FeedbackInterface { #ifdef APSARA_UNIT_TEST_MAIN friend class SenderQueueManagerUnittest; friend class FlusherRunnerUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index 4a5edeeec1..3ecb22a0be 100644 --- a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -810,8 +810,8 
@@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) // the possibility of hash key conflict is very low, so data is // dropped here. cpt->Commit(); - failDetail << ", drop exactly once log group and commit checkpoint" - << " checkpointKey:" << cpt->key << " checkpoint:" << cpt->data.DebugString(); + failDetail << ", drop exactly once log group and commit checkpoint" << " checkpointKey:" << cpt->key + << " checkpoint:" << cpt->data.DebugString(); suggestion << "no suggestion"; AlarmManager::GetInstance()->SendAlarm( EXACTLY_ONCE_ALARM, diff --git a/core/runner/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp index 3b77dc17f5..4a0ea254dc 100644 --- a/core/runner/FlusherRunner.cpp +++ b/core/runner/FlusherRunner.cpp @@ -26,7 +26,6 @@ #include "pipeline/queue/SenderQueueItem.h" #include "pipeline/queue/SenderQueueManager.h" #include "plugin/flusher/sls/DiskBufferWriter.h" -#include "runner/sink/http/HttpSink.h" // TODO: temporarily used here #include "plugin/flusher/sls/PackIdManager.h" #include "plugin/flusher/sls/SLSClientManager.h" @@ -59,6 +58,7 @@ bool FlusherRunner::Init() { mThreadRes = async(launch::async, &FlusherRunner::Run, this); mLastCheckSendClientTime = time(nullptr); + mIsFlush = false; return true; } @@ -139,12 +139,12 @@ void FlusherRunner::PushToHttpSink(SenderQueueItem* item, bool withLimit) { } req->mEnqueTime = item->mLastSendTime = chrono::system_clock::now(); - HttpSink::GetInstance()->AddRequest(std::move(req)); - ++mHttpSendingCnt; LOG_DEBUG(sLogger, ("send item to http sink, item address", item)("config-flusher-dst", QueueKeyManager::GetInstance()->GetName(item->mQueueKey))( - "sending cnt", ToString(mHttpSendingCnt.load()))); + "sending cnt", ToString(mHttpSendingCnt.load() + 1))); + HttpSink::GetInstance()->AddRequest(std::move(req)); + ++mHttpSendingCnt; } void FlusherRunner::Run() { @@ -195,7 +195,6 @@ void FlusherRunner::Run() { PackIdManager::GetInstance()->CleanTimeoutEntry(); mLastCheckSendClientTime = 
time(NULL); } - if (mIsFlush && SenderQueueManager::GetInstance()->IsAllQueueEmpty()) { break; } diff --git a/core/runner/FlusherRunner.h b/core/runner/FlusherRunner.h index e23856aed9..3390a021b6 100644 --- a/core/runner/FlusherRunner.h +++ b/core/runner/FlusherRunner.h @@ -24,6 +24,7 @@ #include "pipeline/plugin/interface/Flusher.h" #include "pipeline/queue/SenderQueueItem.h" #include "runner/sink/SinkType.h" +#include "runner/sink/http/HttpSink.h" namespace logtail { @@ -83,6 +84,7 @@ class FlusherRunner { friend class PluginRegistryUnittest; friend class FlusherRunnerUnittest; friend class InstanceConfigManagerUnittest; + friend class PipelineUpdateUnittest; #endif }; diff --git a/core/runner/ProcessorRunner.cpp b/core/runner/ProcessorRunner.cpp index 76c34afe28..ab88c517c6 100644 --- a/core/runner/ProcessorRunner.cpp +++ b/core/runner/ProcessorRunner.cpp @@ -22,7 +22,6 @@ #include "monitor/AlarmManager.h" #include "monitor/metric_constants/MetricConstants.h" #include "pipeline/PipelineManager.h" -#include "queue/ExactlyOnceQueueManager.h" #include "queue/ProcessQueueManager.h" #include "queue/QueueKeyManager.h" @@ -49,6 +48,7 @@ void ProcessorRunner::Init() { for (uint32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { mThreadRes[threadNo] = async(launch::async, &ProcessorRunner::Run, this, threadNo); } + mIsFlush = false; } void ProcessorRunner::Stop() { @@ -142,7 +142,7 @@ void ProcessorRunner::Run(uint32_t threadNo) { pipeline->Process(eventGroupList, item->mInputIndex); // if the pipeline is updated, the pointer will be released, so we need to update it to the new pipeline if (hasOldPipeline) { - pipeline = PipelineManager::GetInstance()->FindConfigByName(configName); + pipeline = PipelineManager::GetInstance()->FindConfigByName(configName); // update to new pipeline if (!pipeline) { LOG_INFO(sLogger, ("pipeline not found during processing, perhaps due to config deletion", diff --git a/core/runner/sink/Sink.h b/core/runner/sink/Sink.h index 
b8df22a28d..59992dc6c4 100644 --- a/core/runner/sink/Sink.h +++ b/core/runner/sink/Sink.h @@ -27,7 +27,7 @@ class Sink { public: virtual bool Init() = 0; virtual void Stop() = 0; - + bool AddRequest(std::unique_ptr&& request) { mQueue.Push(std::move(request)); return true; diff --git a/core/runner/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp index e9951d4237..0213edd166 100644 --- a/core/runner/sink/http/HttpSink.cpp +++ b/core/runner/sink/http/HttpSink.cpp @@ -24,6 +24,9 @@ #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SenderQueueItem.h" #include "runner/FlusherRunner.h" +#ifdef APSARA_UNIT_TEST_MAIN +#include "unittest/pipeline/HttpSinkMock.h" +#endif DEFINE_FLAG_INT32(http_sink_exit_timeout_secs, "", 5); @@ -31,6 +34,15 @@ using namespace std; namespace logtail { +HttpSink* HttpSink::GetInstance() { +#ifndef APSARA_UNIT_TEST_MAIN + static HttpSink instance; + return &instance; +#else + return HttpSinkMock::GetInstance(); +#endif +} + bool HttpSink::Init() { mClient = curl_multi_init(); if (mClient == nullptr) { diff --git a/core/runner/sink/http/HttpSink.h b/core/runner/sink/http/HttpSink.h index ad788adee8..a6ce67d882 100644 --- a/core/runner/sink/http/HttpSink.h +++ b/core/runner/sink/http/HttpSink.h @@ -23,10 +23,9 @@ #include #include +#include "monitor/MetricManager.h" #include "runner/sink/Sink.h" #include "runner/sink/http/HttpSinkRequest.h" -#include "monitor/MetricManager.h" - namespace logtail { class HttpSink : public Sink { @@ -34,10 +33,7 @@ class HttpSink : public Sink { HttpSink(const HttpSink&) = delete; HttpSink& operator=(const HttpSink&) = delete; - static HttpSink* GetInstance() { - static HttpSink instance; - return &instance; - } + static HttpSink* GetInstance(); bool Init() override; void Stop() override; @@ -68,6 +64,7 @@ class HttpSink : public Sink { #ifdef APSARA_UNIT_TEST_MAIN friend class FlusherRunnerUnittest; + friend class HttpSinkMock; #endif }; diff --git 
a/core/unittest/config/PipelineManagerMock.h b/core/unittest/config/PipelineManagerMock.h index 4a27a50802..8bcd8cb6ce 100644 --- a/core/unittest/config/PipelineManagerMock.h +++ b/core/unittest/config/PipelineManagerMock.h @@ -31,9 +31,6 @@ class PipelineMock : public Pipeline { mContext.SetCreateTime(config.mCreateTime); return (*mConfig)["valid"].asBool(); } - - bool Start() { return true; } - void Stop(bool isRemoving) {} }; class PipelineManagerMock : public PipelineManager { @@ -44,6 +41,9 @@ class PipelineManagerMock : public PipelineManager { } void ClearEnvironment() { + for (auto& it : mPipelineNameEntityMap) { + it.second->Stop(true); + } mPipelineNameEntityMap.clear(); mPluginCntMap.clear(); } diff --git a/core/unittest/pipeline/HttpSinkMock.h b/core/unittest/pipeline/HttpSinkMock.h new file mode 100644 index 0000000000..3016f56e1d --- /dev/null +++ b/core/unittest/pipeline/HttpSinkMock.h @@ -0,0 +1,99 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "logger/Logger.h" +#include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/queue/SLSSenderQueueItem.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "runner/FlusherRunner.h" +#include "runner/sink/http/HttpSink.h" +#include "sdk/Common.h" + +namespace logtail { +class HttpSinkMock : public HttpSink { +public: + HttpSinkMock(const HttpSinkMock&) = delete; + HttpSinkMock& operator=(const HttpSinkMock&) = delete; + + static HttpSinkMock* GetInstance() { + static HttpSinkMock instance; + return &instance; + } + + bool Init() override { + mThreadRes = async(std::launch::async, &HttpSinkMock::Run, this); + mIsFlush = false; + return true; + } + + void Stop() override { + mIsFlush = true; + if (!mThreadRes.valid()) { + return; + } + std::future_status s = mThreadRes.wait_for(std::chrono::seconds(1)); + if (s == std::future_status::ready) { + LOG_INFO(sLogger, ("http sink mock", "stopped successfully")); + } else { + LOG_WARNING(sLogger, ("http sink mock", "forced to stopped")); + } + ClearRequests(); + } + + void Run() { + LOG_INFO(sLogger, ("http sink mock", "started")); + while (true) { + std::unique_ptr request; + if (mQueue.WaitAndPop(request, 500)) { + { + std::lock_guard lock(mMutex); + mRequests.push_back(*(request->mItem)); + } + request->mResponse.SetStatusCode(200); + request->mResponse.mHeader[sdk::X_LOG_REQUEST_ID] = "request_id"; + static_cast(request->mItem->mFlusher)->OnSendDone(request->mResponse, request->mItem); + FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); + request.reset(); + } else if (mIsFlush && mQueue.Empty()) { + break; + } else { + continue; + } + } + } + + std::vector& GetRequests() { + std::lock_guard lock(mMutex); + return mRequests; + } + + void ClearRequests() { + std::lock_guard lock(mMutex); + mRequests.clear(); + } + +private: + HttpSinkMock() = default; + ~HttpSinkMock() = default; + + std::atomic_bool mIsFlush = false; + mutable std::mutex mMutex; + std::vector 
mRequests; +}; + +} // namespace logtail \ No newline at end of file diff --git a/core/unittest/pipeline/LogtailPluginMock.h b/core/unittest/pipeline/LogtailPluginMock.h new file mode 100644 index 0000000000..6a87704099 --- /dev/null +++ b/core/unittest/pipeline/LogtailPluginMock.h @@ -0,0 +1,93 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "go_pipeline/LogtailPlugin.h" + +namespace logtail { +class LogtailPluginMock : public LogtailPlugin { +public: + static LogtailPluginMock* GetInstance() { + static LogtailPluginMock instance; + return &instance; + } + + void BlockStart() { startBlockFlag = true; } + void UnblockStart() { startBlockFlag = false; } + void BlockProcess() { processBlockFlag = true; } + void UnblockProcess() { processBlockFlag = false; } + void BlockStop() { stopBlockFlag = true; } + void UnblockStop() { stopBlockFlag = false; } + + bool LoadPipeline(const std::string& pipelineName, + const std::string& pipeline, + const std::string& project, + const std::string& logstore, + const std::string& region, + logtail::QueueKey logstoreKey) { + return true; + } + + bool UnloadPipeline(const std::string& pipelineName) { return true; } + + void StopAllPipelines(bool withInputFlag) {} + + void Start(const std::string& configName) { + while (startBlockFlag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + startFlag = true; + LOG_INFO(sLogger, 
("LogtailPluginMock start", "success")("config", configName)); + } + + void Stop(const std::string& configName, bool removingFlag) { + while (stopBlockFlag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + startFlag = false; + LOG_INFO(sLogger, ("LogtailPluginMock stop", "success")("config", configName)); + } + + + void ProcessLogGroup(const std::string& configName, const std::string& logGroup, const std::string& packId) { + while (processBlockFlag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + LogtailPlugin::SendPbV2(configName.c_str(), + configName.size(), + "", + 0, + const_cast(logGroup.c_str()), + logGroup.size(), + 0, + "", + 0); + LOG_INFO(sLogger, + ("LogtailPluginMock process log group", "success")("config", configName)("logGroup", + logGroup)("packId", packId)); + } + + bool IsStarted() const { return startFlag; } + +private: + std::atomic_bool startBlockFlag = false; + std::atomic_bool processBlockFlag = false; + std::atomic_bool stopBlockFlag = false; + std::atomic_bool startFlag = false; +}; + +} // namespace logtail diff --git a/core/unittest/pipeline/PipelineUnittest.cpp b/core/unittest/pipeline/PipelineUnittest.cpp index 0f9b273a37..614e6dd415 100644 --- a/core/unittest/pipeline/PipelineUnittest.cpp +++ b/core/unittest/pipeline/PipelineUnittest.cpp @@ -2702,7 +2702,8 @@ void PipelineUnittest::TestProcess() const { processor->Init(Json::Value(), ctx); pipeline.mProcessorLine.emplace_back(std::move(processor)); - WriteMetrics::GetInstance()->PrepareMetricsRecordRef(pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); pipeline.mProcessorsInEventsTotal = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL); pipeline.mProcessorsInGroupsTotal @@ -2750,7 +2751,8 @@ void PipelineUnittest::TestSend() const { 
configs.emplace_back(1, nullptr); pipeline.mRouter.Init(configs, ctx); - WriteMetrics::GetInstance()->PrepareMetricsRecordRef(pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); pipeline.mFlushersInGroupsTotal = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_FLUSHERS_IN_EVENT_GROUPS_TOTAL); pipeline.mFlushersInEventsTotal @@ -2816,7 +2818,8 @@ void PipelineUnittest::TestSend() const { configs.emplace_back(configJson.size(), nullptr); pipeline.mRouter.Init(configs, ctx); - WriteMetrics::GetInstance()->PrepareMetricsRecordRef(pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + pipeline.mMetricsRecordRef, MetricCategory::METRIC_CATEGORY_UNKNOWN, {}); pipeline.mFlushersInGroupsTotal = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_FLUSHERS_IN_EVENT_GROUPS_TOTAL); pipeline.mFlushersInEventsTotal @@ -2893,15 +2896,19 @@ void PipelineUnittest::TestInProcessingCount() const { vector group; group.emplace_back(make_shared()); + auto pipeline2 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap[""] = pipeline2; processQueue->EnablePop(); processQueue->Push(GenerateProcessItem(pipeline)); APSARA_TEST_EQUAL(0, pipeline->mInProcessCnt.load()); + APSARA_TEST_EQUAL(0, pipeline2->mInProcessCnt.load()); unique_ptr item; APSARA_TEST_TRUE(processQueue->Pop(item)); - APSARA_TEST_EQUAL(1, pipeline->mInProcessCnt.load()); - - pipeline->SubInProcessCnt(); APSARA_TEST_EQUAL(0, pipeline->mInProcessCnt.load()); + APSARA_TEST_EQUAL(1, pipeline2->mInProcessCnt.load()); + + pipeline2->SubInProcessCnt(); + APSARA_TEST_EQUAL(0, pipeline2->mInProcessCnt.load()); } void PipelineUnittest::TestWaitAllItemsInProcessFinished() const { diff --git a/core/unittest/pipeline/PipelineUpdateUnittest.cpp 
b/core/unittest/pipeline/PipelineUpdateUnittest.cpp index 26c2ef1702..6e5a4be888 100644 --- a/core/unittest/pipeline/PipelineUpdateUnittest.cpp +++ b/core/unittest/pipeline/PipelineUpdateUnittest.cpp @@ -12,39 +12,157 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include #include #include #include #include "common/JsonUtil.h" #include "config/PipelineConfig.h" -#include "file_server/FileServer.h" +#include "file_server/EventDispatcher.h" #include "file_server/event_handler/LogInput.h" #include "pipeline/plugin/PluginRegistry.h" +#include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "pipeline/queue/SLSSenderQueueItem.h" +#include "pipeline/queue/SenderQueueManager.h" +#include "runner/FlusherRunner.h" +#include "runner/ProcessorRunner.h" #include "unittest/Unittest.h" #include "unittest/config/PipelineManagerMock.h" +#include "unittest/pipeline/HttpSinkMock.h" +#include "unittest/pipeline/LogtailPluginMock.h" +#include "unittest/plugin/PluginMock.h" using namespace std; namespace logtail { +class InputFileMock : public InputMock { +public: + static const std::string sName; +}; + +const std::string InputFileMock::sName = "input_file_mock"; + +class InputFileMock2 : public InputMock { +public: + static const std::string sName; +}; + +const std::string InputFileMock2::sName = "input_file_mock2"; + +class ProcessorMock2 : public ProcessorMock { +public: + static const std::string sName; +}; + +const std::string ProcessorMock2::sName = "processor_mock2"; + +class FlusherSLSMock : public FlusherSLS { +public: + static const std::string sName; + + bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const override { + auto data = static_cast(item); + std::map header; + req = std::make_unique( + "POST", false, "test-host", 80, "/test-operation", "", header, data->mData, item); + 
return true; + } +}; + +const std::string FlusherSLSMock::sName = "flusher_sls_mock"; + +class FlusherSLSMock2 : public FlusherSLSMock { +public: + static const std::string sName; +}; + +const std::string FlusherSLSMock2::sName = "flusher_sls_mock2"; + class PipelineUpdateUnittest : public testing::Test { public: - void TestFileServerStart() const; + void TestFileServerStart(); + void TestPipelineParamUpdateCase1() const; + void TestPipelineParamUpdateCase2() const; + void TestPipelineParamUpdateCase3() const; + void TestPipelineParamUpdateCase4() const; + void TestPipelineTypeUpdateCase1() const; + void TestPipelineTypeUpdateCase2() const; + void TestPipelineTypeUpdateCase3() const; + void TestPipelineTypeUpdateCase4() const; + void TestPipelineTopoUpdateCase1() const; + void TestPipelineTopoUpdateCase2() const; + void TestPipelineTopoUpdateCase3() const; + void TestPipelineTopoUpdateCase4() const; + void TestPipelineTopoUpdateCase5() const; + void TestPipelineTopoUpdateCase6() const; + void TestPipelineTopoUpdateCase7() const; + void TestPipelineTopoUpdateCase8() const; + void TestPipelineTopoUpdateCase9() const; + void TestPipelineTopoUpdateCase10() const; + void TestPipelineTopoUpdateCase11() const; + void TestPipelineTopoUpdateCase12() const; + void TestPipelineInputBlock() const; + void TestPipelineGoInputBlockCase1() const; + void TestPipelineGoInputBlockCase2() const; + void TestPipelineIsolationCase1() const; + void TestPipelineIsolationCase2() const; + void TestPipelineUpdateManyCase1() const; + void TestPipelineUpdateManyCase2() const; + void TestPipelineUpdateManyCase3() const; + void TestPipelineUpdateManyCase4() const; + void TestPipelineUpdateManyCase5() const; + void TestPipelineUpdateManyCase6() const; + void TestPipelineUpdateManyCase7() const; + void TestPipelineUpdateManyCase8() const; + void TestPipelineUpdateManyCase9() const; + void TestPipelineUpdateManyCase10() const; protected: - static void SetUpTestCase() { 
PluginRegistry::GetInstance()->LoadPlugins(); } + static void SetUpTestCase() { + PluginRegistry::GetInstance()->LoadPlugins(); + LoadPluginMock(); + PluginRegistry::GetInstance()->RegisterInputCreator(new StaticInputCreator()); + PluginRegistry::GetInstance()->RegisterInputCreator(new StaticInputCreator()); + PluginRegistry::GetInstance()->RegisterProcessorCreator(new StaticProcessorCreator()); + PluginRegistry::GetInstance()->RegisterFlusherCreator(new StaticFlusherCreator()); + PluginRegistry::GetInstance()->RegisterFlusherCreator(new StaticFlusherCreator()); - static void TearDownTestCase() { - PluginRegistry::GetInstance()->UnloadPlugins(); - FileServer::GetInstance()->Stop(); + FlusherRunner::GetInstance()->mEnableRateLimiter = false; +#ifdef __ENTERPRISE__ + builtinPipelineCnt = EnterpriseConfigProvider::GetInstance()->GetAllBuiltInPipelineConfigs().size(); +#endif + SenderQueueManager::GetInstance()->mDefaultQueueParam.mCapacity = 1; // test extra buffer + ProcessQueueManager::GetInstance()->mBoundedQueueParam.mCapacity = 100; + FLAGS_sls_client_send_compress = false; + AppConfig::GetInstance()->mSendRequestConcurrency = 100; + AppConfig::GetInstance()->mSendRequestGlobalConcurrency = 200; } - void SetUp() override {} + static void TearDownTestCase() { PluginRegistry::GetInstance()->UnloadPlugins(); } + + void SetUp() override { + LogInput::GetInstance()->CleanEnviroments(); + ProcessorRunner::GetInstance()->Init(); + isFileServerStart = false; // file server stop is not reentrant, so we stop it only when start it + } - void TearDown() override {} + void TearDown() override { + LogInput::GetInstance()->CleanEnviroments(); + EventDispatcher::GetInstance()->CleanEnviroments(); + for (auto& pipeline : PipelineManager::GetInstance()->GetAllPipelines()) { + pipeline.second->Stop(true); + } + PipelineManager::GetInstance()->mPipelineNameEntityMap.clear(); + if (isFileServerStart) { + FileServer::GetInstance()->Stop(); + } + ProcessorRunner::GetInstance()->Stop(); 
+ FlusherRunner::GetInstance()->Stop(); + HttpSink::GetInstance()->Stop(); + } private: Json::Value GeneratePipelineConfigJson(const string& inputConfig, @@ -66,43 +184,210 @@ class PipelineUpdateUnittest : public testing::Test { errorMsg); return json; } + + void AddDataToProcessQueue(const string& configName, const string& data) const { + auto key = QueueKeyManager::GetInstance()->GetKey(configName); + PipelineEventGroup g(std::make_shared()); + auto event = g.AddLogEvent(); + event->SetContent("content", data); + std::unique_ptr item = std::make_unique(std::move(g), 0); + { + auto manager = ProcessQueueManager::GetInstance(); + manager->CreateOrUpdateBoundedQueue(key, 0, PipelineContext{}); + lock_guard lock(manager->mQueueMux); + auto iter = manager->mQueues.find(key); + APSARA_TEST_NOT_EQUAL(iter, manager->mQueues.end()); + static_cast((*iter->second.first).get())->mValidToPush = true; + APSARA_TEST_TRUE_FATAL((*iter->second.first)->Push(std::move(item))); + } + }; + + void AddDataToProcessor(const string& configName, const string& data) const { + auto key = QueueKeyManager::GetInstance()->GetKey(configName); + PipelineEventGroup g(std::make_shared()); + auto event = g.AddLogEvent(); + event->SetContent("content", data); + ProcessorRunner::GetInstance()->PushQueue(key, 0, std::move(g)); + } + + void AddDataToSenderQueue(const string& configName, string&& data, Flusher* flusher) const { + auto key = flusher->mQueueKey; + std::unique_ptr item = std::make_unique( + std::move(data), data.size(), flusher, key, "", RawDataType::EVENT_GROUP); + { + auto manager = SenderQueueManager::GetInstance(); + manager->CreateQueue(key, "", PipelineContext{}); + lock_guard lock(manager->mQueueMux); + auto iter = manager->mQueues.find(key); + APSARA_TEST_NOT_EQUAL(iter, manager->mQueues.end()); + APSARA_TEST_TRUE_FATAL(iter->second.Push(std::move(item))); + } + } + + void BlockProcessor(std::string configName) const { + auto pipeline = 
PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + processor->Block(); + } + + void UnBlockProcessor(std::string configName) const { + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + processor->Unblock(); + } + + void VerifyData(std::string logstore, size_t from, size_t to) const { + size_t i = from; + size_t j = 0; + size_t retryTimes = 15; + for (size_t retry = 0; retry < retryTimes; ++retry) { + auto requests = HttpSinkMock::GetInstance()->GetRequests(); + i = from; + j = 0; + while ((i < to + 1) && j < requests.size()) { + auto content = requests[j].mData; + auto actualLogstore = static_cast(requests[j].mFlusher)->mLogstore; + if (actualLogstore != logstore) { + ++j; + continue; + } + if (content.find("test-data-" + to_string(i)) != string::npos) { + ++i; + continue; + } + ++j; + } + if (i == to + 1 || retry == retryTimes - 1) { + APSARA_TEST_EQUAL_FATAL(to + 1, i); + return; + } + this_thread::sleep_for(chrono::milliseconds(1000)); + } + } + + string nativeInputFileConfig = R"( + { + "Type": "input_file", + "FilePaths": [ + "/tmp/not_found.log" + ] + })"; string nativeInputConfig = R"( { - "Type": "input_file" + "Type": "input_file_mock", + "FilePaths": [ + "/tmp/not_found.log" + ] + })"; + string nativeInputConfig2 = R"( + { + "Type": "input_file_mock", + "FilePaths": [ + "/tmp/*.log" + ] + })"; + string nativeInputConfig3 = R"( + { + "Type": "input_file_mock2", + "FilePaths": [ + "/tmp/not_found.log" + ] })"; string nativeProcessorConfig = R"( { - "Type": "processor_parse_regex_native" + "Type": "processor_mock" + })"; + string nativeProcessorConfig2 = R"( + { + "Type": "processor_mock", + "Regex": ".*" + })"; + string nativeProcessorConfig3 = R"( + { + "Type": "processor_mock2" })"; string 
nativeFlusherConfig = R"( { - "Type": "flusher_sls" + "Type": "flusher_sls_mock", + "Project": "test_project", + "Logstore": "test_logstore_1", + "Region": "test_region", + "Endpoint": "test_endpoint" + })"; + string nativeFlusherConfig2 = R"( + { + "Type": "flusher_sls_mock", + "Project": "test_project", + "Logstore": "test_logstore_2", + "Region": "test_region", + "Endpoint": "test_endpoint" + })"; + string nativeFlusherConfig3 = R"( + { + "Type": "flusher_sls_mock2", + "Project": "test_project", + "Logstore": "test_logstore_3", + "Region": "test_region", + "Endpoint": "test_endpoint" })"; string goInputConfig = R"( { - "Type": "input_docker_stdout" + "Type": "service_docker_stdout_v2" + })"; + string goInputConfig2 = R"( + { + "Type": "service_docker_stdout_v2", + "Stdout": true + })"; + string goInputConfig3 = R"( + { + "Type": "service_docker_stdout_v3" })"; string goProcessorConfig = R"( { "Type": "processor_regex" })"; + string goProcessorConfig2 = R"( + { + "Type": "processor_regex", + "Regex": ".*" + })"; + string goProcessorConfig3 = R"( + { + "Type": "processor_regex2" + })"; string goFlusherConfig = R"( { "Type": "flusher_stdout" })"; + string goFlusherConfig2 = R"( + { + "Type": "flusher_stdout", + "Stdout": true + })"; + string goFlusherConfig3 = R"( + { + "Type": "flusher_stdout2" + })"; + + size_t builtinPipelineCnt = 0; + bool isFileServerStart = false; }; -void PipelineUpdateUnittest::TestFileServerStart() const { +void PipelineUpdateUnittest::TestFileServerStart() { + isFileServerStart = true; Json::Value nativePipelineConfigJson - = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + = GeneratePipelineConfigJson(nativeInputFileConfig, nativeProcessorConfig, nativeFlusherConfig); Json::Value goPipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); auto pipelineManager = PipelineManagerMock::GetInstance(); PipelineConfigDiff diff; PipelineConfig 
nativePipelineConfigObj - = PipelineConfig("test1", make_unique(nativePipelineConfigJson)); + = PipelineConfig("test-file-1", make_unique(nativePipelineConfigJson)); nativePipelineConfigObj.Parse(); diff.mAdded.push_back(std::move(nativePipelineConfigObj)); - PipelineConfig goPipelineConfigObj = PipelineConfig("test2", make_unique(goPipelineConfigJson)); + PipelineConfig goPipelineConfigObj = PipelineConfig("test-file-2", make_unique(goPipelineConfigJson)); goPipelineConfigObj.Parse(); diff.mAdded.push_back(std::move(goPipelineConfigObj)); @@ -111,7 +396,1991 @@ void PipelineUpdateUnittest::TestFileServerStart() const { APSARA_TEST_EQUAL_FATAL(false, LogInput::GetInstance()->mInteruptFlag); } +void PipelineUpdateUnittest::TestPipelineParamUpdateCase1() const { + // C++ -> C++ -> C++ + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); 
+ AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_2", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineParamUpdateCase2() const { + // Go -> Go -> Go + const std::string configName = "test2"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig2, goProcessorConfig2, 
goFlusherConfig2); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); +} + +void PipelineUpdateUnittest::TestPipelineParamUpdateCase3() const { + // Go -> Go -> C++ + const std::string configName = "test3"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig2, goProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + 
diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-4", flusher); + AddDataToSenderQueue(configName, "test-data-5", flusher); + AddDataToSenderQueue(configName, "test-data-6", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 6); +} + +void PipelineUpdateUnittest::TestPipelineParamUpdateCase4() const { + // C++ -> Go -> C++ + const std::string configName = "test4"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockProcess(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, 
"test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig2, goProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + LogtailPluginMock::GetInstance()->UnblockProcess(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_2", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineTypeUpdateCase1() const { + // C++ -> C++ -> C++ + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, 
pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineTypeUpdateCase2() const { + // Go -> Go -> Go + const std::string configName = "test2"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, 
goFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, goFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); +} + +void PipelineUpdateUnittest::TestPipelineTypeUpdateCase3() const { + // Go -> Go -> C++ + const std::string configName = "test3"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = 
PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-4", flusher); + AddDataToSenderQueue(configName, "test-data-5", flusher); + AddDataToSenderQueue(configName, "test-data-6", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_3", 4, 6); +} + +void PipelineUpdateUnittest::TestPipelineTypeUpdateCase4() const { + // C++ -> Go -> C++ + const std::string configName = "test4"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + 
pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockProcess(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + LogtailPluginMock::GetInstance()->UnblockProcess(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void 
PipelineUpdateUnittest::TestPipelineTopoUpdateCase1() const { + // C++ -> C++ -> C++ => Go -> Go -> Go + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + 
APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase2() const { + // C++ -> C++ -> C++ => Go -> Go -> C++ + const std::string configName = "test2"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, 
make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-8", flusher); + AddDataToSenderQueue(configName, "test-data-9", flusher); + AddDataToSenderQueue(configName, "test-data-10", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 8, 10); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase3() const { + // C++ -> C++ -> C++ => C++ -> Go -> C++ + const std::string configName = "test3"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = 
static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, goProcessorConfig, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase4() const { + // Go -> Go -> Go => C++ -> C++ -> C++ + const std::string configName = "test4"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, 
make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(false, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-1"); + AddDataToProcessor(configName, "test-data-2"); + AddDataToProcessor(configName, "test-data-3"); + + UnBlockProcessor(configName); + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_3", 1, 3); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase5() const { + // Go -> Go -> Go => Go -> Go -> C++ + const std::string configName = "test5"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + 
APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + auto flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_3", 1, 3); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase6() const { + // Go -> Go -> Go => C++ -> Go -> C++ + const std::string configName = "test6"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = 
GeneratePipelineConfigJson(nativeInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-1"); + AddDataToProcessor(configName, "test-data-2"); + AddDataToProcessor(configName, "test-data-3"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_3", 1, 3); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase7() const { + // Go -> Go -> C++ => C++ -> C++ -> C++ + const std::string configName = "test7"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load 
new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(false, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-4"); + AddDataToProcessor(configName, "test-data-5"); + AddDataToProcessor(configName, "test-data-6"); + + UnBlockProcessor(configName); + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_3", 4, 6); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase8() const { + // Go -> Go -> C++ => Go -> Go -> Go + const std::string configName = "test8"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + 
AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, goFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase9() const { + // Go -> Go -> C++ => C++ -> Go -> C++ + const std::string configName = "test9"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, 
"test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + pipelineManager->UpdatePipelines(diffUpdate); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-4"); + AddDataToProcessor(configName, "test-data-5"); + AddDataToProcessor(configName, "test-data-6"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_3", 4, 6); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase10() const { + // C++ -> Go -> C++ => C++ -> C++ -> C++ + const std::string configName = "test10"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = 
const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockProcess(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + LogtailPluginMock::GetInstance()->UnblockProcess(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + BlockProcessor(configName); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(false, LogtailPluginMock::GetInstance()->IsStarted()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + UnBlockProcessor(configName); + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase11() const { + // C++ -> Go -> C++ => Go -> Go -> Go + const std::string configName = "test11"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + auto 
pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockProcess(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, goFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + LogtailPluginMock::GetInstance()->UnblockProcess(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + 
VerifyData("test_logstore_1", 1, 4); +} + +void PipelineUpdateUnittest::TestPipelineTopoUpdateCase12() const { + // C++ -> Go -> C++ => Go -> Go -> C++ + const std::string configName = "test12"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockProcess(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + 
LogtailPluginMock::GetInstance()->UnblockProcess(); + }); + pipelineManager->UpdatePipelines(diffUpdate); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + + flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-8", flusher); + AddDataToSenderQueue(configName, "test-data-9", flusher); + AddDataToSenderQueue(configName, "test-data-10", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_3", 8, 10); +} + +void PipelineUpdateUnittest::TestPipelineInputBlock() const { + // C++ -> C++ -> C++ + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto input = static_cast(const_cast(pipeline->GetInputs()[0].get()->GetPlugin())); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + auto processor + = static_cast(const_cast(pipeline->mProcessorLine[0].get()->mPlugin.get())); + input->Block(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", 
flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + AddDataToProcessor(configName, "test-data-4"); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result1 = async(launch::async, [&]() { + pipelineManager->UpdatePipelines(diffUpdate); + BlockProcessor(configName); + }); + this_thread::sleep_for(chrono::milliseconds(1000)); + APSARA_TEST_NOT_EQUAL_FATAL(future_status::ready, result1.wait_for(chrono::milliseconds(0))); + input->Unblock(); + auto result2 = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + processor->Unblock(); + }); + result1.get(); + result2.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessor(configName, "test-data-8"); + AddDataToProcessor(configName, "test-data-9"); + AddDataToProcessor(configName, "test-data-10"); + + UnBlockProcessor(configName); + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 4); + VerifyData("test_logstore_2", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineGoInputBlockCase1() const { + // Go -> Go -> C++ => Go -> Go -> C++ + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig 
pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockStop(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(goInputConfig3, goProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { pipelineManager->UpdatePipelines(diffUpdate); }); + this_thread::sleep_for(chrono::milliseconds(1000)); + APSARA_TEST_NOT_EQUAL_FATAL(future_status::ready, result.wait_for(chrono::milliseconds(0))); + LogtailPluginMock::GetInstance()->UnblockStop(); + result.get(); + + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + flusher = const_cast( + PipelineManager::GetInstance()->GetAllPipelines().at(configName).get()->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-4", flusher); + AddDataToSenderQueue(configName, "test-data-5", flusher); + AddDataToSenderQueue(configName, 
"test-data-6", flusher); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_3", 4, 6); +} + +void PipelineUpdateUnittest::TestPipelineGoInputBlockCase2() const { + // Go -> Go -> C++ => C++ -> Go -> C++ + const std::string configName = "test1"; + // load old pipeline + Json::Value pipelineConfigJson = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(true, LogtailPluginMock::GetInstance()->IsStarted()); + + // Add data without trigger + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + LogtailPluginMock::GetInstance()->BlockStop(); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate + = GeneratePipelineConfigJson(nativeInputConfig3, nativeFlusherConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate; + PipelineConfig pipelineConfigObjUpdate + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate)); + pipelineConfigObjUpdate.Parse(); + diffUpdate.mModified.push_back(std::move(pipelineConfigObjUpdate)); + auto result = async(launch::async, [&]() { pipelineManager->UpdatePipelines(diffUpdate); }); + this_thread::sleep_for(chrono::milliseconds(1000)); + APSARA_TEST_NOT_EQUAL_FATAL(future_status::ready, 
result.wait_for(chrono::milliseconds(0))); + LogtailPluginMock::GetInstance()->UnblockStop(); + result.get(); + + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + APSARA_TEST_EQUAL_FATAL(false, LogtailPluginMock::GetInstance()->IsStarted()); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + VerifyData("test_logstore_1", 1, 3); +} + +void PipelineUpdateUnittest::TestPipelineIsolationCase1() const { + PipelineConfigDiff diff; + auto pipelineManager = PipelineManager::GetInstance(); + // C++ -> C++ -> C++ + Json::Value pipelineConfigJson1 + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj1 = PipelineConfig("test1", make_unique(pipelineConfigJson1)); + pipelineConfigObj1.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj1)); + // Go -> Go -> Go + Json::Value pipelineConfigJson2 = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); + PipelineConfig pipelineConfigObj2 = PipelineConfig("test2", make_unique(pipelineConfigJson2)); + pipelineConfigObj2.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj2)); + // Go -> Go -> C++ + Json::Value pipelineConfigJson3 = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj3 = PipelineConfig("test3", make_unique(pipelineConfigJson3)); + pipelineConfigObj3.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj3)); + // C++ -> Go -> C++ + Json::Value pipelineConfigJson4 + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj4 = PipelineConfig("test4", make_unique(pipelineConfigJson4)); + pipelineConfigObj4.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj4)); + + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(4U + builtinPipelineCnt, 
pipelineManager->GetAllPipelines().size()); + + PipelineConfigDiff diffUpdate; + diffUpdate.mRemoved.push_back("test1"); + auto pipeline = pipelineManager->GetAllPipelines().at("test1"); + auto input = static_cast(const_cast(pipeline->GetInputs()[0].get()->GetPlugin())); + input->Block(); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto result = async(launch::async, [&]() { pipelineManager->UpdatePipelines(diffUpdate); }); + { // add data to Go -> Go -> C++ + std::string configName = "test3"; + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + VerifyData("test_logstore_1", 1, 3); + } + HttpSinkMock::GetInstance()->ClearRequests(); + { // add data to C++ -> Go -> C++ + std::string configName = "test4"; + AddDataToProcessQueue(configName, "test-data-1"); + AddDataToProcessQueue(configName, "test-data-2"); + AddDataToProcessQueue(configName, "test-data-3"); + VerifyData("test_logstore_1", 1, 3); + } + + input->Unblock(); + result.get(); + APSARA_TEST_EQUAL_FATAL(3U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); +} + +void PipelineUpdateUnittest::TestPipelineIsolationCase2() const { + PipelineConfigDiff diff; + auto pipelineManager = PipelineManager::GetInstance(); + // C++ -> C++ -> C++ + Json::Value pipelineConfigJson1 + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj1 = PipelineConfig("test1", make_unique(pipelineConfigJson1)); + pipelineConfigObj1.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj1)); + // Go -> Go -> Go + Json::Value pipelineConfigJson2 = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, goFlusherConfig); 
+ PipelineConfig pipelineConfigObj2 = PipelineConfig("test2", make_unique(pipelineConfigJson2)); + pipelineConfigObj2.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj2)); + // Go -> Go -> C++ + Json::Value pipelineConfigJson3 = GeneratePipelineConfigJson(goInputConfig, goProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj3 = PipelineConfig("test3", make_unique(pipelineConfigJson3)); + pipelineConfigObj3.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj3)); + // C++ -> Go -> C++ + Json::Value pipelineConfigJson4 + = GeneratePipelineConfigJson(nativeInputConfig, goProcessorConfig, nativeFlusherConfig); + PipelineConfig pipelineConfigObj4 = PipelineConfig("test4", make_unique(pipelineConfigJson4)); + pipelineConfigObj4.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj4)); + + pipelineManager->UpdatePipelines(diff); + APSARA_TEST_EQUAL_FATAL(4U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + PipelineConfigDiff diffUpdate; + diffUpdate.mRemoved.push_back("test4"); + auto pipeline = pipelineManager->GetAllPipelines().at("test4"); + auto input = static_cast(const_cast(pipeline->GetInputs()[0].get()->GetPlugin())); + input->Block(); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto result = async(launch::async, [&]() { pipelineManager->UpdatePipelines(diffUpdate); }); + { // add data to C++ -> C++ -> C++ + std::string configName = "test1"; + AddDataToProcessor(configName, "test-data-1"); + AddDataToProcessor(configName, "test-data-2"); + AddDataToProcessor(configName, "test-data-3"); + VerifyData("test_logstore_1", 1, 3); + } + HttpSinkMock::GetInstance()->ClearRequests(); + { // add data to Go -> Go -> C++ + std::string configName = "test3"; + auto pipeline = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher = const_cast(pipeline->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", 
flusher); + AddDataToSenderQueue(configName, "test-data-2", flusher); + AddDataToSenderQueue(configName, "test-data-3", flusher); + VerifyData("test_logstore_1", 1, 3); + } + + input->Unblock(); + result.get(); + APSARA_TEST_EQUAL_FATAL(3U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase1() const { + // update 3 times + // 1. process queue not empty, send queue not empty + // 2. add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + AddDataToProcessQueue(configName, "test-data-4"); // will be popped to processor + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig 
pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-8"); + AddDataToProcessQueue(configName, "test-data-9"); + AddDataToProcessQueue(configName, "test-data-10"); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-11"); + AddDataToProcessQueue(configName, "test-data-12"); + AddDataToProcessQueue(configName, "test-data-13"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 4); + VerifyData("test_logstore_3", 5, 13); +} + +void 
PipelineUpdateUnittest::TestPipelineUpdateManyCase2() const { + // update 3 times + // 1. process queue not empty, send queue not empty + // 2. not add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + AddDataToProcessQueue(configName, "test-data-4"); // will be popped to processor + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, 
pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-8"); + AddDataToProcessQueue(configName, "test-data-9"); + AddDataToProcessQueue(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase3() const { + // update 3 times + // 1. process queue empty, send queue not empty + // 2. 
add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + AddDataToProcessQueue(configName, "test-data-4"); // will be popped to processor + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + 
ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-8"); + AddDataToProcessQueue(configName, "test-data-9"); + AddDataToProcessQueue(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 4); + VerifyData("test_logstore_3", 5, 10); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase4() const { + // update 3 times + // 1. process queue empty, send queue not empty + // 2. 
not add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + AddDataToProcessQueue(configName, "test-data-4"); // will be popped to processor + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, 
nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 4); + VerifyData("test_logstore_3", 5, 7); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase5() const { + // update 3 times + // 1. process queue not empty, send queue empty + // 2. 
add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-1"); // will be popped to processor + AddDataToProcessQueue(configName, "test-data-2"); + AddDataToProcessQueue(configName, "test-data-3"); + AddDataToProcessQueue(configName, "test-data-4"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = 
GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-8"); + AddDataToProcessQueue(configName, "test-data-9"); + AddDataToProcessQueue(configName, "test-data-10"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_2", 1, 1); + VerifyData("test_logstore_3", 2, 10); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase6() const { + // update 3 times + // 1. process queue not empty, send queue empty + // 2. 
not add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-1"); // will be popped to processor + AddDataToProcessQueue(configName, "test-data-2"); + AddDataToProcessQueue(configName, "test-data-3"); + AddDataToProcessQueue(configName, "test-data-4"); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig 
pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_2", 1, 1); + VerifyData("test_logstore_3", 2, 7); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase7() const { + // update 3 times + // 1. process queue empty, send queue empty + // 2. 
add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-1"); // will be popped to processor + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-2"); + AddDataToProcessQueue(configName, "test-data-3"); + AddDataToProcessQueue(configName, "test-data-4"); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig 
pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + AddDataToProcessQueue(configName, "test-data-7"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_2", 1, 1); + VerifyData("test_logstore_3", 2, 7); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase8() const { + // update 3 times + // 1. process queue empty, send queue empty + // 2. 
not add data + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + AddDataToProcessQueue(configName, "test-data-1"); // will be popped to processor + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + 
diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + auto result = async(launch::async, [&]() { + this_thread::sleep_for(chrono::milliseconds(1000)); + auto processor1 + = static_cast(const_cast(pipeline1->mProcessorLine[0].get()->mPlugin.get())); + processor1->Unblock(); + }); + pipelineManager->UpdatePipelines(diffUpdate3); + result.get(); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-2"); + AddDataToProcessQueue(configName, "test-data-3"); + AddDataToProcessQueue(configName, "test-data-4"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + auto processor2 + = static_cast(const_cast(pipeline2->mProcessorLine[0].get()->mPlugin.get())); + processor2->Unblock(); + UnBlockProcessor(configName); + VerifyData("test_logstore_2", 1, 1); + VerifyData("test_logstore_3", 2, 4); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase9() const { + // update 3 times + // 1. process queue empty, send queue not empty + // 2. 
add data to send queue + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + auto pipeline2 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher2 = const_cast(pipeline2->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-4", flusher2); + AddDataToSenderQueue(configName, "test-data-5", flusher2); + AddDataToSenderQueue(configName, "test-data-6", flusher2); + + 
ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + pipelineManager->UpdatePipelines(diffUpdate3); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-7"); + AddDataToProcessQueue(configName, "test-data-8"); + AddDataToProcessQueue(configName, "test-data-9"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_2", 4, 6); + VerifyData("test_logstore_3", 7, 9); +} + +void PipelineUpdateUnittest::TestPipelineUpdateManyCase10() const { + // update 3 times + // 1. process queue empty, send queue not empty + // 2. 
not add data to send queue + const std::string configName = "test1"; + ProcessorRunner::GetInstance()->Stop(); + // load old pipeline + Json::Value pipelineConfigJson + = GeneratePipelineConfigJson(nativeInputConfig, nativeProcessorConfig, nativeFlusherConfig); + auto pipelineManager = PipelineManager::GetInstance(); + PipelineConfigDiff diff; + PipelineConfig pipelineConfigObj = PipelineConfig(configName, make_unique(pipelineConfigJson)); + pipelineConfigObj.Parse(); + diff.mAdded.push_back(std::move(pipelineConfigObj)); + pipelineManager->UpdatePipelines(diff); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + // Add data without trigger + auto pipeline1 = PipelineManager::GetInstance()->GetAllPipelines().at(configName).get(); + auto flusher1 = const_cast(pipeline1->GetFlushers()[0].get()->GetPlugin()); + AddDataToSenderQueue(configName, "test-data-1", flusher1); + AddDataToSenderQueue(configName, "test-data-2", flusher1); + AddDataToSenderQueue(configName, "test-data-3", flusher1); + + // load new pipeline + Json::Value pipelineConfigJsonUpdate2 + = GeneratePipelineConfigJson(nativeInputConfig2, nativeProcessorConfig2, nativeFlusherConfig2); + PipelineConfigDiff diffUpdate2; + PipelineConfig pipelineConfigObjUpdate2 + = PipelineConfig(configName, make_unique(pipelineConfigJsonUpdate2)); + pipelineConfigObjUpdate2.Parse(); + diffUpdate2.mModified.push_back(std::move(pipelineConfigObjUpdate2)); + pipelineManager->UpdatePipelines(diffUpdate2); + BlockProcessor(configName); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + ProcessorRunner::GetInstance()->Init(); + // load new pipeline + Json::Value pipelineConfigJsonUpdate3 + = GeneratePipelineConfigJson(nativeInputConfig3, nativeProcessorConfig3, nativeFlusherConfig3); + PipelineConfigDiff diffUpdate3; + PipelineConfig pipelineConfigObjUpdate3 + = PipelineConfig(configName, 
make_unique(pipelineConfigJsonUpdate3)); + pipelineConfigObjUpdate3.Parse(); + diffUpdate3.mModified.push_back(std::move(pipelineConfigObjUpdate3)); + pipelineManager->UpdatePipelines(diffUpdate3); + APSARA_TEST_EQUAL_FATAL(1U + builtinPipelineCnt, pipelineManager->GetAllPipelines().size()); + + AddDataToProcessQueue(configName, "test-data-4"); + AddDataToProcessQueue(configName, "test-data-5"); + AddDataToProcessQueue(configName, "test-data-6"); + + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + UnBlockProcessor(configName); + VerifyData("test_logstore_1", 1, 3); + VerifyData("test_logstore_3", 4, 6); +} + UNIT_TEST_CASE(PipelineUpdateUnittest, TestFileServerStart) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineParamUpdateCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineParamUpdateCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineParamUpdateCase3) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineParamUpdateCase4) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTypeUpdateCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTypeUpdateCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTypeUpdateCase3) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTypeUpdateCase4) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase3) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase4) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase5) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase6) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase7) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase8) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase9) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase10) +UNIT_TEST_CASE(PipelineUpdateUnittest, 
TestPipelineTopoUpdateCase11) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineTopoUpdateCase12) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineInputBlock) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineGoInputBlockCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineGoInputBlockCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineIsolationCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineIsolationCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase1) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase2) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase3) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase4) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase5) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase6) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase7) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase8) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase9) +UNIT_TEST_CASE(PipelineUpdateUnittest, TestPipelineUpdateManyCase10) } // namespace logtail diff --git a/core/unittest/plugin/PluginMock.h b/core/unittest/plugin/PluginMock.h index 14c95c8ce3..154d1930be 100644 --- a/core/unittest/plugin/PluginMock.h +++ b/core/unittest/plugin/PluginMock.h @@ -27,7 +27,9 @@ #include "pipeline/plugin/interface/HttpFlusher.h" #include "pipeline/plugin/interface/Input.h" #include "pipeline/plugin/interface/Processor.h" +#include "pipeline/queue/SLSSenderQueueItem.h" #include "pipeline/queue/SenderQueueManager.h" +#include "plugin/flusher/sls/FlusherSLS.h" #include "task_pipeline/Task.h" #include "task_pipeline/TaskRegistry.h" @@ -65,10 +67,21 @@ class InputMock : public Input { return true; } bool Start() override { return true; } - bool Stop(bool isPipelineRemoving) override { return true; } + bool Stop(bool isPipelineRemoving) override { + while (mBlockFlag) { + 
std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + return true; + } bool SupportAck() const override { return mSupportAck; } + void Block() { mBlockFlag = true; } + void Unblock() { mBlockFlag = false; } + bool mSupportAck = true; + +private: + std::atomic_bool mBlockFlag = false; }; const std::string InputMock::sName = "input_mock"; @@ -79,12 +92,22 @@ class ProcessorMock : public Processor { const std::string& Name() const override { return sName; } bool Init(const Json::Value& config) override { return true; } - void Process(PipelineEventGroup& logGroup) override { ++mCnt; }; + void Process(PipelineEventGroup& logGroup) override { + while (mBlockFlag) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + ++mCnt; + }; + + void Block() { mBlockFlag = true; } + void Unblock() { mBlockFlag = false; } uint32_t mCnt = 0; protected: bool IsSupportedEvent(const PipelineEventPtr& e) const override { return true; }; + + std::atomic_bool mBlockFlag = false; }; const std::string ProcessorMock::sName = "processor_mock"; diff --git a/core/unittest/sender/FlusherRunnerUnittest.cpp b/core/unittest/sender/FlusherRunnerUnittest.cpp index 076b51fb07..e0ce09cf66 100644 --- a/core/unittest/sender/FlusherRunnerUnittest.cpp +++ b/core/unittest/sender/FlusherRunnerUnittest.cpp @@ -17,6 +17,7 @@ #include "runner/FlusherRunner.h" #include "runner/sink/http/HttpSink.h" #include "unittest/Unittest.h" +#include "unittest/pipeline/HttpSinkMock.h" #include "unittest/plugin/PluginMock.h" DECLARE_FLAG_INT32(discard_send_fail_interval); @@ -56,7 +57,7 @@ void FlusherRunnerUnittest::TestDispatch() { FlusherRunner::GetInstance()->Dispatch(realItem); unique_ptr req; - APSARA_TEST_TRUE(HttpSink::GetInstance()->mQueue.TryPop(req)); + APSARA_TEST_TRUE(HttpSinkMock::GetInstance()->mQueue.TryPop(req)); APSARA_TEST_NOT_EQUAL(nullptr, req); } { diff --git a/pluginmanager/config_update_test.go b/pluginmanager/config_update_test.go index 66a3d6aef7..2103104b24 100644 --- 
a/pluginmanager/config_update_test.go +++ b/pluginmanager/config_update_test.go @@ -25,12 +25,14 @@ import ( "github.com/alibaba/ilogtail/pkg/logger" _ "github.com/alibaba/ilogtail/plugins/aggregator/baseagg" "github.com/alibaba/ilogtail/plugins/flusher/checker" + "github.com/alibaba/ilogtail/plugins/input/mockd" "github.com/stretchr/testify/suite" ) var updateConfigName = "update_mock_block" var noblockUpdateConfigName = "update_mock_noblock" +var noblockUpdateNoInputConfigName = "update_mock_noblock_no_input" func TestConfigUpdate(t *testing.T) { suite.Run(t, new(configUpdateTestSuite)) @@ -57,6 +59,7 @@ func (s *configUpdateTestSuite) AfterTest(suiteName, testName string) { } func (s *configUpdateTestSuite) TestConfigUpdate() { + // block config -> block config, unblock config, no input config // block config LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] @@ -64,6 +67,8 @@ func (s *configUpdateTestSuite) TestConfigUpdate() { s.NotNil(config, "%s logstrore config should exist", updateConfigName) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) s.Equal(0, checkFlusher.GetLogCount(), "the block flusher checker doesn't have any logs") // update same hang config @@ -72,15 +77,55 @@ func (s *configUpdateTestSuite) TestConfigUpdate() { _ = LoadAndStartMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) // Since independently load config, reload block config will be allowed s.NoError(LoadAndStartMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) + s.NoError(LoadAndStartMockConfig(noblockUpdateNoInputConfigName, noblockUpdateNoInputConfigName, noblockUpdateNoInputConfigName, GetTestConfig(noblockUpdateNoInputConfigName))) LogtailConfigLock.RLock() s.NotNil(LogtailConfig[updateConfigName]) 
s.NotNil(LogtailConfig[noblockUpdateConfigName]) + s.NotNil(LogtailConfig[noblockUpdateNoInputConfigName]) + LogtailConfigLock.RUnlock() + + time.Sleep(time.Second * time.Duration(10)) + LogtailConfigLock.RLock() + s.Equal(20000, GetConfigFlushers(LogtailConfig[noblockUpdateConfigName].PluginRunner)[0].(*checker.FlusherChecker).GetLogCount()) LogtailConfigLock.RUnlock() // unblock old config - checkFlusher.Block = false - time.Sleep(time.Second * time.Duration(5)) + mockInput.Block = false s.Equal(0, checkFlusher.GetLogCount()) +} + +func (s *configUpdateTestSuite) TestConfigUpdateTimeout() { + // block config -> block config, unblock config, no input config + // block config + LogtailConfigLock.RLock() + config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() + s.NotNil(config, "%s logstrore config should exist", updateConfigName) + checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) + s.True(ok) + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) + s.Equal(0, checkFlusher.GetLogCount(), "the block flusher checker doesn't have any logs") + + // update same hang config + s.NoError(Stop(updateConfigName, false)) + s.Equal(0, checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") + + // unblock old config first to mock timeout instead of block + mockInput.Block = false + s.Equal(0, checkFlusher.GetLogCount()) + + _ = LoadAndStartMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) + // Since independently load config, reload block config will be allowed + s.NoError(LoadAndStartMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) + s.NoError(LoadAndStartMockConfig(noblockUpdateNoInputConfigName, noblockUpdateNoInputConfigName, noblockUpdateNoInputConfigName, GetTestConfig(noblockUpdateNoInputConfigName))) + 
LogtailConfigLock.RLock() + s.NotNil(LogtailConfig[updateConfigName]) + s.NotNil(LogtailConfig[noblockUpdateConfigName]) + s.NotNil(LogtailConfig[noblockUpdateNoInputConfigName]) + LogtailConfigLock.RUnlock() + + time.Sleep(time.Second * time.Duration(10)) LogtailConfigLock.RLock() s.Equal(20000, GetConfigFlushers(LogtailConfig[noblockUpdateConfigName].PluginRunner)[0].(*checker.FlusherChecker).GetLogCount()) LogtailConfigLock.RUnlock() @@ -93,6 +138,8 @@ func (s *configUpdateTestSuite) TestConfigUpdateMany() { s.NotNil(config, "%s logstrore config should exist", updateConfigName) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) s.Equal(0, checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") // load block config @@ -101,7 +148,7 @@ func (s *configUpdateTestSuite) TestConfigUpdateMany() { s.Nil(err) s.NotNil(LogtailConfig[updateConfigName]) s.Equal(0, checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") - checkFlusher.Block = false + mockInput.Block = false time.Sleep(time.Second * time.Duration(5)) s.Equal(checkFlusher.GetLogCount(), 0) @@ -130,12 +177,14 @@ func (s *configUpdateTestSuite) TestConfigUpdateName() { LogtailConfigLock.RUnlock() s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) + s.True(ok) + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) defer func() { - checkFlusher.Block = false + mockInput.Block = false time.Sleep(time.Second * 5) s.Equal(checkFlusher.GetLogCount(), 20000) }() - s.True(ok) s.Equal(0, checkFlusher.GetLogCount(), "the hold on blocking flusher checker doesn't have any logs") s.NoError(LoadAndStartMockConfig(updateConfigName+"_", updateConfigName+"_", updateConfigName+"_", GetTestConfig(updateConfigName))) @@ -144,10 +193,12 @@ 
func (s *configUpdateTestSuite) TestConfigUpdateName() { s.NotNil(LogtailConfig[updateConfigName]) s.NotNil(LogtailConfig[updateConfigName+"_"]) checkFlusher, ok := GetConfigFlushers(LogtailConfig[updateConfigName+"_"].PluginRunner)[0].(*checker.FlusherChecker) - LogtailConfigLock.RUnlock() s.True(ok) + mockInput, ok := GetConfigInputs(LogtailConfig[updateConfigName+"_"].PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) + LogtailConfigLock.RUnlock() s.Equal(checkFlusher.GetLogCount(), 0) - checkFlusher.Block = false + mockInput.Block = false time.Sleep(time.Second * 5) s.Equal(checkFlusher.GetLogCount(), 20000) } @@ -160,7 +211,9 @@ func (s *configUpdateTestSuite) TestStopAllExit() { s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) - checkFlusher.Block = false + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) + mockInput.Block = false time.Sleep(time.Second * time.Duration(5)) s.NoError(StopAllPipelines(true)) s.NoError(StopAllPipelines(false)) @@ -175,12 +228,14 @@ func (s *configUpdateTestSuite) TestStopAllExitTimeout() { s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) + mockInput, ok := GetConfigInputs(config.PluginRunner)[0].(*mockd.ServiceMock) + s.True(ok) s.Equal(0, checkFlusher.GetLogCount()) s.NoError(StopAllPipelines(true)) s.NoError(StopAllPipelines(false)) time.Sleep(time.Second) s.Equal(0, checkFlusher.GetLogCount()) - checkFlusher.Block = false + mockInput.Block = false time.Sleep(time.Second * time.Duration(5)) s.Equal(0, checkFlusher.GetLogCount()) } diff --git a/pluginmanager/plugin_runner_helper.go b/pluginmanager/plugin_runner_helper.go index b111134c9f..d92c27c0ff 100644 --- a/pluginmanager/plugin_runner_helper.go +++ b/pluginmanager/plugin_runner_helper.go @@ -126,6 +126,20 @@ func GetFlushCancelToken(runner PluginRunner) <-chan struct{} { return make(<-chan struct{}) 
} +func GetConfigInputs(runner PluginRunner) []pipeline.ServiceInput { + inputs := make([]pipeline.ServiceInput, 0) + if r, ok := runner.(*pluginv1Runner); ok { + for _, i := range r.ServicePlugins { + inputs = append(inputs, i.Input) + } + } else if r, ok := runner.(*pluginv2Runner); ok { + for _, i := range r.ServicePlugins { + inputs = append(inputs, i.Input) + } + } + return inputs +} + func GetConfigFlushers(runner PluginRunner) []pipeline.Flusher { flushers := make([]pipeline.Flusher, 0) if r, ok := runner.(*pluginv1Runner); ok { diff --git a/pluginmanager/test_config/update_mock_block.json b/pluginmanager/test_config/update_mock_block.json index c3c699d24d..ae4ddbbfa6 100644 --- a/pluginmanager/test_config/update_mock_block.json +++ b/pluginmanager/test_config/update_mock_block.json @@ -6,39 +6,41 @@ "DefaultLogQueueSize": 2, "DefaultLogGroupQueueSize": 1 }, - "inputs" : [ + "inputs": [ { - "type" : "service_mock", - "detail" : { + "type": "service_mock", + "detail": { + "Block": true, "LogsPerSecond": 10000, "MaxLogCount": 20000, - "Fields" : { - "content" : "time:2017.09.12 20:55:36\tjson:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" + "Fields": { + "content": "time:2017.09.12 20:55:36\tjson:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" } } } ], - "processors" : [ + "processors": [ { - "type" : "processor_anchor", - "detail" : {"SourceKey" : "content", - "NoAnchorError" : true, - "Anchors" : [ + "type": "processor_anchor", + "detail": { + "SourceKey": "content", + "NoAnchorError": true, + "Anchors": [ { - "Start" : "time", - "Stop" : "\t", - "FieldName" : "time", - "FieldType" : "string", - "ExpondJson" : false + "Start": "time", + "Stop": "\t", + "FieldName": "time", + "FieldType": "string", + "ExpondJson": false 
}, { - "Start" : "json:", - "Stop" : "\n", - "FieldName" : "val", - "FieldType" : "json", - "ExpondJson" : true, - "MaxExpondDepth" : 2, - "ExpondConnecter" : "#" + "Start": "json:", + "Stop": "\n", + "FieldName": "val", + "FieldType": "json", + "ExpondJson": true, + "MaxExpondDepth": 2, + "ExpondConnecter": "#" } ] } @@ -53,12 +55,10 @@ } } ], - "flushers" : [ + "flushers": [ { - "type" : "flusher_checker", - "detail" : { - "Block" : true - } + "type": "flusher_checker", + "detail": {} } ] } \ No newline at end of file diff --git a/pluginmanager/test_config/update_mock_noblock.json b/pluginmanager/test_config/update_mock_noblock.json index c56b300e2c..907b64f282 100644 --- a/pluginmanager/test_config/update_mock_noblock.json +++ b/pluginmanager/test_config/update_mock_noblock.json @@ -10,6 +10,7 @@ { "type": "service_mock", "detail": { + "Block": false, "LogsPerSecond": 10000, "MaxLogCount": 20000, "Fields": { @@ -57,9 +58,7 @@ "flushers": [ { "type": "flusher_checker", - "detail": { - "Block": false - } + "detail": {} } ] } \ No newline at end of file diff --git a/pluginmanager/test_config/update_mock_noblock_no_input.json b/pluginmanager/test_config/update_mock_noblock_no_input.json new file mode 100644 index 0000000000..bc99b3a412 --- /dev/null +++ b/pluginmanager/test_config/update_mock_noblock_no_input.json @@ -0,0 +1,52 @@ +{ + "global": { + "InputIntervalMs": 10000, + "AggregatIntervalMs": 300, + "FlushIntervalMs": 300, + "DefaultLogQueueSize": 2, + "DefaultLogGroupQueueSize": 3 + }, + "inputs": [], + "processors": [ + { + "type": "processor_anchor", + "detail": { + "SourceKey": "content", + "NoAnchorError": true, + "Anchors": [ + { + "Start": "time", + "Stop": "\t", + "FieldName": "time", + "FieldType": "string", + "ExpondJson": false + }, + { + "Start": "json:", + "Stop": "\n", + "FieldName": "val", + "FieldType": "json", + "ExpondJson": true, + "MaxExpondDepth": 2, + "ExpondConnecter": "#" + } + ] + } + } + ], + "aggregators": [ + { + "type": 
"aggregator_base", + "detail": { + "MaxLogGroupCount": 1, + "MaxLogCount": 100 + } + } + ], + "flushers": [ + { + "type": "flusher_checker", + "detail": {} + } + ] +} \ No newline at end of file diff --git a/plugins/flusher/checker/flusher_checker.go b/plugins/flusher/checker/flusher_checker.go index 3b17486db2..b73512750a 100644 --- a/plugins/flusher/checker/flusher_checker.go +++ b/plugins/flusher/checker/flusher_checker.go @@ -29,7 +29,6 @@ type FlusherChecker struct { context pipeline.Context LogGroup protocol.LogGroup Lock sync.RWMutex - Block bool } func (p *FlusherChecker) Init(context pipeline.Context) error { @@ -140,7 +139,7 @@ func (p *FlusherChecker) Flush(projectName string, logstoreName string, configNa // IsReady is ready to flush func (p *FlusherChecker) IsReady(projectName string, logstoreName string, logstoreKey int64) bool { - return !p.Block + return true } // Stop ... diff --git a/plugins/input/mockd/input_mockd.go b/plugins/input/mockd/input_mockd.go index d59a5024e6..31cd82d2f2 100644 --- a/plugins/input/mockd/input_mockd.go +++ b/plugins/input/mockd/input_mockd.go @@ -33,6 +33,7 @@ type ServiceMock struct { Index int64 LogsPerSecond int MaxLogCount int + Block bool nowLogCount int context pipeline.Context } @@ -66,6 +67,18 @@ func (p *ServiceMock) Start(c pipeline.Collector) error { p.waitGroup.Add(1) defer p.waitGroup.Done() for { + for { + if p.Block { + time.Sleep(time.Millisecond * 100) + continue + } + select { + case <-p.shutdown: + return nil + default: + } + break + } beginTime := time.Now() for i := 0; i < p.LogsPerSecond; i++ { p.MockOneLog(c) From 0544cfc3394b76377e38242cd690d21fb8ddfe14 Mon Sep 17 00:00:00 2001 From: dog Date: Mon, 30 Dec 2024 18:50:13 +0800 Subject: [PATCH 08/12] feat: prom stream scrape and stream process (#1925) --- core/CMakeLists.txt | 2 +- core/models/PipelineEventGroup.h | 2 + .../ProcessorPromRelabelMetricNative.cpp | 112 ++++++----- .../inner/ProcessorPromRelabelMetricNative.h | 19 +- 
core/prometheus/async/PromHttpRequest.h | 1 - core/prometheus/component/StreamScraper.cpp | 132 ++++++++++++ core/prometheus/component/StreamScraper.h | 64 ++++++ core/prometheus/labels/Labels.cpp | 2 +- core/prometheus/labels/Labels.h | 3 +- core/prometheus/labels/TextParser.cpp | 3 - core/prometheus/schedulers/BaseScheduler.cpp | 6 + core/prometheus/schedulers/BaseScheduler.h | 5 +- .../prometheus/schedulers/ScrapeScheduler.cpp | 122 ++++------- core/prometheus/schedulers/ScrapeScheduler.h | 42 +--- .../schedulers/TargetSubscriberScheduler.cpp | 2 +- ...ocessorPromRelabelMetricNativeUnittest.cpp | 24 ++- core/unittest/prometheus/CMakeLists.txt | 4 + .../prometheus/ScrapeSchedulerUnittest.cpp | 49 +++-- .../prometheus/StreamScraperUnittest.cpp | 189 ++++++++++++++++++ 19 files changed, 568 insertions(+), 215 deletions(-) create mode 100644 core/prometheus/component/StreamScraper.cpp create mode 100644 core/prometheus/component/StreamScraper.h create mode 100644 core/unittest/prometheus/StreamScraperUnittest.cpp diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 4780c30aa5..f59c9b847e 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -127,7 +127,7 @@ set(SUB_DIRECTORIES_LIST runner runner/sink/http protobuf/sls protobuf/models file_server file_server/event file_server/event_handler file_server/event_listener file_server/reader file_server/polling - prometheus prometheus/labels prometheus/schedulers prometheus/async + prometheus prometheus/labels prometheus/schedulers prometheus/async prometheus/component ebpf ebpf/observer ebpf/security ebpf/handler parser sls_control sdk ) diff --git a/core/models/PipelineEventGroup.h b/core/models/PipelineEventGroup.h index 171e134813..9e15733202 100644 --- a/core/models/PipelineEventGroup.h +++ b/core/models/PipelineEventGroup.h @@ -58,6 +58,8 @@ enum class EventGroupMetaKey { PROMETHEUS_SAMPLES_SCRAPED, PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, PROMETHEUS_UP_STATE, + PROMETHEUS_STREAM_ID, + 
PROMETHEUS_STREAM_TOTAL, SOURCE_ID }; diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp index 30a42fc56e..4c52663ba5 100644 --- a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp @@ -71,7 +71,12 @@ void ProcessorPromRelabelMetricNative::Process(PipelineEventGroup& metricGroup) metricGroup.DelTag(k); } - AddAutoMetrics(metricGroup); + if (metricGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_STREAM_TOTAL)) { + auto autoMetric = prom::AutoMetric(); + UpdateAutoMetrics(metricGroup, autoMetric); + AddAutoMetrics(metricGroup, autoMetric); + } + // delete all tags for (const auto& [k, v] : targetTags) { @@ -138,70 +143,81 @@ vector ProcessorPromRelabelMetricNative::GetToDeleteTargetLabels(con return toDelete; } -void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& metricGroup) { - // if up is set, then add self monitor metrics - if (metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).empty()) { - return; +void ProcessorPromRelabelMetricNative::UpdateAutoMetrics(const PipelineEventGroup& eGroup, + prom::AutoMetric& autoMetric) const { + if (eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION)) { + autoMetric.mScrapeDurationSeconds + = StringTo(eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION).to_string()); } + if (eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE)) { + autoMetric.mScrapeResponseSizeBytes + = StringTo(eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE).to_string()); + } + autoMetric.mScrapeSamplesLimit = mScrapeConfigPtr->mSampleLimit; + if (eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED)) { + autoMetric.mScrapeSamplesScraped + = StringTo(eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED).to_string()); + } + autoMetric.mScrapeTimeoutSeconds = 
mScrapeConfigPtr->mScrapeTimeoutSeconds; - auto targetTags = metricGroup.GetTags(); + if (eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE)) { + autoMetric.mScrapeState = eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE).to_string(); + } + + if (eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE)) { + autoMetric.mUp = StringTo(eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).to_string()); + } +} + +void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& eGroup, + const prom::AutoMetric& autoMetric) const { + auto targetTags = eGroup.GetTags(); + if (!eGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC)) { + LOG_ERROR(sLogger, ("scrape_timestamp_milliseconds is not set", "")); + return; + } - StringView scrapeTimestampMilliSecStr - = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC); + StringView scrapeTimestampMilliSecStr = eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC); auto timestampMilliSec = StringTo(scrapeTimestampMilliSecStr.to_string()); auto timestamp = timestampMilliSec / 1000; auto nanoSec = timestampMilliSec % 1000 * 1000000; - uint64_t samplesPostMetricRelabel = metricGroup.GetEvents().size(); - - auto scrapeDurationSeconds - = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION).to_string()); - - AddMetric(metricGroup, prometheus::SCRAPE_DURATION_SECONDS, scrapeDurationSeconds, timestamp, nanoSec, targetTags); - auto scrapeResponseSize - = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE).to_string()); - AddMetric(metricGroup, prometheus::SCRAPE_RESPONSE_SIZE_BYTES, scrapeResponseSize, timestamp, nanoSec, targetTags); + AddMetric( + eGroup, prometheus::SCRAPE_DURATION_SECONDS, autoMetric.mScrapeDurationSeconds, timestamp, nanoSec, targetTags); - if (mScrapeConfigPtr->mSampleLimit > 0) { - AddMetric(metricGroup, - prometheus::SCRAPE_SAMPLES_LIMIT, - 
mScrapeConfigPtr->mSampleLimit, - timestamp, - nanoSec, - targetTags); - } - - AddMetric(metricGroup, - prometheus::SCRAPE_SAMPLES_POST_METRIC_RELABELING, - samplesPostMetricRelabel, + AddMetric(eGroup, + prometheus::SCRAPE_RESPONSE_SIZE_BYTES, + autoMetric.mScrapeResponseSizeBytes, timestamp, nanoSec, targetTags); - auto samplesScraped - = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED).to_string()); + if (autoMetric.mScrapeSamplesLimit > 0) { + AddMetric( + eGroup, prometheus::SCRAPE_SAMPLES_LIMIT, autoMetric.mScrapeSamplesLimit, timestamp, nanoSec, targetTags); + } - AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_SCRAPED, samplesScraped, timestamp, nanoSec, targetTags); + // AddMetric(eGroup, + // prometheus::SCRAPE_SAMPLES_POST_METRIC_RELABELING, + // autoMetric.mPostRelabel, + // timestamp, + // nanoSec, + // targetTags); - AddMetric(metricGroup, - prometheus::SCRAPE_TIMEOUT_SECONDS, - mScrapeConfigPtr->mScrapeTimeoutSeconds, - timestamp, - nanoSec, - targetTags); + AddMetric( + eGroup, prometheus::SCRAPE_SAMPLES_SCRAPED, autoMetric.mScrapeSamplesScraped, timestamp, nanoSec, targetTags); - // up metric must be the last one - bool upState = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).to_string()); + AddMetric( + eGroup, prometheus::SCRAPE_TIMEOUT_SECONDS, autoMetric.mScrapeTimeoutSeconds, timestamp, nanoSec, targetTags); - if (metricGroup.HasMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE)) { - auto scrapeState = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE); - AddMetric(metricGroup, prometheus::SCRAPE_STATE, 1.0 * upState, timestamp, nanoSec, targetTags); - auto& last = metricGroup.MutableEvents()[metricGroup.GetEvents().size() - 1]; - last.Cast().SetTag(METRIC_LABEL_KEY_STATUS, scrapeState); - } + AddMetric(eGroup, prometheus::SCRAPE_STATE, 1.0 * autoMetric.mUp, timestamp, nanoSec, targetTags); + auto& last = eGroup.MutableEvents()[eGroup.GetEvents().size() - 1]; + auto 
scrapeState = eGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE); + last.Cast().SetTag(METRIC_LABEL_KEY_STATUS, scrapeState); - AddMetric(metricGroup, prometheus::UP, 1.0 * upState, timestamp, nanoSec, targetTags); + // up metric must be the last one + AddMetric(eGroup, prometheus::UP, 1.0 * autoMetric.mUp, timestamp, nanoSec, targetTags); } void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup, @@ -209,7 +225,7 @@ void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup double value, time_t timestamp, uint32_t nanoSec, - const GroupTags& targetTags) { + const GroupTags& targetTags) const { auto* metricEvent = metricGroup.AddMetricEvent(true); metricEvent->SetName(name); metricEvent->SetValue(value); diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h index 1843a09160..33a00d8437 100644 --- a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h @@ -24,6 +24,20 @@ #include "prometheus/schedulers/ScrapeConfig.h" namespace logtail { + +namespace prom { + struct AutoMetric { + double mScrapeDurationSeconds; + uint64_t mScrapeResponseSizeBytes; + uint64_t mScrapeSamplesLimit; + // uint64_t mPostRelabel; + uint64_t mScrapeSamplesScraped; + uint64_t mScrapeTimeoutSeconds; + std::string mScrapeState; + bool mUp; + }; +} // namespace prom + class ProcessorPromRelabelMetricNative : public Processor { public: static const std::string sName; @@ -39,13 +53,14 @@ class ProcessorPromRelabelMetricNative : public Processor { bool ProcessEvent(PipelineEventPtr& e, const GroupTags& targetTags, const std::vector& toDelete); std::vector GetToDeleteTargetLabels(const GroupTags& targetTags) const; - void AddAutoMetrics(PipelineEventGroup& metricGroup); + void AddAutoMetrics(PipelineEventGroup& eGroup, const prom::AutoMetric& autoMetric) const; + void 
UpdateAutoMetrics(const PipelineEventGroup& eGroup, prom::AutoMetric& autoMetric) const; void AddMetric(PipelineEventGroup& metricGroup, const std::string& name, double value, time_t timestamp, uint32_t nanoSec, - const GroupTags& targetTags); + const GroupTags& targetTags) const; std::unique_ptr mScrapeConfigPtr; std::string mLoongCollectorScraper; diff --git a/core/prometheus/async/PromHttpRequest.h b/core/prometheus/async/PromHttpRequest.h index 1665b275b6..e9382b5470 100644 --- a/core/prometheus/async/PromHttpRequest.h +++ b/core/prometheus/async/PromHttpRequest.h @@ -4,7 +4,6 @@ #include #include "common/http/HttpRequest.h" -#include "models/PipelineEventGroup.h" #include "prometheus/async/PromFuture.h" namespace logtail { diff --git a/core/prometheus/component/StreamScraper.cpp b/core/prometheus/component/StreamScraper.cpp new file mode 100644 index 0000000000..da70609af9 --- /dev/null +++ b/core/prometheus/component/StreamScraper.cpp @@ -0,0 +1,132 @@ +#include "prometheus/component/StreamScraper.h" + +#include +#include +#include +#include + +#include "Flags.h" +#include "Labels.h" +#include "common/StringTools.h" +#include "models/PipelineEventGroup.h" +#include "pipeline/queue/ProcessQueueItem.h" +#include "pipeline/queue/ProcessQueueManager.h" +#include "prometheus/Utils.h" + +DEFINE_FLAG_INT64(prom_stream_bytes_size, "stream bytes size", 1024 * 1024); + +DEFINE_FLAG_BOOL(enable_prom_stream_scrape, "enable prom stream scrape", true); + +using namespace std; + +namespace logtail::prom { +size_t StreamScraper::MetricWriteCallback(char* buffer, size_t size, size_t nmemb, void* data) { + uint64_t sizes = size * nmemb; + + if (buffer == nullptr || data == nullptr) { + return 0; + } + + auto* body = static_cast(data); + + size_t begin = 0; + for (size_t end = begin; end < sizes; ++end) { + if (buffer[end] == '\n') { + if (begin == 0 && !body->mCache.empty()) { + body->mCache.append(buffer, end); + body->AddEvent(body->mCache.data(), body->mCache.size()); + 
body->mCache.clear(); + } else if (begin != end) { + body->AddEvent(buffer + begin, end - begin); + } + begin = end + 1; + } + } + + if (begin < sizes) { + body->mCache.append(buffer + begin, sizes - begin); + } + body->mRawSize += sizes; + body->mCurrStreamSize += sizes; + + if (BOOL_FLAG(enable_prom_stream_scrape) && body->mCurrStreamSize >= (size_t)INT64_FLAG(prom_stream_bytes_size)) { + body->mStreamIndex++; + body->SendMetrics(); + } + + return sizes; +} + +void StreamScraper::AddEvent(const char* line, size_t len) { + if (IsValidMetric(StringView(line, len))) { + auto* e = mEventGroup.AddRawEvent(true, mEventPool); + auto sb = mEventGroup.GetSourceBuffer()->CopyString(line, len); + e->SetContentNoCopy(sb); + mScrapeSamplesScraped++; + } +} + +void StreamScraper::FlushCache() { + if (!mCache.empty()) { + AddEvent(mCache.data(), mCache.size()); + mCache.clear(); + } +} + +void StreamScraper::SetTargetLabels(PipelineEventGroup& eGroup) const { + mTargetLabels.Range([&eGroup](const std::string& key, const std::string& value) { eGroup.SetTag(key, value); }); +} + +void StreamScraper::PushEventGroup(PipelineEventGroup&& eGroup) const { + auto item = make_unique(std::move(eGroup), mInputIndex); +#ifdef APSARA_UNIT_TEST_MAIN + mItem.emplace_back(std::move(item)); + return; +#endif + while (true) { + if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item)) == 0) { + break; + } + usleep(10 * 1000); + } +} + +void StreamScraper::SendMetrics() { + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, + ToString(mScrapeTimestampMilliSec)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_STREAM_ID, GetId() + ToString(mScrapeTimestampMilliSec)); + + SetTargetLabels(mEventGroup); + PushEventGroup(std::move(mEventGroup)); + mEventGroup = PipelineEventGroup(std::make_shared()); + mCurrStreamSize = 0; +} + +void StreamScraper::Reset() { + mEventGroup = PipelineEventGroup(std::make_shared()); + mRawSize = 0; + 
mCurrStreamSize = 0; + mCache.clear(); + mStreamIndex = 0; + mScrapeSamplesScraped = 0; +} + +void StreamScraper::SetAutoMetricMeta(double scrapeDurationSeconds, bool upState, const string& scrapeState) { + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE, scrapeState); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, + ToString(mScrapeTimestampMilliSec)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED, ToString(mScrapeSamplesScraped)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(scrapeDurationSeconds)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(mRawSize)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, ToString(upState)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_STREAM_ID, GetId() + ToString(mScrapeTimestampMilliSec)); + mEventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_STREAM_TOTAL, ToString(mStreamIndex)); +} +std::string StreamScraper::GetId() { + return mHash; +} +void StreamScraper::SetScrapeTime(std::chrono::system_clock::time_point scrapeTime) { + mScrapeTimestampMilliSec + = std::chrono::duration_cast(scrapeTime.time_since_epoch()).count(); +} +} // namespace logtail::prom diff --git a/core/prometheus/component/StreamScraper.h b/core/prometheus/component/StreamScraper.h new file mode 100644 index 0000000000..3f3b9ab45d --- /dev/null +++ b/core/prometheus/component/StreamScraper.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#include "Labels.h" +#include "models/PipelineEventGroup.h" +#include "pipeline/queue/QueueKey.h" + +#ifdef APSARA_UNIT_TEST_MAIN +#include + +#include "pipeline/queue/ProcessQueueItem.h" +#endif + +namespace logtail::prom { +class StreamScraper { +public: + StreamScraper(Labels labels, QueueKey queueKey, size_t inputIndex) + : mEventGroup(PipelineEventGroup(std::make_shared())), + mQueueKey(queueKey), + mInputIndex(inputIndex), + 
mTargetLabels(std::move(labels)) {} + + static size_t MetricWriteCallback(char* buffer, size_t size, size_t nmemb, void* data); + void FlushCache(); + void SendMetrics(); + void Reset(); + void SetAutoMetricMeta(double scrapeDurationSeconds, bool upState, const std::string& scrapeState); + + void SetScrapeTime(std::chrono::system_clock::time_point scrapeTime); + + std::string mHash; + size_t mRawSize = 0; + uint64_t mStreamIndex = 0; + uint64_t mScrapeSamplesScraped = 0; + EventPool* mEventPool = nullptr; + +private: + void AddEvent(const char* line, size_t len); + void PushEventGroup(PipelineEventGroup&&) const; + void SetTargetLabels(PipelineEventGroup& eGroup) const; + std::string GetId(); + + size_t mCurrStreamSize = 0; + std::string mCache; + PipelineEventGroup mEventGroup; + + // pipeline + QueueKey mQueueKey; + size_t mInputIndex; + + Labels mTargetLabels; + + // auto metrics + uint64_t mScrapeTimestampMilliSec = 0; +#ifdef APSARA_UNIT_TEST_MAIN + friend class ProcessorParsePrometheusMetricUnittest; + friend class ScrapeSchedulerUnittest; + friend class StreamScraperUnittest; + mutable std::vector> mItem; +#endif +}; +} // namespace logtail::prom diff --git a/core/prometheus/labels/Labels.cpp b/core/prometheus/labels/Labels.cpp index 383cd7cc08..e65e2adc74 100644 --- a/core/prometheus/labels/Labels.cpp +++ b/core/prometheus/labels/Labels.cpp @@ -96,7 +96,7 @@ void Labels::Del(const string& k) { } -void Labels::Range(const std::function& f) { +void Labels::Range(const std::function& f) const { if (mMetricEventPtr) { for (auto l = mMetricEventPtr->TagsBegin(); l != mMetricEventPtr->TagsEnd(); l++) { f(l->first.to_string(), l->second.to_string()); diff --git a/core/prometheus/labels/Labels.h b/core/prometheus/labels/Labels.h index 43081e7617..27a49e9530 100644 --- a/core/prometheus/labels/Labels.h +++ b/core/prometheus/labels/Labels.h @@ -47,10 +47,9 @@ class Labels { void Reset(MetricEvent*); - void Range(const std::function&); + void Range(const 
std::function&) const; private: - LabelMap mLabels; MetricEvent* mMetricEventPtr = nullptr; diff --git a/core/prometheus/labels/TextParser.cpp b/core/prometheus/labels/TextParser.cpp index a3ecb394ab..100d7ddd2e 100644 --- a/core/prometheus/labels/TextParser.cpp +++ b/core/prometheus/labels/TextParser.cpp @@ -17,16 +17,13 @@ #include "prometheus/labels/TextParser.h" #include -#include #include #include -#include "common/StringTools.h" #include "logger/Logger.h" #include "models/MetricEvent.h" #include "models/PipelineEventGroup.h" #include "models/StringView.h" -#include "prometheus/Constants.h" #include "prometheus/Utils.h" using namespace std; diff --git a/core/prometheus/schedulers/BaseScheduler.cpp b/core/prometheus/schedulers/BaseScheduler.cpp index e9e7897c35..877fbc3513 100644 --- a/core/prometheus/schedulers/BaseScheduler.cpp +++ b/core/prometheus/schedulers/BaseScheduler.cpp @@ -24,6 +24,12 @@ void BaseScheduler::SetFirstExecTime(chrono::steady_clock::time_point firstExecT mLatestScrapeTime = mFirstScrapeTime; } +void BaseScheduler::SetScrapeOnceTime(chrono::steady_clock::time_point onceExecTime, + chrono::system_clock::time_point onceScrapeTime) { + mLatestExecTime = onceExecTime; + mLatestScrapeTime = onceScrapeTime; +} + void BaseScheduler::DelayExecTime(uint64_t delaySeconds) { mLatestExecTime = mLatestExecTime + chrono::seconds(delaySeconds); mLatestScrapeTime = mLatestScrapeTime + chrono::seconds(delaySeconds); diff --git a/core/prometheus/schedulers/BaseScheduler.h b/core/prometheus/schedulers/BaseScheduler.h index 3d57155bd5..d66bf4b585 100644 --- a/core/prometheus/schedulers/BaseScheduler.h +++ b/core/prometheus/schedulers/BaseScheduler.h @@ -20,7 +20,10 @@ class BaseScheduler { std::chrono::steady_clock::time_point GetNextExecTime(); - void SetFirstExecTime(std::chrono::steady_clock::time_point firstExecTime,std::chrono::system_clock::time_point firstScrapeTime); + void SetFirstExecTime(std::chrono::steady_clock::time_point firstExecTime, + 
std::chrono::system_clock::time_point firstScrapeTime); + + void SetScrapeOnceTime(std::chrono::steady_clock::time_point, std::chrono::system_clock::time_point); void DelayExecTime(uint64_t delaySeconds); virtual void Cancel(); diff --git a/core/prometheus/schedulers/ScrapeScheduler.cpp b/core/prometheus/schedulers/ScrapeScheduler.cpp index 0adfae2209..8596d6f36b 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.cpp +++ b/core/prometheus/schedulers/ScrapeScheduler.cpp @@ -25,138 +25,93 @@ #include "common/TimeUtil.h" #include "common/timer/HttpRequestTimerEvent.h" #include "logger/Logger.h" -#include "pipeline/queue/ProcessQueueItem.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKey.h" #include "prometheus/Constants.h" +#include "prometheus/Utils.h" #include "prometheus/async/PromFuture.h" #include "prometheus/async/PromHttpRequest.h" +#include "prometheus/component/StreamScraper.h" #include "sdk/Common.h" using namespace std; namespace logtail { -size_t PromMetricWriteCallback(char* buffer, size_t size, size_t nmemb, void* data) { - uint64_t sizes = size * nmemb; - - if (buffer == nullptr || data == nullptr) { - return 0; - } - - auto* body = static_cast(data); - - size_t begin = 0; - for (size_t end = begin; end < sizes; ++end) { - if (buffer[end] == '\n') { - if (begin == 0 && !body->mCache.empty()) { - body->mCache.append(buffer, end); - body->AddEvent(body->mCache.data(), body->mCache.size()); - body->mCache.clear(); - } else if (begin != end) { - body->AddEvent(buffer + begin, end - begin); - } - begin = end + 1; - } - } - if (begin < sizes) { - body->mCache.append(buffer + begin, sizes - begin); - } - body->mRawSize += sizes; - return sizes; -} - ScrapeScheduler::ScrapeScheduler(std::shared_ptr scrapeConfigPtr, std::string host, int32_t port, Labels labels, QueueKey queueKey, size_t inputIndex) - : mScrapeConfigPtr(std::move(scrapeConfigPtr)), + : mPromStreamScraper(labels, queueKey, inputIndex), + 
mScrapeConfigPtr(std::move(scrapeConfigPtr)), mHost(std::move(host)), mPort(port), - mTargetLabels(std::move(labels)), - mQueueKey(queueKey), - mInputIndex(inputIndex) { + mQueueKey(queueKey) { string tmpTargetURL = mScrapeConfigPtr->mScheme + "://" + mHost + ":" + ToString(mPort) + mScrapeConfigPtr->mMetricsPath + (mScrapeConfigPtr->mQueryString.empty() ? "" : "?" + mScrapeConfigPtr->mQueryString); - mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(mTargetLabels.Hash()); + mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(labels.Hash()); mInstance = mHost + ":" + ToString(mPort); mInterval = mScrapeConfigPtr->mScrapeIntervalSeconds; + + mPromStreamScraper.mHash = mHash; } void ScrapeScheduler::OnMetricResult(HttpResponse& response, uint64_t) { static double sRate = 0.001; auto now = GetCurrentTimeInMilliSeconds(); - mScrapeTimestampMilliSec + auto scrapeTimestampMilliSec = chrono::duration_cast(mLatestScrapeTime.time_since_epoch()).count(); - auto scrapeDurationMilliSeconds = now - mScrapeTimestampMilliSec; + auto scrapeDurationMilliSeconds = now - scrapeTimestampMilliSec; - auto& responseBody = *response.GetBody(); - responseBody.FlushCache(); mSelfMonitor->AddCounter(METRIC_PLUGIN_OUT_EVENTS_TOTAL, response.GetStatusCode()); - mSelfMonitor->AddCounter(METRIC_PLUGIN_OUT_SIZE_BYTES, response.GetStatusCode(), responseBody.mRawSize); + mSelfMonitor->AddCounter(METRIC_PLUGIN_OUT_SIZE_BYTES, response.GetStatusCode(), mPromStreamScraper.mRawSize); mSelfMonitor->AddCounter(METRIC_PLUGIN_PROM_SCRAPE_TIME_MS, response.GetStatusCode(), scrapeDurationMilliSeconds); const auto& networkStatus = response.GetNetworkStatus(); + string scrapeState; if (networkStatus.mCode != NetworkCode::Ok) { // not 0 means curl error - mScrapeState = prom::NetworkCodeToState(networkStatus.mCode); + scrapeState = prom::NetworkCodeToState(networkStatus.mCode); } else if (response.GetStatusCode() != 200) { - mScrapeState = prom::HttpCodeToState(response.GetStatusCode()); + 
scrapeState = prom::HttpCodeToState(response.GetStatusCode()); } else { // 0 means success - mScrapeState = prom::NetworkCodeToState(NetworkCode::Ok); + scrapeState = prom::NetworkCodeToState(NetworkCode::Ok); } mScrapeDurationSeconds = scrapeDurationMilliSeconds * sRate; - mScrapeResponseSizeBytes = responseBody.mRawSize; mUpState = response.GetStatusCode() == 200; if (response.GetStatusCode() != 200) { - mScrapeResponseSizeBytes = 0; LOG_WARNING(sLogger, ("scrape failed, status code", response.GetStatusCode())("target", mHash)("curl msg", response.GetNetworkStatus().mMessage)); } - auto& eventGroup = responseBody.mEventGroup; - SetAutoMetricMeta(eventGroup); - SetTargetLabels(eventGroup); - PushEventGroup(std::move(eventGroup)); - mPluginTotalDelayMs->Add(scrapeDurationMilliSeconds); -} - -void ScrapeScheduler::SetAutoMetricMeta(PipelineEventGroup& eGroup) { - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE, mScrapeState); - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, ToString(mScrapeTimestampMilliSec)); - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(mScrapeDurationSeconds)); - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(mScrapeResponseSizeBytes)); - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, ToString(mUpState)); -} + auto mScrapeDurationSeconds = scrapeDurationMilliSeconds * sRate; + auto mUpState = response.GetStatusCode() == 200; + mPromStreamScraper.mStreamIndex++; + mPromStreamScraper.FlushCache(); + mPromStreamScraper.SetAutoMetricMeta(mScrapeDurationSeconds, mUpState, scrapeState); + mPromStreamScraper.SendMetrics(); + mPromStreamScraper.Reset(); -void ScrapeScheduler::SetTargetLabels(PipelineEventGroup& eGroup) { - mTargetLabels.Range([&eGroup](const std::string& key, const std::string& value) { eGroup.SetTag(key, value); }); + mPluginTotalDelayMs->Add(scrapeDurationMilliSeconds); } -void 
ScrapeScheduler::PushEventGroup(PipelineEventGroup&& eGroup) { - auto item = make_unique(std::move(eGroup), mInputIndex); -#ifdef APSARA_UNIT_TEST_MAIN - mItem.push_back(std::move(item)); - return; -#endif - while (true) { - if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item)) == 0) { - break; - } - usleep(10 * 1000); - } -} string ScrapeScheduler::GetId() const { return mHash; } +void ScrapeScheduler::SetComponent(shared_ptr timer, EventPool* eventPool) { + mTimer = std::move(timer); + mEventPool = eventPool; + mPromStreamScraper.mEventPool = mEventPool; +} + void ScrapeScheduler::ScheduleNext() { auto future = std::make_shared>(); auto isContextValidFuture = std::make_shared>(); @@ -169,12 +124,11 @@ void ScrapeScheduler::ScheduleNext() { isContextValidFuture->AddDoneCallback([this]() -> bool { if (ProcessQueueManager::GetInstance()->IsValidToPush(mQueueKey)) { return true; - } else { - this->DelayExecTime(1); - this->mPromDelayTotal->Add(1); - this->ScheduleNext(); - return false; } + this->DelayExecTime(1); + this->mPromDelayTotal->Add(1); + this->ScheduleNext(); + return false; }); if (IsCancelled()) { @@ -189,6 +143,7 @@ void ScrapeScheduler::ScheduleNext() { mIsContextValidFuture = isContextValidFuture; } + mPromStreamScraper.SetScrapeTime(mLatestScrapeTime); auto event = BuildScrapeTimerEvent(GetNextExecTime()); mTimer->PushEvent(std::move(event)); } @@ -211,6 +166,7 @@ std::unique_ptr ScrapeScheduler::BuildScrapeTimerEvent(std::chrono:: if (retry > 0) { retry -= 1; } + mPromStreamScraper.SetScrapeTime(mLatestScrapeTime); auto request = std::make_unique( sdk::HTTP_GET, mScrapeConfigPtr->mScheme == prometheus::HTTPS, @@ -221,15 +177,11 @@ std::unique_ptr ScrapeScheduler::BuildScrapeTimerEvent(std::chrono:: mScrapeConfigPtr->mRequestHeaders, "", HttpResponse( - new PromMetricResponseBody(mEventPool), - [](void* ptr) { delete static_cast(ptr); }, - PromMetricWriteCallback), + &mPromStreamScraper, [](void*) {}, 
prom::StreamScraper::MetricWriteCallback), mScrapeConfigPtr->mScrapeTimeoutSeconds, retry, this->mFuture, - this->mIsContextValidFuture, - mScrapeConfigPtr->mFollowRedirects, - mScrapeConfigPtr->mEnableTLS ? std::optional(mScrapeConfigPtr->mTLS) : std::nullopt); + this->mIsContextValidFuture); auto timerEvent = std::make_unique(execTime, std::move(request)); return timerEvent; } diff --git a/core/prometheus/schedulers/ScrapeScheduler.h b/core/prometheus/schedulers/ScrapeScheduler.h index fe73e5b86c..551ada0da9 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.h +++ b/core/prometheus/schedulers/ScrapeScheduler.h @@ -21,11 +21,10 @@ #include "BaseScheduler.h" #include "common/http/HttpResponse.h" -#include "models/PipelineEventGroup.h" #include "monitor/metric_models/MetricTypes.h" #include "pipeline/queue/QueueKey.h" #include "prometheus/PromSelfMonitor.h" -#include "prometheus/Utils.h" +#include "prometheus/component/StreamScraper.h" #include "prometheus/schedulers/ScrapeConfig.h" #ifdef APSARA_UNIT_TEST_MAIN @@ -34,29 +33,6 @@ namespace logtail { -size_t PromMetricWriteCallback(char* buffer, size_t size, size_t nmemb, void* data); - -struct PromMetricResponseBody { - PipelineEventGroup mEventGroup; - std::string mCache; - size_t mRawSize = 0; - EventPool* mEventPool = nullptr; - - explicit PromMetricResponseBody(EventPool* eventPool) - : mEventGroup(std::make_shared()), mEventPool(eventPool) {}; - void AddEvent(char* line, size_t len) { - if (IsValidMetric(StringView(line, len))) { - auto* e = mEventGroup.AddRawEvent(true, mEventPool); - auto sb = mEventGroup.GetSourceBuffer()->CopyString(line, len); - e->SetContentNoCopy(sb); - } - } - void FlushCache() { - AddEvent(mCache.data(), mCache.size()); - mCache.clear(); - } -}; - class ScrapeScheduler : public BaseScheduler { public: ScrapeScheduler(std::shared_ptr scrapeConfigPtr, @@ -65,38 +41,35 @@ class ScrapeScheduler : public BaseScheduler { Labels labels, QueueKey queueKey, size_t inputIndex); - 
ScrapeScheduler(const ScrapeScheduler&) = default; + ScrapeScheduler(const ScrapeScheduler&) = delete; ~ScrapeScheduler() override = default; void OnMetricResult(HttpResponse&, uint64_t timestampMilliSec); std::string GetId() const; + void SetComponent(std::shared_ptr timer, EventPool* eventPool); + void ScheduleNext() override; void ScrapeOnce(std::chrono::steady_clock::time_point execTime); void Cancel() override; void InitSelfMonitor(const MetricLabels&); private: - void PushEventGroup(PipelineEventGroup&&); - void SetAutoMetricMeta(PipelineEventGroup& eGroup); - void SetTargetLabels(PipelineEventGroup& eGroup); - std::unique_ptr BuildScrapeTimerEvent(std::chrono::steady_clock::time_point execTime); - std::shared_ptr mScrapeConfigPtr; + prom::StreamScraper mPromStreamScraper; + std::shared_ptr mScrapeConfigPtr; std::string mHash; std::string mHost; int32_t mPort; std::string mInstance; - Labels mTargetLabels; + // pipeline QueueKey mQueueKey; - size_t mInputIndex; // auto metrics - std::string mScrapeState; uint64_t mScrapeTimestampMilliSec = 0; double mScrapeDurationSeconds = 0; uint64_t mScrapeResponseSizeBytes = 0; @@ -110,7 +83,6 @@ class ScrapeScheduler : public BaseScheduler { #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParsePrometheusMetricUnittest; friend class ScrapeSchedulerUnittest; - std::vector> mItem; #endif }; diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp index 7745f6dda1..ca8a3ad97c 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp @@ -121,7 +121,7 @@ void TargetSubscriberScheduler::UpdateScrapeScheduler( < mUnRegisterMs)) { // scrape once just now LOG_INFO(sLogger, ("scrape zero cost", ToString(tmpCurrentMilliSeconds))); - v->ScrapeOnce(std::chrono::steady_clock::now()); + v->SetScrapeOnceTime(chrono::steady_clock::now(), chrono::system_clock::now()); } v->ScheduleNext(); } 
diff --git a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp index ad0cd3914e..24f263d348 100644 --- a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp @@ -195,7 +195,9 @@ test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); // without metadata - processor.AddAutoMetrics(eventGroup); + auto autoMetric = prom::AutoMetric(); + processor.UpdateAutoMetrics(eventGroup, autoMetric); + processor.AddAutoMetrics(eventGroup, autoMetric); APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); // with metadata @@ -207,22 +209,24 @@ test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE, string("OK")); eventGroup.SetTag(string("instance"), "localhost:8080"); eventGroup.SetTag(string("job"), "test_job"); - processor.AddAutoMetrics(eventGroup); + processor.UpdateAutoMetrics(eventGroup, autoMetric); + processor.AddAutoMetrics(eventGroup, autoMetric); - APSARA_TEST_EQUAL((size_t)16, eventGroup.GetEvents().size()); + // SCRAPE_SAMPLES_POST_METRIC_RELABELING is removed + APSARA_TEST_EQUAL((size_t)15, eventGroup.GetEvents().size()); APSARA_TEST_EQUAL(1.5, eventGroup.GetEvents().at(8).Cast().GetValue()->mValue); APSARA_TEST_EQUAL(2325, eventGroup.GetEvents().at(9).Cast().GetValue()->mValue); APSARA_TEST_EQUAL(1000, eventGroup.GetEvents().at(10).Cast().GetValue()->mValue); APSARA_TEST_EQUAL(8, eventGroup.GetEvents().at(11).Cast().GetValue()->mValue); - APSARA_TEST_EQUAL(8, eventGroup.GetEvents().at(12).Cast().GetValue()->mValue); - APSARA_TEST_EQUAL(15, eventGroup.GetEvents().at(13).Cast().GetValue()->mValue); + // APSARA_TEST_EQUAL(8, eventGroup.GetEvents().at(12).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL(15, 
eventGroup.GetEvents().at(12).Cast().GetValue()->mValue); // scrape_state - APSARA_TEST_EQUAL(1, eventGroup.GetEvents().at(14).Cast().GetValue()->mValue); - APSARA_TEST_EQUAL("OK", eventGroup.GetEvents().at(14).Cast().GetTag("status")); + APSARA_TEST_EQUAL(1, eventGroup.GetEvents().at(13).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL("OK", eventGroup.GetEvents().at(13).Cast().GetTag("status")); // up - APSARA_TEST_EQUAL(1, eventGroup.GetEvents().at(15).Cast().GetValue()->mValue); - APSARA_TEST_EQUAL("localhost:8080", eventGroup.GetEvents().at(15).Cast().GetTag("instance")); - APSARA_TEST_EQUAL("test_job", eventGroup.GetEvents().at(15).Cast().GetTag("job")); + APSARA_TEST_EQUAL(1, eventGroup.GetEvents().at(14).Cast().GetValue()->mValue); + APSARA_TEST_EQUAL("localhost:8080", eventGroup.GetEvents().at(14).Cast().GetTag("instance")); + APSARA_TEST_EQUAL("test_job", eventGroup.GetEvents().at(14).Cast().GetTag("job")); } void ProcessorPromRelabelMetricNativeUnittest::TestHonorLabels() { diff --git a/core/unittest/prometheus/CMakeLists.txt b/core/unittest/prometheus/CMakeLists.txt index ef9aeddae5..29463fe97e 100644 --- a/core/unittest/prometheus/CMakeLists.txt +++ b/core/unittest/prometheus/CMakeLists.txt @@ -45,6 +45,9 @@ target_link_libraries(prom_utils_unittest ${UT_BASE_TARGET}) add_executable(prom_asyn_unittest PromAsynUnittest.cpp) target_link_libraries(prom_asyn_unittest ${UT_BASE_TARGET}) +add_executable(stream_scraper_unittest StreamScraperUnittest.cpp) +target_link_libraries(stream_scraper_unittest ${UT_BASE_TARGET}) + include(GoogleTest) gtest_discover_tests(prom_self_monitor_unittest) @@ -57,6 +60,7 @@ gtest_discover_tests(textparser_unittest) gtest_discover_tests(scrape_config_unittest) gtest_discover_tests(prom_utils_unittest) gtest_discover_tests(prom_asyn_unittest) +gtest_discover_tests(stream_scraper_unittest) add_executable(textparser_benchmark TextParserBenchmark.cpp) target_link_libraries(textparser_benchmark ${UT_BASE_TARGET}) \ No newline at end 
of file diff --git a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp index a8d696db4d..dc159d3cb1 100644 --- a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp +++ b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp @@ -67,14 +67,13 @@ void ScrapeSchedulerUnittest::TestInitscrapeScheduler() { void ScrapeSchedulerUnittest::TestProcess() { EventPool eventPool{true}; - HttpResponse httpResponse = HttpResponse( - new PromMetricResponseBody(&eventPool), - [](void* ptr) { delete static_cast(ptr); }, - PromMetricWriteCallback); + Labels labels; labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + HttpResponse httpResponse + = HttpResponse(&event.mPromStreamScraper, [](void*) {}, prom::StreamScraper::MetricWriteCallback); auto defaultLabels = MetricLabels(); event.InitSelfMonitor(defaultLabels); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); @@ -83,18 +82,19 @@ void ScrapeSchedulerUnittest::TestProcess() { httpResponse.SetStatusCode(503); httpResponse.SetNetworkStatus(CURLE_OK); event.OnMetricResult(httpResponse, 0); - APSARA_TEST_EQUAL(1UL, event.mItem.size()); - event.mItem.clear(); + APSARA_TEST_EQUAL(1UL, event.mPromStreamScraper.mItem.size()); + event.mPromStreamScraper.mItem.clear(); - httpResponse.GetBody()->mEventGroup = PipelineEventGroup(std::make_shared()); httpResponse.SetStatusCode(503); httpResponse.SetNetworkStatus(CURLE_COULDNT_CONNECT); event.OnMetricResult(httpResponse, 0); - APSARA_TEST_EQUAL(event.mScrapeState, "ERR_CONN_FAILED"); - APSARA_TEST_EQUAL(1UL, event.mItem.size()); - event.mItem.clear(); + APSARA_TEST_EQUAL(event.mPromStreamScraper.mItem[0] + ->mEventGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE) + .to_string(), + "ERR_CONN_FAILED"); + APSARA_TEST_EQUAL(1UL, 
event.mPromStreamScraper.mItem.size()); + event.mPromStreamScraper.mItem.clear(); - httpResponse.GetBody()->mEventGroup = PipelineEventGroup(std::make_shared()); httpResponse.SetStatusCode(200); httpResponse.SetNetworkStatus(CURLE_OK); string body1 = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" @@ -118,23 +118,22 @@ void ScrapeSchedulerUnittest::TestProcess() { "# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n" "# TYPE go_memstats_alloc_bytes_total counter\n" "go_memstats_alloc_bytes_total 1.5159292e+08"; - PromMetricWriteCallback( - body1.data(), (size_t)1, (size_t)body1.length(), (void*)httpResponse.GetBody()); + prom::StreamScraper::MetricWriteCallback( + body1.data(), (size_t)1, (size_t)body1.length(), (void*)httpResponse.GetBody()); event.OnMetricResult(httpResponse, 0); - APSARA_TEST_EQUAL(1UL, event.mItem.size()); - APSARA_TEST_EQUAL(11UL, event.mItem[0]->mEventGroup.GetEvents().size()); + APSARA_TEST_EQUAL(1UL, event.mPromStreamScraper.mItem.size()); + APSARA_TEST_EQUAL(11UL, event.mPromStreamScraper.mItem[0]->mEventGroup.GetEvents().size()); } void ScrapeSchedulerUnittest::TestStreamMetricWriteCallback() { EventPool eventPool{true}; - HttpResponse httpResponse = HttpResponse( - new PromMetricResponseBody(&eventPool), - [](void* ptr) { delete static_cast(ptr); }, - PromMetricWriteCallback); + Labels labels; labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + HttpResponse httpResponse + = HttpResponse(&event.mPromStreamScraper, [](void*) {}, prom::StreamScraper::MetricWriteCallback); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); string body1 = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" @@ -160,9 +159,9 @@ void 
ScrapeSchedulerUnittest::TestStreamMetricWriteCallback() { "# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n" "# TYPE go_memstats_alloc_bytes_total counter\n" "go_memstats_alloc_bytes_total 1.5159292e+08"; - PromMetricWriteCallback( - body1.data(), (size_t)1, (size_t)body1.length(), (void*)httpResponse.GetBody()); - auto& res = httpResponse.GetBody()->mEventGroup; + prom::StreamScraper::MetricWriteCallback( + body1.data(), (size_t)1, (size_t)body1.length(), (void*)httpResponse.GetBody()); + auto& res = httpResponse.GetBody()->mEventGroup; APSARA_TEST_EQUAL(7UL, res.GetEvents().size()); APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05", res.GetEvents()[0].Cast().GetContent()); @@ -177,9 +176,9 @@ void ScrapeSchedulerUnittest::TestStreamMetricWriteCallback() { APSARA_TEST_EQUAL("go_gc_duration_seconds_sum 0.034885631", res.GetEvents()[5].Cast().GetContent()); APSARA_TEST_EQUAL("go_gc_duration_seconds_count 850", res.GetEvents()[6].Cast().GetContent()); // httpResponse.GetBody()->mEventGroup = PipelineEventGroup(std::make_shared()); - PromMetricWriteCallback( - body2.data(), (size_t)1, (size_t)body2.length(), (void*)httpResponse.GetBody()); - httpResponse.GetBody()->FlushCache(); + prom::StreamScraper::MetricWriteCallback( + body2.data(), (size_t)1, (size_t)body2.length(), (void*)httpResponse.GetBody()); + httpResponse.GetBody()->FlushCache(); APSARA_TEST_EQUAL(11UL, res.GetEvents().size()); APSARA_TEST_EQUAL("go_goroutines 7", res.GetEvents()[7].Cast().GetContent()); diff --git a/core/unittest/prometheus/StreamScraperUnittest.cpp b/core/unittest/prometheus/StreamScraperUnittest.cpp new file mode 100644 index 0000000000..caa08add6c --- /dev/null +++ b/core/unittest/prometheus/StreamScraperUnittest.cpp @@ -0,0 +1,189 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include +#include + +#include "EventPool.h" +#include "Flags.h" +#include "models/RawEvent.h" +#include "prometheus/Constants.h" +#include "prometheus/component/StreamScraper.h" +#include "prometheus/labels/Labels.h" +#include "prometheus/schedulers/ScrapeConfig.h" +#include "unittest/Unittest.h" + +using namespace std; + +DECLARE_FLAG_INT64(prom_stream_bytes_size); + +namespace logtail::prom { +class StreamScraperUnittest : public testing::Test { +public: + void TestStreamMetricWriteCallback(); + void TestStreamSendMetric(); + + +protected: + void SetUp() override { + mScrapeConfig = make_shared(); + mScrapeConfig->mJobName = "test_job"; + mScrapeConfig->mScheme = "http"; + mScrapeConfig->mScrapeIntervalSeconds = 10; + mScrapeConfig->mScrapeTimeoutSeconds = 10; + mScrapeConfig->mMetricsPath = "/metrics"; + mScrapeConfig->mRequestHeaders = {{"Authorization", "Bearer xxxxx"}}; + } + +private: + std::shared_ptr mScrapeConfig; +}; + +void StreamScraperUnittest::TestStreamMetricWriteCallback() { + EventPool eventPool{true}; + + Labels labels; + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + auto streamScraper = make_shared(labels, 0, 0); + streamScraper->mEventPool = &eventPool; + streamScraper->mHash = "test_jobhttp://localhost:8080/metrics"; + + string body1 = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" + "# TYPE go_gc_duration_seconds summary\n" + "go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05\n" + "go_gc_duration_seconds{quantile=\"0.25\"} 3.9357e-05\n" + 
"go_gc_duration_seconds{quantile=\"0.5\"} 4.1114e-05\n" + "go_gc_duration_seconds{quantile=\"0.75\"} 4.3372e-05\n" + "go_gc_duration_seconds{quantile=\"1\"} 0.000112326\n" + "go_gc_duration_seconds_sum 0.034885631\n" + "go_gc_duration_seconds_count 850\n" + "# HELP go_goroutines Number of goroutines t" + "hat currently exist.\n" + "# TYPE go_goroutines gauge\n" + "go_go"; + string body2 = "routines 7\n" + "# HELP go_info Information about the Go environment.\n" + "# TYPE go_info gauge\n" + "go_info{version=\"go1.22.3\"} 1\n" + "# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n" + "# TYPE go_memstats_alloc_bytes gauge\n" + "go_memstats_alloc_bytes 6.742688e+06\n" + "# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n" + "# TYPE go_memstats_alloc_bytes_total counter\n" + "go_memstats_alloc_bytes_total 1.5159292e+08"; + + StreamScraper::MetricWriteCallback(body1.data(), (size_t)1, (size_t)body1.length(), streamScraper.get()); + auto& res = streamScraper->mEventGroup; + APSARA_TEST_EQUAL(7UL, res.GetEvents().size()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05", + res.GetEvents()[0].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.25\"} 3.9357e-05", + res.GetEvents()[1].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.5\"} 4.1114e-05", + res.GetEvents()[2].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.75\"} 4.3372e-05", + res.GetEvents()[3].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"1\"} 0.000112326", + res.GetEvents()[4].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds_sum 0.034885631", res.GetEvents()[5].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds_count 850", res.GetEvents()[6].Cast().GetContent()); + + StreamScraper::MetricWriteCallback(body2.data(), (size_t)1, (size_t)body2.length(), streamScraper.get()); + 
streamScraper->FlushCache(); + APSARA_TEST_EQUAL(11UL, res.GetEvents().size()); + + APSARA_TEST_EQUAL("go_goroutines 7", res.GetEvents()[7].Cast().GetContent()); + APSARA_TEST_EQUAL("go_info{version=\"go1.22.3\"} 1", res.GetEvents()[8].Cast().GetContent()); + APSARA_TEST_EQUAL("go_memstats_alloc_bytes 6.742688e+06", res.GetEvents()[9].Cast().GetContent()); + APSARA_TEST_EQUAL("go_memstats_alloc_bytes_total 1.5159292e+08", res.GetEvents()[10].Cast().GetContent()); +} + +void StreamScraperUnittest::TestStreamSendMetric() { + EventPool eventPool{true}; + + Labels labels; + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + auto streamScraper = make_shared(labels, 0, 0); + streamScraper->mEventPool = &eventPool; + streamScraper->mHash = "test_jobhttp://localhost:8080/metrics"; + + string body1 = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" + "# TYPE go_gc_duration_seconds summary\n" + "go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05\n" + "go_gc_duration_seconds{quantile=\"0.25\"} 3.9357e-05\n" + "go_gc_duration_seconds{quantile=\"0.5\"} 4.1114e-05\n" + "go_gc_duration_seconds{quantile=\"0.75\"} 4.3372e-05\n" + "go_gc_duration_seconds{quantile=\"1\"} 0.000112326\n" + "go_gc_duration_seconds_sum 0.034885631\n" + "go_gc_duration_seconds_count 850\n" + "# HELP go_goroutines Number of goroutines t" + "hat currently exist.\n" + "# TYPE go_goroutines gauge\n" + "go_go"; + string body2 = "routines 7\n" + "# HELP go_info Information about the Go environment.\n" + "# TYPE go_info gauge\n" + "go_info{version=\"go1.22.3\"} 1\n" + "# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n" + "# TYPE go_memstats_alloc_bytes gauge\n" + "go_memstats_alloc_bytes 6.742688e+06\n" + "# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n" + "# TYPE go_memstats_alloc_bytes_total counter\n" + "go_memstats_alloc_bytes_total 1.5159292e+08"; + + 
INT64_FLAG(prom_stream_bytes_size) = body1.length() - 2; + + StreamScraper::MetricWriteCallback(body1.data(), (size_t)1, (size_t)body1.length(), streamScraper.get()); + APSARA_TEST_EQUAL(0UL, streamScraper->mEventGroup.GetEvents().size()); + + auto& res = streamScraper->mItem[0]->mEventGroup; + APSARA_TEST_EQUAL(7UL, res.GetEvents().size()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05", + res.GetEvents()[0].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.25\"} 3.9357e-05", + res.GetEvents()[1].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.5\"} 4.1114e-05", + res.GetEvents()[2].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"0.75\"} 4.3372e-05", + res.GetEvents()[3].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds{quantile=\"1\"} 0.000112326", + res.GetEvents()[4].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds_sum 0.034885631", res.GetEvents()[5].Cast().GetContent()); + APSARA_TEST_EQUAL("go_gc_duration_seconds_count 850", res.GetEvents()[6].Cast().GetContent()); + + StreamScraper::MetricWriteCallback(body2.data(), (size_t)1, (size_t)body2.length(), streamScraper.get()); + APSARA_TEST_EQUAL(3UL, streamScraper->mEventGroup.GetEvents().size()); + streamScraper->FlushCache(); + APSARA_TEST_EQUAL(4UL, streamScraper->mEventGroup.GetEvents().size()); + streamScraper->SendMetrics(); + auto& res1 = streamScraper->mItem[1]->mEventGroup; + APSARA_TEST_EQUAL(4UL, res1.GetEvents().size()); + + APSARA_TEST_EQUAL("go_goroutines 7", res1.GetEvents()[0].Cast().GetContent()); + APSARA_TEST_EQUAL("go_info{version=\"go1.22.3\"} 1", res1.GetEvents()[1].Cast().GetContent()); + APSARA_TEST_EQUAL("go_memstats_alloc_bytes 6.742688e+06", res1.GetEvents()[2].Cast().GetContent()); + APSARA_TEST_EQUAL("go_memstats_alloc_bytes_total 1.5159292e+08", res1.GetEvents()[3].Cast().GetContent()); +} + + +UNIT_TEST_CASE(StreamScraperUnittest, 
TestStreamMetricWriteCallback) +UNIT_TEST_CASE(StreamScraperUnittest, TestStreamSendMetric) + + +} // namespace logtail::prom + +UNIT_TEST_MAIN \ No newline at end of file From 6b2fde74da42e8899506f671ee1ebe38dc15271b Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Mon, 30 Dec 2024 19:00:46 +0800 Subject: [PATCH 09/12] add ut (#1997) --- core/monitor/SelfMonitorServer.h | 3 + core/pipeline/Pipeline.h | 1 + core/plugin/input/InputInternalMetrics.h | 3 + core/unittest/input/CMakeLists.txt | 4 + .../input/InputInternalMetricsUnittest.cpp | 184 ++++++++++++++++++ 5 files changed, 195 insertions(+) create mode 100644 core/unittest/input/InputInternalMetricsUnittest.cpp diff --git a/core/monitor/SelfMonitorServer.h b/core/monitor/SelfMonitorServer.h index cea3448b0c..fc92f6ac17 100644 --- a/core/monitor/SelfMonitorServer.h +++ b/core/monitor/SelfMonitorServer.h @@ -56,6 +56,9 @@ class SelfMonitorServer { PipelineContext* mAlarmPipelineCtx; std::mutex mAlarmPipelineMux; +#ifdef APSARA_UNIT_TEST_MAIN + friend class InputInternalMetricsUnittest; +#endif }; } // namespace logtail \ No newline at end of file diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index 5009b3c445..fe2c79bb2e 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -121,6 +121,7 @@ class Pipeline { friend class PipelineUnittest; friend class InputContainerStdioUnittest; friend class InputFileUnittest; + friend class InputInternalMetricsUnittest; friend class InputPrometheusUnittest; friend class ProcessorTagNativeUnittest; friend class FlusherSLSUnittest; diff --git a/core/plugin/input/InputInternalMetrics.h b/core/plugin/input/InputInternalMetrics.h index 694edf85af..6a11d5788e 100644 --- a/core/plugin/input/InputInternalMetrics.h +++ b/core/plugin/input/InputInternalMetrics.h @@ -32,6 +32,9 @@ class InputInternalMetrics : public Input { bool SupportAck() const override { return true; } private: SelfMonitorMetricRules mSelfMonitorMetricRules; 
+#ifdef APSARA_UNIT_TEST_MAIN + friend class InputInternalMetricsUnittest; +#endif }; } // namespace logtail \ No newline at end of file diff --git a/core/unittest/input/CMakeLists.txt b/core/unittest/input/CMakeLists.txt index 49129244cf..4544e875bf 100644 --- a/core/unittest/input/CMakeLists.txt +++ b/core/unittest/input/CMakeLists.txt @@ -36,6 +36,9 @@ target_link_libraries(input_ebpf_network_security_unittest unittest_base) add_executable(input_ebpf_network_observer_unittest InputNetworkObserverUnittest.cpp) target_link_libraries(input_ebpf_network_observer_unittest unittest_base) +add_executable(input_internal_metrics_unittest InputInternalMetricsUnittest.cpp) +target_link_libraries(input_internal_metrics_unittest ${UT_BASE_TARGET}) + include(GoogleTest) gtest_discover_tests(input_file_unittest) gtest_discover_tests(input_container_stdio_unittest) @@ -44,3 +47,4 @@ gtest_discover_tests(input_ebpf_file_security_unittest) gtest_discover_tests(input_ebpf_process_security_unittest) gtest_discover_tests(input_ebpf_network_security_unittest) gtest_discover_tests(input_ebpf_network_observer_unittest) +gtest_discover_tests(input_internal_metrics_unittest) diff --git a/core/unittest/input/InputInternalMetricsUnittest.cpp b/core/unittest/input/InputInternalMetricsUnittest.cpp new file mode 100644 index 0000000000..e9e01455b5 --- /dev/null +++ b/core/unittest/input/InputInternalMetricsUnittest.cpp @@ -0,0 +1,184 @@ +// Copyright 2023 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include +#include + +#include "app_config/AppConfig.h" +#include "common/JsonUtil.h" +#include "monitor/Monitor.h" +#include "pipeline/Pipeline.h" +#include "pipeline/PipelineContext.h" +#include "pipeline/plugin/PluginRegistry.h" +#include "plugin/input/InputInternalMetrics.h" +#include "unittest/Unittest.h" + +DECLARE_FLAG_INT32(default_plugin_log_queue_size); + +using namespace std; + +namespace logtail { + +class InputInternalMetricsUnittest : public testing::Test { +public: + void OnInit(); + void OnPipelineUpdate(); + +protected: + static void SetUpTestCase() { + LoongCollectorMonitor::GetInstance()->Init(); + PluginRegistry::GetInstance()->LoadPlugins(); + } + + static void TearDownTestCase() { + PluginRegistry::GetInstance()->UnloadPlugins(); + LoongCollectorMonitor::GetInstance()->Stop(); + } + + void SetUp() override { + p.mName = "test_config"; + ctx.SetConfigName("test_config"); + p.mPluginID.store(0); + ctx.SetPipeline(p); + } + +private: + Pipeline p; + PipelineContext ctx; +}; + +void InputInternalMetricsUnittest::OnInit() { + unique_ptr input; + Json::Value configJson, optionalGoPipeline; + string configStr, errorMsg; + + configStr = R"( + { + "Type": "input_internal_metrics", + "Agent": { + "Enable": true, + "Interval": 1 + }, + "Runner": { + "Enable": false, + "Interval": 2 + }, + "Pipeline": { + "Enable": true, + "Interval": 3 + }, + "PluginSource": { + "Enable": true, + "Interval": 4 + }, + "Plugin": { + "Enable": true, + "Interval": 5 + }, + "Component": { + "Enable": false, + "Interval": 6 + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.reset(new InputInternalMetrics()); + input->SetContext(ctx); + input->SetMetricsRecordRef(InputInternalMetrics::sName, "1"); + APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); + APSARA_TEST_TRUE(input->Start()); + 
APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mAgentMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mAgentMetricsRule.mInterval, 1); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mComponentMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mComponentMetricsRule.mInterval, 6); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPipelineMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPipelineMetricsRule.mInterval, 3); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPluginMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPluginMetricsRule.mInterval, 5); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPluginSourceMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mPluginSourceMetricsRule.mInterval, 4); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mRunnerMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(input->mSelfMonitorMetricRules.mRunnerMetricsRule.mInterval, 2); + APSARA_TEST_TRUE(input->Stop(true)); +} + +void InputInternalMetricsUnittest::OnPipelineUpdate() { + Json::Value configJson, optionalGoPipeline; + InputInternalMetrics input; + input.SetContext(ctx); + string configStr, errorMsg; + + configStr = R"( + { + "Type": "input_internal_metrics", + "Agent": { + "Enable": false, + "Interval": 7 + }, + "Runner": { + "Enable": true, + "Interval": 8 + }, + "Pipeline": { + "Enable": false, + "Interval": 9 + }, + "PluginSource": { + "Enable": false, + "Interval": 10 + }, + "Plugin": { + "Enable": false, + "Interval": 11 + }, + "Component": { + "Enable": true, + "Interval": 12 + } + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + input.SetContext(ctx); + input.SetMetricsRecordRef(InputInternalMetrics::sName, "1"); + APSARA_TEST_TRUE(input.Init(configJson, optionalGoPipeline)); + + APSARA_TEST_TRUE(input.Start()); + APSARA_TEST_NOT_EQUAL(nullptr, 
SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mAgentMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mAgentMetricsRule.mInterval, 7); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mComponentMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mComponentMetricsRule.mInterval, 12); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPipelineMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPipelineMetricsRule.mInterval, 9); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPluginMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPluginMetricsRule.mInterval, 11); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPluginSourceMetricsRule.mEnable, false); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mPluginSourceMetricsRule.mInterval, 10); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mRunnerMetricsRule.mEnable, true); + APSARA_TEST_EQUAL(SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules->mRunnerMetricsRule.mInterval, 8); + + APSARA_TEST_TRUE(input.Stop(true)); + APSARA_TEST_EQUAL(nullptr, SelfMonitorServer::GetInstance()->mSelfMonitorMetricRules); + APSARA_TEST_EQUAL(nullptr, SelfMonitorServer::GetInstance()->mMetricPipelineCtx); +} + +UNIT_TEST_CASE(InputInternalMetricsUnittest, OnInit) +UNIT_TEST_CASE(InputInternalMetricsUnittest, OnPipelineUpdate) + +} // namespace logtail + +UNIT_TEST_MAIN From dc71cb83a5377b2b8d412398d1dac46af6a6003d Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Tue, 31 Dec 2024 15:56:36 +0800 Subject: [PATCH 10/12] 
feat: add metrics for logtail mode (#2001) * init * fix ut --- core/models/MetricValue.cpp | 4 +-- core/models/MetricValue.h | 4 +-- .../metric_constants/MetricConstants.h | 1 + .../metric_constants/PluginMetrics.cpp | 1 + core/pipeline/Pipeline.cpp | 9 +++--- core/pipeline/plugin/interface/Plugin.h | 1 + core/pipeline/serializer/JsonSerializer.cpp | 4 +-- core/plugin/flusher/sls/FlusherSLS.cpp | 28 +++++++++++++++---- core/plugin/flusher/sls/FlusherSLS.h | 1 + core/unittest/pipeline/PipelineUnittest.cpp | 2 +- 10 files changed, 38 insertions(+), 17 deletions(-) diff --git a/core/models/MetricValue.cpp b/core/models/MetricValue.cpp index c262367e07..1fc05f76fd 100644 --- a/core/models/MetricValue.cpp +++ b/core/models/MetricValue.cpp @@ -56,11 +56,11 @@ void UntypedMultiDoubleValues::DelValue(StringView key) { mValues.erase(key); } -std::map::const_iterator UntypedMultiDoubleValues::ValusBegin() const { +std::map::const_iterator UntypedMultiDoubleValues::ValuesBegin() const { return mValues.begin(); } -std::map::const_iterator UntypedMultiDoubleValues::ValusEnd() const { +std::map::const_iterator UntypedMultiDoubleValues::ValuesEnd() const { return mValues.end(); } diff --git a/core/models/MetricValue.h b/core/models/MetricValue.h index b3caef108c..a18059cec0 100644 --- a/core/models/MetricValue.h +++ b/core/models/MetricValue.h @@ -58,8 +58,8 @@ struct UntypedMultiDoubleValues { void SetValueNoCopy(StringView key, double val); void DelValue(StringView key); - std::map::const_iterator ValusBegin() const; - std::map::const_iterator ValusEnd() const; + std::map::const_iterator ValuesBegin() const; + std::map::const_iterator ValuesEnd() const; size_t ValusSize() const; size_t DataSize() const; diff --git a/core/monitor/metric_constants/MetricConstants.h b/core/monitor/metric_constants/MetricConstants.h index 95a6eb1af2..c98522f4d1 100644 --- a/core/monitor/metric_constants/MetricConstants.h +++ b/core/monitor/metric_constants/MetricConstants.h @@ -162,6 +162,7 @@ 
extern const std::string METRIC_PLUGIN_FLUSHER_TOTAL_PACKAGE_TIME_MS; extern const std::string METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL; extern const std::string METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL; extern const std::string METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_DISCARD_TOTAL; extern const std::string METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL; extern const std::string METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL; extern const std::string METRIC_PLUGIN_FLUSHER_UNAUTH_ERROR_TOTAL; diff --git a/core/monitor/metric_constants/PluginMetrics.cpp b/core/monitor/metric_constants/PluginMetrics.cpp index 329e487539..aa04551423 100644 --- a/core/monitor/metric_constants/PluginMetrics.cpp +++ b/core/monitor/metric_constants/PluginMetrics.cpp @@ -140,6 +140,7 @@ const string METRIC_PLUGIN_FLUSHER_TOTAL_PACKAGE_TIME_MS = "total_package_time_m const string METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL = "send_total"; const string METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL = "send_done_total"; const string METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL = "success_total"; +const string METRIC_PLUGIN_FLUSHER_DISCARD_TOTAL = "discard_total"; const string METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL = "network_error_total"; const string METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL = "server_error_total"; const string METRIC_PLUGIN_FLUSHER_UNAUTH_ERROR_TOTAL = "unauth_error_total"; diff --git a/core/pipeline/Pipeline.cpp b/core/pipeline/Pipeline.cpp index 3ed21f7d2b..8d4c2a75d6 100644 --- a/core/pipeline/Pipeline.cpp +++ b/core/pipeline/Pipeline.cpp @@ -317,10 +317,11 @@ bool Pipeline::Init(PipelineConfig&& config) { ProcessQueueManager::GetInstance()->SetDownStreamQueues(mContext.GetProcessQueueKey(), std::move(senderQueues)); } - WriteMetrics::GetInstance()->PrepareMetricsRecordRef( - mMetricsRecordRef, - MetricCategory::METRIC_CATEGORY_PIPELINE, - {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mName}}); + 
WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, + MetricCategory::METRIC_CATEGORY_PIPELINE, + {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, mName}, + {METRIC_LABEL_KEY_LOGSTORE, mContext.GetLogstoreName()}}); mStartTime = mMetricsRecordRef.CreateIntGauge(METRIC_PIPELINE_START_TIME); mProcessorsInEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL); mProcessorsInGroupsTotal = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENT_GROUPS_TOTAL); diff --git a/core/pipeline/plugin/interface/Plugin.h b/core/pipeline/plugin/interface/Plugin.h index 999b10b921..6aef73b37a 100644 --- a/core/pipeline/plugin/interface/Plugin.h +++ b/core/pipeline/plugin/interface/Plugin.h @@ -42,6 +42,7 @@ class Plugin { MetricCategory::METRIC_CATEGORY_PLUGIN, {{METRIC_LABEL_KEY_PROJECT, mContext->GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mContext->GetConfigName()}, + {METRIC_LABEL_KEY_LOGSTORE, mContext->GetLogstoreName()}, {METRIC_LABEL_KEY_PLUGIN_TYPE, name}, {METRIC_LABEL_KEY_PLUGIN_ID, id}}); } diff --git a/core/pipeline/serializer/JsonSerializer.cpp b/core/pipeline/serializer/JsonSerializer.cpp index c83b45e282..8535a735a2 100644 --- a/core/pipeline/serializer/JsonSerializer.cpp +++ b/core/pipeline/serializer/JsonSerializer.cpp @@ -83,8 +83,8 @@ bool JsonEventGroupSerializer::Serialize(BatchedEvents&& group, string& res, str eventJson[METRIC_RESERVED_KEY_VALUE] = e.GetValue()->mValue; } else if (e.Is()) { eventJson[METRIC_RESERVED_KEY_VALUE] = Json::Value(); - for (auto value = e.GetValue()->ValusBegin(); - value != e.GetValue()->ValusEnd(); + for (auto value = e.GetValue()->ValuesBegin(); + value != e.GetValue()->ValuesEnd(); value++) { eventJson[METRIC_RESERVED_KEY_VALUE][value->first.to_string()] = value->second; } diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index 3ecb22a0be..765318b77f 100644 --- 
a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -126,12 +126,14 @@ shared_ptr FlusherSLS::GetLogstoreConcurrencyLimiter(const s auto iter = sLogstoreConcurrencyLimiterMap.find(key); if (iter == sLogstoreConcurrencyLimiterMap.end()) { - auto limiter = make_shared(sName + "#quota#logstore#" + key, AppConfig::GetInstance()->GetSendRequestConcurrency()); + auto limiter = make_shared(sName + "#quota#logstore#" + key, + AppConfig::GetInstance()->GetSendRequestConcurrency()); sLogstoreConcurrencyLimiterMap.try_emplace(key, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = make_shared(sName + "#quota#logstore#" + key, AppConfig::GetInstance()->GetSendRequestConcurrency()); + auto limiter = make_shared(sName + "#quota#logstore#" + key, + AppConfig::GetInstance()->GetSendRequestConcurrency()); iter->second = limiter; return limiter; } @@ -142,12 +144,14 @@ shared_ptr FlusherSLS::GetProjectConcurrencyLimiter(const st lock_guard lock(sMux); auto iter = sProjectConcurrencyLimiterMap.find(project); if (iter == sProjectConcurrencyLimiterMap.end()) { - auto limiter = make_shared(sName + "#quota#project#" + project, AppConfig::GetInstance()->GetSendRequestConcurrency()); + auto limiter = make_shared(sName + "#quota#project#" + project, + AppConfig::GetInstance()->GetSendRequestConcurrency()); sProjectConcurrencyLimiterMap.try_emplace(project, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = make_shared(sName + "#quota#project#" + project, AppConfig::GetInstance()->GetSendRequestConcurrency()); + auto limiter = make_shared(sName + "#quota#project#" + project, + AppConfig::GetInstance()->GetSendRequestConcurrency()); iter->second = limiter; return limiter; } @@ -158,12 +162,20 @@ shared_ptr FlusherSLS::GetRegionConcurrencyLimiter(const str lock_guard lock(sMux); auto iter = sRegionConcurrencyLimiterMap.find(region); if (iter == sRegionConcurrencyLimiterMap.end()) { - auto limiter = 
make_shared(sName + "#network#region#" + region, AppConfig::GetInstance()->GetSendRequestConcurrency(), AppConfig::GetInstance()->GetSendRequestConcurrency()*AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); + auto limiter = make_shared( + sName + "#network#region#" + region, + AppConfig::GetInstance()->GetSendRequestConcurrency(), + AppConfig::GetInstance()->GetSendRequestConcurrency() + * AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); sRegionConcurrencyLimiterMap.try_emplace(region, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = make_shared(sName + "#network#region#" + region, AppConfig::GetInstance()->GetSendRequestConcurrency(), AppConfig::GetInstance()->GetSendRequestConcurrency()*AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); + auto limiter = make_shared( + sName + "#network#region#" + region, + AppConfig::GetInstance()->GetSendRequestConcurrency(), + AppConfig::GetInstance()->GetSendRequestConcurrency() + * AppConfig::GetInstance()->GetGlobalConcurrencyFreePercentageForOneRegion()); iter->second = limiter; return limiter; } @@ -524,6 +536,7 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline mSendCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL); mSendDoneCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL); mSuccessCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL); + mDiscardCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_DISCARD_TOTAL); mNetworkErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL); mServerErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL); mShardWriteQuotaErrorCnt @@ -878,6 +891,9 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) DealSenderQueueItemAfterSend(item, true); break; 
case OperationOnFail::DISCARD: + if (mDiscardCnt) { + mDiscardCnt->Add(1); + } default: LOG_WARNING(sLogger, LOG_PATTERN); if (!isProfileData) { diff --git a/core/plugin/flusher/sls/FlusherSLS.h b/core/plugin/flusher/sls/FlusherSLS.h index 6a71b0f526..25291a7cc4 100644 --- a/core/plugin/flusher/sls/FlusherSLS.h +++ b/core/plugin/flusher/sls/FlusherSLS.h @@ -128,6 +128,7 @@ class FlusherSLS : public HttpFlusher { CounterPtr mSendCnt; CounterPtr mSendDoneCnt; CounterPtr mSuccessCnt; + CounterPtr mDiscardCnt; CounterPtr mNetworkErrorCnt; CounterPtr mServerErrorCnt; CounterPtr mShardWriteQuotaErrorCnt; diff --git a/core/unittest/pipeline/PipelineUnittest.cpp b/core/unittest/pipeline/PipelineUnittest.cpp index 614e6dd415..89df992071 100644 --- a/core/unittest/pipeline/PipelineUnittest.cpp +++ b/core/unittest/pipeline/PipelineUnittest.cpp @@ -124,7 +124,7 @@ void PipelineUnittest::OnSuccessfulInit() const { APSARA_TEST_EQUAL(QueueKeyManager::GetInstance()->GetKey("test_config-flusher_sls-test_project#test_logstore"), pipeline->GetContext().GetLogstoreKey()); APSARA_TEST_EQUAL(0, pipeline->mInProcessCnt.load()); - APSARA_TEST_EQUAL(2U, pipeline->mMetricsRecordRef->GetLabels()->size()); + APSARA_TEST_EQUAL(3U, pipeline->mMetricsRecordRef->GetLabels()->size()); APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, configName)); APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "test_project")); From 62c55e48dd7ac1ad4d323830885a4e6a4d244b57 Mon Sep 17 00:00:00 2001 From: henryzhx8 Date: Tue, 31 Dec 2024 16:13:03 +0800 Subject: [PATCH 11/12] refactor sls client manager (#1954) --- core/CMakeLists.txt | 2 +- core/app_config/AppConfig.cpp | 17 - core/app_config/AppConfig.h | 4 +- core/application/Application.cpp | 41 +- core/common/CompressTools.cpp | 171 ---- core/common/CompressTools.h | 28 +- core/common/DNSCache.cpp | 144 +-- core/common/DNSCache.h | 7 +- core/common/EncodingUtil.cpp | 75 ++ 
core/common/EncodingUtil.h | 25 + core/common/EndpointUtil.cpp | 24 +- core/common/EndpointUtil.h | 4 +- core/common/HashUtil.cpp | 18 + core/common/HashUtil.h | 1 + core/common/LogtailCommonFlags.cpp | 3 - core/common/LogtailCommonFlags.h | 3 - core/common/StringPiece.h | 315 ------- core/common/common.cmake | 2 +- core/common/http/AsynCurlRunner.cpp | 114 +-- core/common/http/AsynCurlRunner.h | 2 - core/common/http/Constant.cpp | 34 + core/common/http/Constant.h | 36 + core/common/http/Curl.cpp | 243 ++++- core/common/http/Curl.h | 6 + core/common/http/HttpRequest.cpp | 40 +- core/common/http/HttpRequest.h | 23 +- core/common/http/HttpResponse.h | 73 +- .../common_provider/CommonConfigProvider.cpp | 53 +- .../LegacyCommonConfigProvider.cpp | 127 ++- core/config/watcher/PipelineConfigWatcher.cpp | 2 +- core/file_server/event_handler/LogInput.cpp | 5 - core/file_server/reader/LogFileReader.cpp | 5 +- core/models/EventPool.cpp | 4 +- core/monitor/Monitor.cpp | 2 - core/monitor/profile_sender/ProfileSender.cpp | 27 +- core/pipeline/GlobalConfig.cpp | 2 +- core/pipeline/plugin/interface/HttpFlusher.h | 4 +- core/pipeline/queue/SLSSenderQueueItem.h | 2 +- core/plugin/flusher/sls/DiskBufferWriter.cpp | 366 +++++--- core/plugin/flusher/sls/DiskBufferWriter.h | 42 +- core/{sdk => plugin/flusher/sls}/Exception.h | 0 core/plugin/flusher/sls/FlusherSLS.cpp | 424 +++++---- core/plugin/flusher/sls/FlusherSLS.h | 43 +- core/plugin/flusher/sls/SLSClientManager.cpp | 856 +++++------------ core/plugin/flusher/sls/SLSClientManager.h | 180 ++-- core/plugin/flusher/sls/SLSConstant.cpp | 98 ++ core/plugin/flusher/sls/SLSConstant.h | 101 ++ core/plugin/flusher/sls/SLSResponse.cpp | 85 +- core/plugin/flusher/sls/SLSResponse.h | 2 + core/plugin/flusher/sls/SLSUtil.cpp | 307 ++++++ core/plugin/flusher/sls/SLSUtil.h | 82 ++ core/plugin/flusher/sls/SendResult.cpp | 16 +- .../processor/ProcessorDesensitizeNative.cpp | 4 +- core/prometheus/PrometheusInputRunner.cpp | 51 +- 
core/prometheus/PrometheusInputRunner.h | 6 +- core/prometheus/schedulers/ScrapeConfig.cpp | 4 +- .../prometheus/schedulers/ScrapeScheduler.cpp | 4 +- .../schedulers/TargetSubscriberScheduler.cpp | 6 +- core/protobuf/sls/logtail_buffer_meta.proto | 11 +- core/runner/FlusherRunner.cpp | 20 +- core/runner/FlusherRunner.h | 1 - core/runner/ProcessorRunner.cpp | 4 +- core/runner/sink/http/HttpSink.cpp | 41 +- core/runner/sink/http/HttpSinkRequest.h | 2 +- core/sdk/Client.cpp | 483 ---------- core/sdk/Client.h | 223 ----- core/sdk/Common.cpp | 883 ------------------ core/sdk/Common.h | 325 ------- core/sdk/CurlImp.cpp | 192 ---- core/sdk/CurlImp.h | 42 - core/sdk/Result.cpp | 128 --- core/sdk/Result.h | 47 - core/unittest/CMakeLists.txt | 1 - core/unittest/common/CMakeLists.txt | 5 - .../common/http/HttpResponseUnittest.cpp | 27 - core/unittest/flusher/CMakeLists.txt | 10 +- core/unittest/flusher/FlusherSLSUnittest.cpp | 709 ++++++++++++-- .../flusher/SLSClientManagerUnittest.cpp | 46 + core/unittest/models/MetricEventUnittest.cpp | 2 +- .../pipeline/GlobalConfigUnittest.cpp | 2 + core/unittest/pipeline/HttpSinkMock.h | 7 +- .../pipeline/PipelineUpdateUnittest.cpp | 9 +- core/unittest/plugin/PluginMock.h | 2 +- .../prometheus/ScrapeSchedulerUnittest.cpp | 10 +- core/unittest/sdk/CMakeLists.txt | 19 - core/unittest/sdk/SDKCommonUnittest.cpp | 290 ------ .../unittest/sender/FlusherRunnerUnittest.cpp | 6 +- .../serializer/SLSSerializerUnittest.cpp | 8 +- .../how-to-write-native-flusher-plugins.md | 2 +- 89 files changed, 2999 insertions(+), 4923 deletions(-) create mode 100644 core/common/EncodingUtil.cpp create mode 100644 core/common/EncodingUtil.h delete mode 100644 core/common/StringPiece.h create mode 100644 core/common/http/Constant.cpp create mode 100644 core/common/http/Constant.h rename core/{sdk => plugin/flusher/sls}/Exception.h (100%) create mode 100644 core/plugin/flusher/sls/SLSConstant.cpp create mode 100644 core/plugin/flusher/sls/SLSConstant.h create mode 
100644 core/plugin/flusher/sls/SLSUtil.cpp create mode 100644 core/plugin/flusher/sls/SLSUtil.h delete mode 100644 core/sdk/Client.cpp delete mode 100644 core/sdk/Client.h delete mode 100644 core/sdk/Common.cpp delete mode 100644 core/sdk/Common.h delete mode 100644 core/sdk/CurlImp.cpp delete mode 100644 core/sdk/CurlImp.h delete mode 100644 core/sdk/Result.cpp delete mode 100644 core/sdk/Result.h delete mode 100644 core/unittest/common/http/HttpResponseUnittest.cpp create mode 100644 core/unittest/flusher/SLSClientManagerUnittest.cpp delete mode 100644 core/unittest/sdk/CMakeLists.txt delete mode 100644 core/unittest/sdk/SDKCommonUnittest.cpp diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index f59c9b847e..04461d658c 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -129,7 +129,7 @@ set(SUB_DIRECTORIES_LIST file_server file_server/event file_server/event_handler file_server/event_listener file_server/reader file_server/polling prometheus prometheus/labels prometheus/schedulers prometheus/async prometheus/component ebpf ebpf/observer ebpf/security ebpf/handler - parser sls_control sdk + parser ) if (LINUX) if (ENABLE_ENTERPRISE) diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp index 1410714e3a..7ec06437c0 100644 --- a/core/app_config/AppConfig.cpp +++ b/core/app_config/AppConfig.cpp @@ -124,8 +124,6 @@ DECLARE_FLAG_INT32(reader_close_unused_file_time); DECLARE_FLAG_INT32(batch_send_interval); DECLARE_FLAG_INT32(batch_send_metric_size); -DECLARE_FLAG_BOOL(send_prefer_real_ip); -DECLARE_FLAG_INT32(send_switch_real_ip_interval); DECLARE_FLAG_INT32(truncate_pos_skip_bytes); DECLARE_FLAG_INT32(default_tail_limit_kb); @@ -989,26 +987,11 @@ void AppConfig::LoadResourceConf(const Json::Value& confJson) { mCheckPointFilePath = AbsolutePath(mCheckPointFilePath, mProcessExecutionDir); LOG_INFO(sLogger, ("logtail checkpoint path", mCheckPointFilePath)); - if (confJson.isMember("send_prefer_real_ip") && 
confJson["send_prefer_real_ip"].isBool()) { - BOOL_FLAG(send_prefer_real_ip) = confJson["send_prefer_real_ip"].asBool(); - } - - if (confJson.isMember("send_switch_real_ip_interval") && confJson["send_switch_real_ip_interval"].isInt()) { - INT32_FLAG(send_switch_real_ip_interval) = confJson["send_switch_real_ip_interval"].asInt(); - } - LoadInt32Parameter(INT32_FLAG(truncate_pos_skip_bytes), confJson, "truncate_pos_skip_bytes", "ALIYUN_LOGTAIL_TRUNCATE_POS_SKIP_BYTES"); - if (BOOL_FLAG(send_prefer_real_ip)) { - LOG_INFO(sLogger, - ("change send policy, prefer use real ip, switch interval seconds", - INT32_FLAG(send_switch_real_ip_interval))("truncate skip read offset", - INT32_FLAG(truncate_pos_skip_bytes))); - } - if (confJson.isMember("ignore_dir_inode_changed") && confJson["ignore_dir_inode_changed"].isBool()) { mIgnoreDirInodeChanged = confJson["ignore_dir_inode_changed"].asBool(); } diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h index 71a8d2df49..4c555f5b8b 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -316,7 +316,7 @@ class AppConfig { public: AppConfig(); - ~AppConfig(){}; + ~AppConfig() {}; void LoadInstanceConfig(const std::map>&); @@ -533,6 +533,8 @@ class AppConfig { friend class InputPrometheusUnittest; friend class InputContainerStdioUnittest; friend class BatcherUnittest; + friend class EnterpriseSLSClientManagerUnittest; + friend class FlusherRunnerUnittest; friend class PipelineUpdateUnittest; #endif }; diff --git a/core/application/Application.cpp b/core/application/Application.cpp index 88cb9e5a31..a32acb08a8 100644 --- a/core/application/Application.cpp +++ b/core/application/Application.cpp @@ -46,6 +46,7 @@ #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/SenderQueueManager.h" #include "plugin/flusher/sls/DiskBufferWriter.h" +#include "plugin/flusher/sls/FlusherSLS.h" #include "plugin/input/InputFeedbackInterfaceRegistry.h" #include 
"prometheus/PrometheusInputRunner.h" #include "runner/FlusherRunner.h" @@ -73,9 +74,6 @@ DEFINE_FLAG_INT32(queue_check_gc_interval_sec, "30s", 30); DEFINE_FLAG_BOOL(enable_cgroup, "", false); #endif -DECLARE_FLAG_BOOL(send_prefer_real_ip); -DECLARE_FLAG_BOOL(global_network_success); - using namespace std; namespace logtail { @@ -199,11 +197,13 @@ void Application::Start() { // GCOVR_EXCL_START #if defined(__ENTERPRISE__) && defined(_MSC_VER) InitWindowsSignalObject(); #endif - BoundedSenderQueueInterface::SetFeedback(ProcessQueueManager::GetInstance()); - HttpSink::GetInstance()->Init(); - FlusherRunner::GetInstance()->Init(); + // resource monitor + // TODO: move metric related initialization to input Init + LoongCollectorMonitor::GetInstance()->Init(); + LogtailMonitor::GetInstance()->Init(); + // config provider { // add local config dir filesystem::path localConfigPath = filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir()) @@ -217,7 +217,6 @@ void Application::Start() { // GCOVR_EXCL_START } PipelineConfigWatcher::GetInstance()->AddSource(localConfigPath.string()); } - #ifdef __ENTERPRISE__ EnterpriseConfigProvider::GetInstance()->Start(); LegacyConfigProvider::GetInstance()->Init("legacy"); @@ -225,10 +224,16 @@ void Application::Start() { // GCOVR_EXCL_START InitRemoteConfigProviders(); #endif - AlarmManager::GetInstance()->Init(); - LoongCollectorMonitor::GetInstance()->Init(); - LogtailMonitor::GetInstance()->Init(); + // runner + BoundedSenderQueueInterface::SetFeedback(ProcessQueueManager::GetInstance()); + HttpSink::GetInstance()->Init(); + FlusherRunner::GetInstance()->Init(); + ProcessorRunner::GetInstance()->Init(); + // flusher_sls resource should be explicitly initialized to allow internal metrics and alarms to be sent + FlusherSLS::InitResource(); + + // plugin registration PluginRegistry::GetInstance()->LoadPlugins(); InputFeedbackInterfaceRegistry::GetInstance()->LoadFeedbackInterfaces(); @@ -258,10 +263,10 @@ void 
Application::Start() { // GCOVR_EXCL_START LogtailPlugin::GetInstance()->LoadPluginBase(); } - ProcessorRunner::GetInstance()->Init(); + // TODO: this should be refactored to internal pipeline + AlarmManager::GetInstance()->Init(); - time_t curTime = 0, lastConfigCheckTime = 0, lastUpdateMetricTime = 0, - lastCheckTagsTime = 0, lastQueueGCTime = 0; + time_t curTime = 0, lastConfigCheckTime = 0, lastUpdateMetricTime = 0, lastCheckTagsTime = 0, lastQueueGCTime = 0; #ifndef LOGTAIL_NO_TC_MALLOC time_t lastTcmallocReleaseMemTime = 0; #endif @@ -393,16 +398,6 @@ void Application::CheckCriticalCondition(int32_t curTime) { _exit(1); } #endif - // if network is fail in 2 hours, force exit (for ant only) - // work around for no network when docker start - if (BOOL_FLAG(send_prefer_real_ip) && !BOOL_FLAG(global_network_success) && curTime - mStartTime > 7200) { - LOG_ERROR(sLogger, ("network is fail", "prepare force exit")); - AlarmManager::GetInstance()->SendAlarm(LOGTAIL_CRASH_ALARM, - "network is fail since " + ToString(mStartTime) + " force exit"); - AlarmManager::GetInstance()->ForceToSend(); - sleep(10); - _exit(1); - } } bool Application::GetUUIDThread() { diff --git a/core/common/CompressTools.cpp b/core/common/CompressTools.cpp index fc7d753ee5..4aacdfe487 100644 --- a/core/common/CompressTools.cpp +++ b/core/common/CompressTools.cpp @@ -15,141 +15,9 @@ #include "CompressTools.h" #include -#ifdef __ANDROID__ -#include -#else -#include -#endif -#include - -#include - -#include "protobuf/sls/sls_logs.pb.h" namespace logtail { -const int32_t ZSTD_DEFAULT_LEVEL = 1; - -bool UncompressData(sls_logs::SlsCompressType compressType, - const std::string& src, - uint32_t rawSize, - std::string& dst) { - switch (compressType) { - case sls_logs::SLS_CMP_NONE: - dst = src; - return true; - case sls_logs::SLS_CMP_LZ4: - return UncompressLz4(src, rawSize, dst); - case sls_logs::SLS_CMP_DEFLATE: - return UncompressDeflate(src, rawSize, dst); - case sls_logs::SLS_CMP_ZSTD: - return 
UncompressZstd(src, rawSize, dst); - default: - return false; - } -} - -bool CompressData(sls_logs::SlsCompressType compressType, const std::string& src, std::string& dst) { - switch (compressType) { - case sls_logs::SLS_CMP_NONE: - dst = src; - return true; - case sls_logs::SLS_CMP_LZ4: - return CompressLz4(src, dst); - case sls_logs::SLS_CMP_DEFLATE: - return CompressDeflate(src, dst); - case sls_logs::SLS_CMP_ZSTD: - return CompressZstd(src, dst, ZSTD_DEFAULT_LEVEL); - default: - return false; - } -} - -bool CompressData(sls_logs::SlsCompressType compressType, const char* src, uint32_t size, std::string& dst) { - switch (compressType) { - case sls_logs::SLS_CMP_NONE: { - dst.assign(src, size); - return true; - } - case sls_logs::SLS_CMP_LZ4: - return CompressLz4(src, size, dst); - case sls_logs::SLS_CMP_DEFLATE: - return CompressDeflate(src, size, dst); - case sls_logs::SLS_CMP_ZSTD: - return CompressZstd(src, size, dst, ZSTD_DEFAULT_LEVEL); - default: - return false; - } -} - -bool UncompressLz4(const std::string& src, const uint32_t rawSize, char* dst) { - uint32_t length = 0; - try { - length = LZ4_decompress_safe(src.c_str(), dst, src.length(), rawSize); - } catch (...) { - return false; - } - if (length != rawSize) { - return false; - } - return true; -} - -bool UncompressLz4(const char* srcPtr, const uint32_t srcSize, const uint32_t rawSize, std::string& dst) { - dst.resize(rawSize); - char* unCompressed = const_cast(dst.c_str()); - uint32_t length = 0; - try { - length = LZ4_decompress_safe(srcPtr, unCompressed, srcSize, rawSize); - } catch (...) 
{ - return false; - } - if (length != rawSize) { - return false; - } - return true; -} -bool CompressDeflate(const char* srcPtr, const uint32_t srcSize, std::string& dst) { - int64_t dstLen = compressBound(srcSize); - dst.resize(dstLen); - if (compress((Bytef*)(dst.c_str()), (uLongf*)&dstLen, (const Bytef*)srcPtr, srcSize) == Z_OK) { - dst.resize(dstLen); - return true; - } - return false; -} - -bool CompressDeflate(const std::string& src, std::string& dst) { - int64_t dstLen = compressBound(src.size()); - dst.resize(dstLen); - if (compress((Bytef*)(dst.c_str()), (uLongf*)&dstLen, (const Bytef*)(src.c_str()), src.size()) == Z_OK) { - dst.resize(dstLen); - return true; - } - return false; -} - -bool UncompressDeflate(const char* srcPtr, const uint32_t srcSize, const int64_t rawSize, std::string& dst) { - static const int64_t MAX_UMCOMPRESS_SIZE = 128 * 1024 * 1024; - if (rawSize > MAX_UMCOMPRESS_SIZE) { - return false; - } - dst.resize(rawSize); - if (uncompress((Bytef*)(dst.c_str()), (uLongf*)&rawSize, (const Bytef*)(srcPtr), srcSize) != Z_OK) { - return false; - } - return true; -} - - -bool UncompressDeflate(const std::string& src, const int64_t rawSize, std::string& dst) { - return UncompressDeflate(src.c_str(), src.size(), rawSize, dst); -} - - -bool UncompressLz4(const std::string& src, const uint32_t rawSize, std::string& dst) { - return UncompressLz4(src.c_str(), src.length(), rawSize, dst); -} bool CompressLz4(const char* srcPtr, const uint32_t srcSize, std::string& dst) { uint32_t encodingSize = LZ4_compressBound(srcSize); dst.resize(encodingSize); @@ -169,43 +37,4 @@ bool CompressLz4(const std::string& src, std::string& dst) { return CompressLz4(src.c_str(), src.length(), dst); } -bool UncompressZstd(const std::string& src, const uint32_t rawSize, std::string& dst) { - return UncompressZstd(src.c_str(), src.length(), rawSize, dst); -} - -bool UncompressZstd(const char* srcPtr, const uint32_t srcSize, const uint32_t rawSize, std::string& dst) { - 
dst.resize(rawSize); - char* unCompressed = const_cast(dst.c_str()); - uint32_t length = 0; - try { - length = ZSTD_decompress(unCompressed, rawSize, srcPtr, srcSize); - } catch (...) { - return false; - } - if (length != rawSize) { - return false; - } - return true; -} - -bool CompressZstd(const char* srcPtr, const uint32_t srcSize, std::string& dst, int32_t level) { - uint32_t encodingSize = ZSTD_compressBound(srcSize); - dst.resize(encodingSize); - char* compressed = const_cast(dst.c_str()); - try { - size_t const cmp_size = ZSTD_compress(compressed, encodingSize, srcPtr, srcSize, level); - if (ZSTD_isError(cmp_size)) { - return false; - } - dst.resize(cmp_size); - return true; - } catch (...) { - } - return false; -} - -bool CompressZstd(const std::string& src, std::string& dst, int32_t level) { - return CompressZstd(src.c_str(), src.length(), dst, level); -} - } // namespace logtail diff --git a/core/common/CompressTools.h b/core/common/CompressTools.h index 9293150ff9..1ccf1fe041 100644 --- a/core/common/CompressTools.h +++ b/core/common/CompressTools.h @@ -15,36 +15,14 @@ */ #pragma once -#include -#include -#include "protobuf/sls/sls_logs.pb.h" - -namespace logtail { - -extern const int32_t ZSTD_DEFAULT_LEVEL; - -bool UncompressData(sls_logs::SlsCompressType compressType, const std::string& src, uint32_t rawSize, std::string& dst); -bool CompressData(sls_logs::SlsCompressType compressType, const std::string& src, std::string& dst); -bool CompressData(sls_logs::SlsCompressType compressType, const char* src, uint32_t size, std::string& dst); - -bool UncompressDeflate(const std::string& src, const int64_t rawSize, std::string& dst); -bool UncompressDeflate(const char* srcPtr, const uint32_t srcSize, const int64_t rawSize, std::string& dst); +#include -bool CompressDeflate(const std::string& src, std::string& dst); -bool CompressDeflate(const char* srcPtr, const uint32_t srcSize, std::string& dst); +#include -bool UncompressLz4(const std::string& src, const 
uint32_t rawSize, std::string& dst); -bool UncompressLz4(const std::string& src, const uint32_t rawSize, char* dst); -bool UncompressLz4(const char* srcPtr, const uint32_t srcSize, const uint32_t rawSize, std::string& dst); +namespace logtail { bool CompressLz4(const std::string& src, std::string& dst); bool CompressLz4(const char* srcPtr, const uint32_t srcSize, std::string& dest); -bool UncompressZstd(const std::string& src, const uint32_t rawSize, std::string& dst); -bool UncompressZstd(const char* srcPtr, const uint32_t srcSize, const uint32_t rawSize, std::string& dst); - -bool CompressZstd(const char* srcPtr, const uint32_t srcSize, std::string& dst, int32_t level); -bool CompressZstd(const std::string& src, std::string& dst, int32_t level); - } // namespace logtail \ No newline at end of file diff --git a/core/common/DNSCache.cpp b/core/common/DNSCache.cpp index 8c1ce33f76..a5fc00480f 100644 --- a/core/common/DNSCache.cpp +++ b/core/common/DNSCache.cpp @@ -13,6 +13,7 @@ // limitations under the License. #include "DNSCache.h" + #include #if defined(__linux__) #include @@ -22,86 +23,91 @@ #include #endif +DEFINE_FLAG_INT32(dns_cache_ttl_sec, "", 600); + namespace logtail { - // ParseHost only supports IPv4 now. - bool DnsCache::ParseHost(const char* host, std::string& ip) { +DnsCache::DnsCache(const int32_t ttlSeconds) : mUpdateTime(time(NULL)), mDnsTTL(ttlSeconds) { +} + +// ParseHost only supports IPv4 now. 
+bool DnsCache::ParseHost(const char* host, std::string& ip) { #if defined(__linux__) - struct sockaddr_in addr; - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; - char* buffer = NULL; - if (host && host[0]) { - if (IsRawIp(host)) { - if ((addr.sin_addr.s_addr = inet_addr(host)) == INADDR_NONE) - return false; - } else { - int bufferLen = 2048; - int rc, res; - struct hostent* hp = NULL; - struct hostent h; - while (true) { - buffer = new char[bufferLen]; - res = gethostbyname_r(host, &h, buffer, bufferLen, &hp, &rc); - if (res == ERANGE) { - if (buffer != NULL) - delete[] buffer; - bufferLen *= 4; - if (bufferLen > 32768) // 32KB - return false; - continue; - } - if (res != 0 || hp == NULL || hp->h_addr == NULL) { - if (buffer != NULL) - delete[] buffer; + char* buffer = NULL; + if (host && host[0]) { + if (IsRawIp(host)) { + if ((addr.sin_addr.s_addr = inet_addr(host)) == INADDR_NONE) + return false; + } else { + int bufferLen = 2048; + int rc, res; + struct hostent* hp = NULL; + struct hostent h; + while (true) { + buffer = new char[bufferLen]; + res = gethostbyname_r(host, &h, buffer, bufferLen, &hp, &rc); + if (res == ERANGE) { + if (buffer != NULL) + delete[] buffer; + bufferLen *= 4; + if (bufferLen > 32768) // 32KB return false; - } else - break; + continue; } - addr.sin_addr.s_addr = *((in_addr_t*)(hp->h_addr)); + if (res != 0 || hp == NULL || hp->h_addr == NULL) { + if (buffer != NULL) + delete[] buffer; + return false; + } else + break; } - } else { - addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_addr.s_addr = *((in_addr_t*)(hp->h_addr)); } - ip = inet_ntoa(addr.sin_addr); - if (buffer != NULL) - delete[] buffer; - return true; + } else { + addr.sin_addr.s_addr = htonl(INADDR_ANY); + } + ip = inet_ntoa(addr.sin_addr); + if (buffer != NULL) + delete[] buffer; + return true; #elif defined(_MSC_VER) - struct sockaddr_in addr; - memset(&addr, 0, 
sizeof(addr)); - addr.sin_family = AF_INET; - if (host && host[0]) { - if (IsRawIp(host)) { - if ((addr.sin_addr.s_addr = inet_addr(host)) == INADDR_NONE) - return false; - } else { - addrinfo hints; - struct addrinfo* result = NULL; - std::memset(&hints, 0, sizeof(hints)); - auto ret = ::getaddrinfo(host, NULL, &hints, &result); - if (ret != 0) { - return false; - } + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + if (host && host[0]) { + if (IsRawIp(host)) { + if ((addr.sin_addr.s_addr = inet_addr(host)) == INADDR_NONE) + return false; + } else { + addrinfo hints; + struct addrinfo* result = NULL; + std::memset(&hints, 0, sizeof(hints)); + auto ret = ::getaddrinfo(host, NULL, &hints, &result); + if (ret != 0) { + return false; + } - bool found = false; - for (auto ptr = result; ptr != NULL; ptr = ptr->ai_next) { - if (AF_INET == ptr->ai_family) { - addr.sin_addr = ((struct sockaddr_in*)ptr->ai_addr)->sin_addr; - found = true; - break; - } + bool found = false; + for (auto ptr = result; ptr != NULL; ptr = ptr->ai_next) { + if (AF_INET == ptr->ai_family) { + addr.sin_addr = ((struct sockaddr_in*)ptr->ai_addr)->sin_addr; + found = true; + break; } - freeaddrinfo(result); - if (!found) - return false; } - } else { - addr.sin_addr.s_addr = htonl(INADDR_ANY); + freeaddrinfo(result); + if (!found) + return false; } - ip = inet_ntoa(addr.sin_addr); - return true; -#endif + } else { + addr.sin_addr.s_addr = htonl(INADDR_ANY); } + ip = inet_ntoa(addr.sin_addr); + return true; +#endif +} -} // namespace logtail \ No newline at end of file +} // namespace logtail diff --git a/core/common/DNSCache.h b/core/common/DNSCache.h index aec08786b0..90395ed187 100644 --- a/core/common/DNSCache.h +++ b/core/common/DNSCache.h @@ -15,11 +15,16 @@ */ #pragma once + #include #include #include #include +#include "common/Flags.h" + +DECLARE_FLAG_INT32(dns_cache_ttl_sec); + namespace logtail { class DnsCache { @@ -75,7 +80,7 @@ class DnsCache { } 
private: - DnsCache(const int32_t ttlSeconds = 60 * 10) : mUpdateTime(time(NULL)), mDnsTTL(ttlSeconds) {} + DnsCache(const int32_t ttlSeconds = INT32_FLAG(dns_cache_ttl_sec)); ~DnsCache() = default; bool IsRawIp(const char* host) { diff --git a/core/common/EncodingUtil.cpp b/core/common/EncodingUtil.cpp new file mode 100644 index 0000000000..32c8d836c7 --- /dev/null +++ b/core/common/EncodingUtil.cpp @@ -0,0 +1,75 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "common/EncodingUtil.h" + +#include + +using namespace std; + +namespace logtail { + +static void Base64Encoding(istream& is, ostream& os, char makeupChar, const char* alphabet) { + int out[4]; + int remain = 0; + while (!is.eof()) { + int byte1 = is.get(); + if (byte1 < 0) { + break; + } + int byte2 = is.get(); + int byte3; + if (byte2 < 0) { + byte2 = 0; + byte3 = 0; + remain = 1; + } else { + byte3 = is.get(); + if (byte3 < 0) { + byte3 = 0; + remain = 2; + } + } + out[0] = static_cast(byte1) >> 2; + out[1] = ((byte1 & 0x03) << 4) + (static_cast(byte2) >> 4); + out[2] = ((byte2 & 0x0F) << 2) + (static_cast(byte3) >> 6); + out[3] = byte3 & 0x3F; + + if (remain == 1) { + os.put(out[0] = alphabet[out[0]]); + os.put(out[1] = alphabet[out[1]]); + os.put(makeupChar); + os.put(makeupChar); + } else if (remain == 2) { + os.put(out[0] = alphabet[out[0]]); + os.put(out[1] = alphabet[out[1]]); + os.put(out[2] = alphabet[out[2]]); + os.put(makeupChar); + } else { + os.put(out[0] = alphabet[out[0]]); + os.put(out[1] = alphabet[out[1]]); + os.put(out[2] = alphabet[out[2]]); + os.put(out[3] = alphabet[out[3]]); + } + } +} + +string Base64Enconde(const string& message) { + istringstream iss(message); + ostringstream oss; + Base64Encoding(iss, oss, '=', "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); + return oss.str(); +} + +} // namespace logtail diff --git a/core/common/EncodingUtil.h b/core/common/EncodingUtil.h new file mode 100644 index 0000000000..f467818627 --- /dev/null +++ b/core/common/EncodingUtil.h @@ -0,0 +1,25 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +namespace logtail { + +std::string Base64Enconde(const std::string& message); + +} // namespace logtail diff --git a/core/common/EndpointUtil.cpp b/core/common/EndpointUtil.cpp index 67e4b34333..c466ab2eb3 100644 --- a/core/common/EndpointUtil.cpp +++ b/core/common/EndpointUtil.cpp @@ -16,6 +16,7 @@ #include "common/EndpointUtil.h" +#include "common/StringTools.h" #include "logger/Logger.h" using namespace std; @@ -23,10 +24,11 @@ using namespace std; namespace logtail { bool IsHttpsEndpoint(const string& endpoint) { - return endpoint.find("https://") == 0; + string trimmedEndpoint = TrimString(endpoint); + return trimmedEndpoint.find("https://") == 0; } - -string StandardizeEndpoint(const string& endpoint, const string& defaultEndpoint) { + +string StandardizeHost(const string& endpoint, const string& defaultEndpoint) { string res = endpoint; if (endpoint.find("https://") == 0) { if (endpoint.size() < string("https://x").size()) { @@ -49,6 +51,22 @@ string StandardizeEndpoint(const string& endpoint, const string& defaultEndpoint return res; } +string ExtractEndpoint(const string& endpoint) { + string trimmedEndpoint = TrimString(endpoint); + auto bpos = trimmedEndpoint.find("://"); + if (bpos == string::npos) { + bpos = 0; + } else { + bpos += strlen("://"); + } + + auto epos = trimmedEndpoint.find("/", bpos); + if (epos == string::npos) { + epos = trimmedEndpoint.length(); + } + return trimmedEndpoint.substr(bpos, epos - bpos); +} + string GetHostFromEndpoint(const std::string& endpoint) { static size_t 
httpSchemaLen = strlen("http://"), httpsSchemaLen = strlen("https://"); if (endpoint.find("https://") == 0) { diff --git a/core/common/EndpointUtil.h b/core/common/EndpointUtil.h index b678dc6792..3651a10e48 100644 --- a/core/common/EndpointUtil.h +++ b/core/common/EndpointUtil.h @@ -22,7 +22,9 @@ namespace logtail { bool IsHttpsEndpoint(const std::string& endpoint); -std::string StandardizeEndpoint(const std::string& endpoint, const std::string& defaultEndpoint); +std::string ExtractEndpoint(const std::string& endpoint); + +std::string StandardizeHost(const std::string& endpoint, const std::string& defaultEndpoint); std::string GetHostFromEndpoint(const std::string& endpoint); diff --git a/core/common/HashUtil.cpp b/core/common/HashUtil.cpp index 3b58726971..206c872f25 100644 --- a/core/common/HashUtil.cpp +++ b/core/common/HashUtil.cpp @@ -24,6 +24,8 @@ namespace logtail { +static constexpr uint32_t MD5_BYTES = 16; + /////////////////////////////////////////////// MACRO ////////////////////////////////////////////////// #define SHIFT_LEFT(a, b) ((a) << (b) | (a) >> (32 - b)) @@ -310,6 +312,22 @@ void DoMd5(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t md5[16]) } } /// DoMd5 +static std::string HexToString(const uint8_t md5[16]) { + static const char* table = "0123456789ABCDEF"; + std::string ss(32, 'a'); + for (int i = 0; i < 16; ++i) { + ss[i * 2] = table[md5[i] >> 4]; + ss[i * 2 + 1] = table[md5[i] & 0x0F]; + } + return ss; +} + +std::string CalcMD5(const std::string& message) { + uint8_t md5[MD5_BYTES]; + DoMd5((const uint8_t*)message.data(), message.length(), md5); + return HexToString(md5); +} + bool SignatureToHash(const std::string& signature, uint64_t& sigHash, uint32_t& sigSize) { sigSize = (uint32_t)signature.size(); sigHash = (uint64_t)HashSignatureString(signature.c_str(), signature.size()); diff --git a/core/common/HashUtil.h b/core/common/HashUtil.h index 1612ffeee8..5bd7204c74 100644 --- a/core/common/HashUtil.h +++ 
b/core/common/HashUtil.h @@ -24,6 +24,7 @@ namespace logtail { // Hash(string(@poolIn, @inputBytesNum)) => @md5. // TODO: Same implementation in sdk module, merge them. void DoMd5(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t md5[16]); +std::string CalcMD5(const std::string& message); bool SignatureToHash(const std::string& signature, uint64_t& sigHash, uint32_t& sigSize); bool CheckAndUpdateSignature(const std::string& signature, uint64_t& sigHash, uint32_t& sigSize); diff --git a/core/common/LogtailCommonFlags.cpp b/core/common/LogtailCommonFlags.cpp index 1d70138f27..ca328dce6c 100644 --- a/core/common/LogtailCommonFlags.cpp +++ b/core/common/LogtailCommonFlags.cpp @@ -96,10 +96,7 @@ DEFINE_FLAG_INT32(ilogtail_epoll_wait_events, "epoll_wait event number", 100); DEFINE_FLAG_INT32(ilogtail_max_epoll_events, "the max events number in epoll", 10000); // sls sender -DEFINE_FLAG_INT32(sls_client_send_timeout, "timeout time of one operation for SlsClient", 15); DEFINE_FLAG_BOOL(sls_client_send_compress, "whether compresses the data or not when put data", true); -DEFINE_FLAG_INT32(send_retrytimes, "how many times should retry if PostLogStoreLogs operation fail", 3); -DEFINE_FLAG_DOUBLE(loggroup_bytes_inflation, "", 1.2); DEFINE_FLAG_STRING(default_region_name, "for compatible with old user_log_config.json or old config server", "__default_region__"); diff --git a/core/common/LogtailCommonFlags.h b/core/common/LogtailCommonFlags.h index 2c980742a0..60bf0d1de9 100644 --- a/core/common/LogtailCommonFlags.h +++ b/core/common/LogtailCommonFlags.h @@ -34,10 +34,7 @@ DECLARE_FLAG_INT32(ilogtail_epoll_wait_events); DECLARE_FLAG_INT32(ilogtail_max_epoll_events); // sls sender -DECLARE_FLAG_INT32(sls_client_send_timeout); DECLARE_FLAG_BOOL(sls_client_send_compress); -DECLARE_FLAG_INT32(send_retrytimes); -DECLARE_FLAG_DOUBLE(loggroup_bytes_inflation); DECLARE_FLAG_STRING(default_region_name); // profile diff --git a/core/common/StringPiece.h 
b/core/common/StringPiece.h deleted file mode 100644 index 9e59362249..0000000000 --- a/core/common/StringPiece.h +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include - -namespace logtail { -namespace detail { - - template - class StringPieceDetail { - public: - // for standard STL container - typedef size_t size_type; - typedef CharT value_type; - typedef const value_type* pointer; - typedef const value_type& reference; - typedef const value_type& const_reference; - typedef ptrdiff_t difference_type; - typedef const value_type* const_iterator; - typedef std::reverse_iterator const_reverse_iterator; - - static const size_type npos; - - typedef std::basic_string string_type; - typedef typename string_type::traits_type traits_type; - - public: - StringPieceDetail(const value_type* str = NULL) - : mStr(str), mLength((str == NULL) ? 0 : traits_type::length(str)) {} - StringPieceDetail(const string_type& str) : mStr(str.data()), mLength(str.size()) {} - StringPieceDetail(const value_type* str, size_type len) : mStr(str), mLength(len) {} - StringPieceDetail(const typename string_type::const_iterator& begin, - const typename string_type::const_iterator& end) - : mStr((end > begin) ? &(*begin) : NULL), mLength((end > begin) ? 
(size_type)(end - begin) : 0) {} - - bool operator==(const StringPieceDetail& s) const { return this == &s || this->compare(s) == 0; } - - // Iterators - const_iterator begin() const { return data(); } - const_iterator end() const { return data() + size(); } - const_reverse_iterator rbegin() const { return const_reverse_iterator(data() + size()); } - const_reverse_iterator rend() const { return const_reverse_iterator(data()); } - - // Capacity - size_type size() const { return mLength; } - size_type length() const { return size(); } - size_type max_size() const { return size(); } - size_type capacity() const { return size(); } - void clear() { - mStr = NULL; - mLength = 0; - } - bool empty() const { return size() == 0; } - - // Element access - value_type operator[](size_type pos) const { return data()[pos]; } - value_type at(size_type pos) const { - if (pos >= size()) { - throw std::out_of_range("pos is out of range"); - } - return data()[pos]; - } - - // Modifiers - void swap(StringPieceDetail& sp) { - std::swap(sp.mStr, mStr); - std::swap(sp.mLength, mLength); - } - - // String Operations - const value_type* c_str() const { return mStr; } - // data() may return a pointer to a buffer with embedded NULs, and the - // returned buffer may or may not be null terminated. Therefore it is - // typically a mistake to pass data() to a routine that expects a NUL - // terminated string. 
- const value_type* data() const { return mStr; } - - // find - size_type find(const StringPieceDetail& sp, size_type pos = 0) const { return find(sp.data(), pos, sp.size()); } - size_type find(const value_type* s, size_type pos = 0) const { - StringPieceDetail sp(s); - return find(sp, pos); - } - size_type find(const value_type* s, size_type pos, size_type n) const { - size_type ret = npos; - const size_type size = this->size(); - if (pos <= size && n <= size - pos) { - const value_type* data = this->data(); - const value_type* p = std::search(data + pos, data + size, s, s + n); - if (p != data + size || n == 0) { - ret = p - data; - } - } - return ret; - } - size_type find(const value_type c, size_type pos = 0) const { - size_type ret = npos; - const size_type size = this->size(); - if (pos < size) { - const value_type* data = this->data(); - const size_type n = size - pos; - const value_type* p = traits_type::find(data + pos, n, c); - if (p != 0) { - ret = p - data; - } - } - return ret; - } - // rfind - size_type rfind(const StringPieceDetail& sp, size_type pos = npos) const { - return rfind(sp.data(), pos, sp.size()); - } - size_type rfind(const value_type* s, size_type pos = npos) const { - StringPieceDetail sp(s); - return rfind(sp, pos); - } - size_type rfind(const value_type* s, size_type pos, size_type n) const { - const size_type size = this->size(); - if (n <= size) { - pos = std::min(size_type(size - n), pos); - const value_type* data = this->data(); - do { - if (traits_type::compare(data + pos, s, n) == 0) { - return pos; - } - } while (pos-- > 0); - } - return npos; - } - size_type rfind(const value_type c, size_type pos = npos) const { - size_type size = this->size(); - if (size) { - if (--size > pos) { - size = pos; - } - for (++size; size-- > 0;) { - if (traits_type::eq(data()[size], c)) { - return size; - } - } - } - return npos; - } - // find_first_of - size_type find_first_of(const StringPieceDetail& s, size_type pos = 0) const { - return 
find_first_of(s.data(), pos, s.size()); - } - size_type find_first_of(const value_type* s, size_type pos = 0) const { - StringPieceDetail sp(s); - return find_first_of(sp, pos); - } - size_type find_first_of(const value_type* s, size_type pos, size_type n) const { - for (; n && pos < size(); ++pos) { - const value_type* p = traits_type::find(s, n, data()[pos]); - if (p) { - return pos; - } - } - return npos; - } - size_type find_first_of(value_type c, size_type pos = 0) const { return find(c, pos); } - // find_last_of - size_type find_last_of(const StringPieceDetail& s, size_type pos = npos) const { - return find_last_of(s.data(), pos, s.size()); - } - size_type find_last_of(const value_type* s, size_type pos = npos) const { - StringPieceDetail sp(s); - return find_last_of(sp, pos); - } - size_type find_last_of(const value_type* s, size_type pos, size_type n) const { - size_type index = size(); - if (index && n) { - if (--index > pos) { - index = pos; - } - do { - if (traits_type::find(s, n, data()[index])) { - return index; - } - } while (index-- != 0); - } - return npos; - } - size_type find_last_of(value_type c, size_type pos = npos) const { return rfind(c, pos); } - // find_first_not_of - size_type find_first_not_of(const StringPieceDetail& s, size_type pos = 0) const { - return find_first_not_of(s.data(), pos, s.size()); - } - size_type find_first_not_of(const value_type* s, size_type pos = 0) const { - StringPieceDetail sp(s); - return find_first_not_of(sp, pos); - } - size_type find_first_not_of(const value_type* s, size_type pos, size_type n) const { - for (; pos < size(); ++pos) { - if (!traits_type::find(s, n, data()[pos])) { - return pos; - } - } - return npos; - } - size_type find_first_not_of(value_type c, size_type pos = 0) const { - StringPieceDetail sp(&c, 1); - return find_first_not_of(sp, pos); - } - // find_last_not_of - size_type find_last_not_of(const StringPieceDetail& s, size_type pos = npos) const { - return find_last_not_of(s.data(), pos, 
s.size()); - } - size_type find_last_not_of(const value_type* s, size_type pos = npos) const { - StringPieceDetail sp(s); - return find_last_not_of(s, pos); - } - size_type find_last_not_of(const value_type* s, size_type pos, size_type n) const { - size_type index = size(); - if (index) { - if (--index > pos) { - index = pos; - } - do { - if (!traits_type::find(s, n, data()[index])) { - return index; - } - } while (index--); - } - return npos; - } - size_type find_last_not_of(value_type c, size_type pos = npos) const { - StringPieceDetail sp(&c, 1); - return find_last_not_of(sp, pos); - } - - void set(const value_type* data, size_type len) { - mStr = data; - mLength = len; - } - void set(const value_type* str) { - mStr = str; - mLength = str ? traits_type::length(str) : 0; - } - void set(const string_type& str) { - mStr = str.data(); - mLength = str.size(); - } - - void remove_prefix(size_type n) { - if (mLength < n) { - throw std::out_of_range("invalid parameter"); - } - mStr += n; - mLength -= n; - } - void remove_suffix(size_type n) { - if (mLength < n) { - throw std::out_of_range("invalid parameter"); - } - mLength -= n; - } - - StringPieceDetail substr(size_t pos = 0, size_t len = npos) const { - if (pos > size()) { - throw std::out_of_range("pos is out of range"); - } - const value_type* p = data() + pos; - if (len == npos) { - len = size() - pos; - } else { - len = std::min(size() - pos, len); - } - return StringPieceDetail(p, len); - } - int compare(const StringPieceDetail& s) const { - const size_type this_size = size(); - const size_type other_size = s.size(); - const size_type len = std::min(this_size, other_size); - - int r = traits_type::compare(data(), s.data(), len); - if (r == 0) { - r = this_size - other_size; - } - return r; - } - - string_type as_string() const { return empty() ? 
string_type() : string_type(data(), size()); } - - private: - const value_type* mStr; - size_type mLength; - }; - - template - const size_t StringPieceDetail::npos = -1; - -} // namespace detail - -typedef detail::StringPieceDetail StringPiece; -// typedef detail::StringPieceDetail StringPiece16; - -} // namespace logtail diff --git a/core/common/common.cmake b/core/common/common.cmake index cd5e9401c6..5cc621bd51 100644 --- a/core/common/common.cmake +++ b/core/common/common.cmake @@ -28,7 +28,7 @@ endif () list(APPEND THIS_SOURCE_FILES_LIST ${XX_HASH_SOURCE_FILES}) # add memory in common list(APPEND THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/common/memory/SourceBuffer.h) -list(APPEND THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/common/http/AsynCurlRunner.cpp ${CMAKE_SOURCE_DIR}/common/http/Curl.cpp ${CMAKE_SOURCE_DIR}/common/http/HttpResponse.cpp ${CMAKE_SOURCE_DIR}/common/http/HttpRequest.cpp) +list(APPEND THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/common/http/AsynCurlRunner.cpp ${CMAKE_SOURCE_DIR}/common/http/Curl.cpp ${CMAKE_SOURCE_DIR}/common/http/HttpResponse.cpp ${CMAKE_SOURCE_DIR}/common/http/HttpRequest.cpp ${CMAKE_SOURCE_DIR}/common/http/Constant.cpp) list(APPEND THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/common/timer/Timer.cpp ${CMAKE_SOURCE_DIR}/common/timer/HttpRequestTimerEvent.cpp) list(APPEND THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/common/compression/Compressor.cpp ${CMAKE_SOURCE_DIR}/common/compression/CompressorFactory.cpp ${CMAKE_SOURCE_DIR}/common/compression/LZ4Compressor.cpp ${CMAKE_SOURCE_DIR}/common/compression/ZstdCompressor.cpp) # remove several files in common diff --git a/core/common/http/AsynCurlRunner.cpp b/core/common/http/AsynCurlRunner.cpp index ee9e514137..6ab352d997 100644 --- a/core/common/http/AsynCurlRunner.cpp +++ b/core/common/http/AsynCurlRunner.cpp @@ -16,7 +16,6 @@ #include -#include "app_config/AppConfig.h" #include "common/StringTools.h" #include "common/http/Curl.h" #include "logger/Logger.h" @@ -61,7 +60,7 @@ void 
AsynCurlRunner::Run() { LOG_DEBUG( sLogger, ("got request from queue, request address", request.get())("try cnt", ToString(request->mTryCnt))); - if (!AddRequestToClient(std::move(request))) { + if (!AddRequestToMultiCurlHandler(mClient, std::move(request))) { continue; } } else if (mIsFlush && mQueue.Empty()) { @@ -77,49 +76,6 @@ void AsynCurlRunner::Run() { } } -bool AsynCurlRunner::AddRequestToClient(unique_ptr&& request) { - curl_slist* headers = nullptr; - CURL* curl = CreateCurlHandler(request->mMethod, - request->mHTTPSFlag, - request->mHost, - request->mPort, - request->mUrl, - request->mQueryString, - request->mHeader, - request->mBody, - request->mResponse, - headers, - request->mTimeout, - AppConfig::GetInstance()->IsHostIPReplacePolicyEnabled(), - AppConfig::GetInstance()->GetBindInterface(), - request->mFollowRedirects, - request->mTls); - - if (curl == nullptr) { - LOG_ERROR(sLogger, ("failed to send request", "failed to init curl handler")("request address", request.get())); - request->mResponse.SetNetworkStatus(CURLE_FAILED_INIT); - request->OnSendDone(request->mResponse); - return false; - } - - request->mPrivateData = headers; - curl_easy_setopt(curl, CURLOPT_PRIVATE, request.get()); - request->mLastSendTime = std::chrono::system_clock::now(); - auto res = curl_multi_add_handle(mClient, curl); - if (res != CURLM_OK) { - LOG_ERROR(sLogger, - ("failed to send request", "failed to add the easy curl handle to multi_handle")( - "errMsg", curl_multi_strerror(res))("request address", request.get())); - request->mResponse.SetNetworkStatus(CURLE_FAILED_INIT); - request->OnSendDone(request->mResponse); - curl_easy_cleanup(curl); - return false; - } - // let runner destruct the request - request.release(); - return true; -} - void AsynCurlRunner::DoRun() { CURLMcode mc; int runningHandlers = 1; @@ -131,14 +87,14 @@ void AsynCurlRunner::DoRun() { this_thread::sleep_for(chrono::milliseconds(100)); continue; } - HandleCompletedRequests(runningHandlers); + 
HandleCompletedAsynRequests(mClient, runningHandlers); unique_ptr request; if (mQueue.TryPop(request)) { LOG_DEBUG(sLogger, ("got item from flusher runner, request address", request.get())("try cnt", ToString(request->mTryCnt))); - if (AddRequestToClient(std::move(request))) { + if (AddRequestToMultiCurlHandler(mClient, std::move(request))) { ++runningHandlers; } } @@ -179,68 +135,4 @@ void AsynCurlRunner::DoRun() { } } -void AsynCurlRunner::HandleCompletedRequests(int& runningHandlers) { - int msgsLeft = 0; - CURLMsg* msg = curl_multi_info_read(mClient, &msgsLeft); - while (msg) { - if (msg->msg == CURLMSG_DONE) { - bool requestReused = false; - CURL* handler = msg->easy_handle; - AsynHttpRequest* request = nullptr; - curl_easy_getinfo(handler, CURLINFO_PRIVATE, &request); - auto responseTime - = chrono::duration_cast(chrono::system_clock::now() - request->mLastSendTime) - .count(); - switch (msg->data.result) { - case CURLE_OK: { - long statusCode = 0; - curl_easy_getinfo(handler, CURLINFO_RESPONSE_CODE, &statusCode); - request->mResponse.SetNetworkStatus(CURLE_OK); - request->mResponse.SetStatusCode(statusCode); - request->OnSendDone(request->mResponse); - LOG_DEBUG(sLogger, - ("send http request succeeded, request address", - request)("response time", ToString(responseTime) + "ms")("try cnt", - ToString(request->mTryCnt))); - break; - } - default: - // considered as network error - if (request->mTryCnt < request->mMaxTryCnt) { - LOG_WARNING(sLogger, - ("failed to send http request", "retry immediately")("request address", request)( - "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(msg->data.result))); - // free first,becase mPrivateData will be reset in AddRequestToClient - if (request->mPrivateData) { - curl_slist_free_all((curl_slist*)request->mPrivateData); - request->mPrivateData = nullptr; - } - ++request->mTryCnt; - AddRequestToClient(unique_ptr(request)); - ++runningHandlers; - requestReused = true; - } else { - 
request->mResponse.SetNetworkStatus(msg->data.result); - request->OnSendDone(request->mResponse); - LOG_DEBUG( - sLogger, - ("failed to send http request", "abort")("request address", request)( - "response time", ToString(responseTime) + "ms")("try cnt", ToString(request->mTryCnt))); - } - break; - } - - curl_multi_remove_handle(mClient, handler); - curl_easy_cleanup(handler); - if (!requestReused) { - if (request->mPrivateData) { - curl_slist_free_all((curl_slist*)request->mPrivateData); - } - delete request; - } - } - msg = curl_multi_info_read(mClient, &msgsLeft); - } -} - } // namespace logtail diff --git a/core/common/http/AsynCurlRunner.h b/core/common/http/AsynCurlRunner.h index a5c02eb4d6..16a3dc3f93 100644 --- a/core/common/http/AsynCurlRunner.h +++ b/core/common/http/AsynCurlRunner.h @@ -48,9 +48,7 @@ class AsynCurlRunner { ~AsynCurlRunner() = default; void Run(); - bool AddRequestToClient(std::unique_ptr&& request); void DoRun(); - void HandleCompletedRequests(int& runningHandlers); CURLM* mClient = nullptr; SafeQueue> mQueue; diff --git a/core/common/http/Constant.cpp b/core/common/http/Constant.cpp new file mode 100644 index 0000000000..0a1421182c --- /dev/null +++ b/core/common/http/Constant.cpp @@ -0,0 +1,34 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "common/http/Constant.h" + +using namespace std; + +namespace logtail { + +const string HTTP_POST = "POST"; +const string HTTP_GET = "GET"; + +const string HOST = "Host"; +const string DATE = "Date"; +const string USER_AGENT = "User-Agent"; +const string CONTENT_TYPE = "Content-Type"; +const string CONTENT_LENGTH = "Content-Length"; +const string AUTHORIZATION = "Authorization"; +const string SIGNATURE = "Signature"; + +const string TYPE_LOG_PROTOBUF = "application/x-protobuf"; + +} // namespace logtail diff --git a/core/common/http/Constant.h b/core/common/http/Constant.h new file mode 100644 index 0000000000..e96716c9a2 --- /dev/null +++ b/core/common/http/Constant.h @@ -0,0 +1,36 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +namespace logtail { + +extern const std::string HTTP_POST; +extern const std::string HTTP_GET; + +extern const std::string HOST; +extern const std::string DATE; +extern const std::string USER_AGENT; +extern const std::string CONTENT_LENGTH; +extern const std::string CONTENT_TYPE; +extern const std::string AUTHORIZATION; +extern const std::string SIGNATURE; + +extern const std::string TYPE_LOG_PROTOBUF; + +} // namespace logtail diff --git a/core/common/http/Curl.cpp b/core/common/http/Curl.cpp index 19778c2abc..075ae6629d 100644 --- a/core/common/http/Curl.cpp +++ b/core/common/http/Curl.cpp @@ -20,6 +20,7 @@ #include "app_config/AppConfig.h" #include "common/DNSCache.h" +#include "common/StringTools.h" #include "common/http/HttpResponse.h" #include "logger/Logger.h" @@ -27,6 +28,46 @@ using namespace std; namespace logtail { +NetworkCode GetNetworkStatus(CURLcode code) { + // please refer to https://curl.se/libcurl/c/libcurl-errors.html + switch (code) { + case CURLE_OK: + return NetworkCode::Ok; + case CURLE_COULDNT_CONNECT: + return NetworkCode::ConnectionFailed; + case CURLE_LOGIN_DENIED: + case CURLE_REMOTE_ACCESS_DENIED: + return NetworkCode::RemoteAccessDenied; + case CURLE_OPERATION_TIMEDOUT: + return NetworkCode::Timeout; + case CURLE_SSL_CONNECT_ERROR: + return NetworkCode::SSLConnectError; + case CURLE_SSL_CERTPROBLEM: + case CURLE_SSL_CACERT: + return NetworkCode::SSLCertError; + case CURLE_SEND_ERROR: + case CURLE_SEND_FAIL_REWIND: + return NetworkCode::SendDataFailed; + case CURLE_RECV_ERROR: + return NetworkCode::RecvDataFailed; + case CURLE_SSL_PINNEDPUBKEYNOTMATCH: + case CURLE_SSL_INVALIDCERTSTATUS: + case CURLE_SSL_CACERT_BADFILE: + case CURLE_SSL_CIPHER: + case CURLE_SSL_ENGINE_NOTFOUND: + case CURLE_SSL_ENGINE_SETFAILED: + case CURLE_USE_SSL_FAILED: + case CURLE_SSL_ENGINE_INITFAILED: + case CURLE_SSL_CRL_BADFILE: + case CURLE_SSL_ISSUER_ERROR: + case CURLE_SSL_SHUTDOWN_FAILED: + return 
NetworkCode::SSLOtherProblem; + case CURLE_FAILED_INIT: + default: + return NetworkCode::Other; + } +} + static size_t header_write_callback(char* buffer, size_t size, size_t nmemb, @@ -57,21 +98,21 @@ static size_t header_write_callback(char* buffer, return sizes; } -CURL* CreateCurlHandler(const std::string& method, +CURL* CreateCurlHandler(const string& method, bool httpsFlag, - const std::string& host, + const string& host, int32_t port, - const std::string& url, - const std::string& queryString, - const std::map& header, - const std::string& body, + const string& url, + const string& queryString, + const map& header, + const string& body, HttpResponse& response, curl_slist*& headers, uint32_t timeout, bool replaceHostWithIp, - const std::string& intf, + const string& intf, bool followRedirects, - std::optional tls) { + optional tls) { static DnsCache* dnsCache = DnsCache::GetInstance(); CURL* curl = curl_easy_init(); @@ -80,7 +121,7 @@ CURL* CreateCurlHandler(const std::string& method, } string totalUrl = httpsFlag ? 
"https://" : "http://"; - std::string hostIP; + string hostIP; if (replaceHostWithIp && dnsCache->GetIPFromDnsCache(host, hostIP)) { totalUrl.append(hostIP); } else { @@ -144,7 +185,7 @@ CURL* CreateCurlHandler(const std::string& method, return curl; } -bool SendHttpRequest(std::unique_ptr&& request, HttpResponse& response) { +bool SendHttpRequest(unique_ptr&& request, HttpResponse& response) { curl_slist* headers = NULL; CURL* curl = CreateCurlHandler(request->mMethod, request->mHTTPSFlag, @@ -170,17 +211,30 @@ bool SendHttpRequest(std::unique_ptr&& request, HttpResponse& respo while (true) { CURLcode res = curl_easy_perform(curl); if (res == CURLE_OK) { - long http_code = 0; - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code); - response.SetStatusCode(http_code); + long statusCode = 0; + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &statusCode); + curl_off_t responseTime; + curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME_T, &responseTime); + auto responseTimeMs = responseTime / 1000; + response.SetNetworkStatus(NetworkCode::Ok, ""); + response.SetStatusCode(statusCode); + response.SetResponseTime(chrono::milliseconds(responseTimeMs)); success = true; + LOG_DEBUG(sLogger, + ("send http request succeeded, host", request->mHost)( + "response time", ToString(responseTimeMs) + "ms")("try cnt", ToString(request->mTryCnt))); break; } else if (request->mTryCnt < request->mMaxTryCnt) { - LOG_WARNING(sLogger, - ("failed to send http request", "retry immediately")("request address", request.get())( - "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(res))); + LOG_DEBUG(sLogger, + ("failed to send http request", "retry immediately")("host", request->mHost)( + "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(res))); ++request->mTryCnt; } else { + auto errMsg = curl_easy_strerror(res); + response.SetNetworkStatus(GetNetworkStatus(res), errMsg); + LOG_DEBUG(sLogger, + ("failed to send http request", + "abort")("host", request->mHost)("try cnt", 
ToString(request->mTryCnt))("errMsg", errMsg)); break; } } @@ -191,4 +245,161 @@ bool SendHttpRequest(std::unique_ptr&& request, HttpResponse& respo return success; } +bool AddRequestToMultiCurlHandler(CURLM* multiCurl, unique_ptr&& request) { + curl_slist* headers = NULL; + CURL* curl = CreateCurlHandler(request->mMethod, + request->mHTTPSFlag, + request->mHost, + request->mPort, + request->mUrl, + request->mQueryString, + request->mHeader, + request->mBody, + request->mResponse, + headers, + request->mTimeout, + AppConfig::GetInstance()->IsHostIPReplacePolicyEnabled(), + AppConfig::GetInstance()->GetBindInterface(), + request->mFollowRedirects, + request->mTls); + if (curl == NULL) { + request->mResponse.SetNetworkStatus(NetworkCode::Other, "failed to init curl handler"); + LOG_ERROR(sLogger, ("failed to send request", "failed to init curl handler")("request address", request.get())); + request->OnSendDone(request->mResponse); + return false; + } + + request->mPrivateData = headers; + curl_easy_setopt(curl, CURLOPT_PRIVATE, request.get()); + request->mLastSendTime = chrono::system_clock::now(); + + CURLMcode res = curl_multi_add_handle(multiCurl, curl); + if (res != CURLM_OK) { + request->mResponse.SetNetworkStatus(NetworkCode::Other, "failed to add the easy curl handle to multi_handle"); + LOG_ERROR(sLogger, + ("failed to send request", "failed to add the easy curl handle to multi_handle")( + "errMsg", curl_multi_strerror(res))("request address", request.get())); + request->OnSendDone(request->mResponse); + curl_easy_cleanup(curl); + return false; + } + // let callback destruct the request + request.release(); + return true; +} + +void HandleCompletedAsynRequests(CURLM* multiCurl, int& runningHandlers) { + int msgsLeft = 0; + CURLMsg* msg = curl_multi_info_read(multiCurl, &msgsLeft); + while (msg) { + if (msg->msg == CURLMSG_DONE) { + bool requestReused = false; + CURL* handler = msg->easy_handle; + AsynHttpRequest* request = nullptr; + 
curl_easy_getinfo(handler, CURLINFO_PRIVATE, &request);
+            switch (msg->data.result) {
+                case CURLE_OK: {
+                    long statusCode = 0;
+                    curl_easy_getinfo(handler, CURLINFO_RESPONSE_CODE, &statusCode);
+                    curl_off_t responseTime;
+                    curl_easy_getinfo(handler, CURLINFO_TOTAL_TIME_T, &responseTime);
+                    auto responseTimeMs = responseTime / 1000;
+                    request->mResponse.SetNetworkStatus(NetworkCode::Ok, "");
+                    request->mResponse.SetStatusCode(statusCode);
+                    request->mResponse.SetResponseTime(chrono::milliseconds(responseTimeMs));
+                    LOG_DEBUG(
+                        sLogger,
+                        ("send http request succeeded, request address",
+                         request)("host", request->mHost)("response time", ToString(responseTimeMs) + "ms")(
+                            "try cnt", ToString(request->mTryCnt))("errMsg", curl_easy_strerror(msg->data.result)));
+                    request->OnSendDone(request->mResponse);
+                    break;
+                }
+                default:
+                    // considered as network error
+                    if (request->mTryCnt < request->mMaxTryCnt) {
+                        LOG_DEBUG(sLogger,
+                                  ("failed to send http request", "retry immediately")("request address",
+                                                                                       request)("host", request->mHost)(
+                                      "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(msg->data.result)));
+                        // free first, because mPrivateData will be reset in AddRequestToMultiCurlHandler
+                        if (request->mPrivateData) {
+                            curl_slist_free_all((curl_slist*)request->mPrivateData);
+                            request->mPrivateData = nullptr;
+                        }
+                        ++request->mTryCnt;
+                        AddRequestToMultiCurlHandler(multiCurl, unique_ptr(request));
+                        ++runningHandlers;
+                        requestReused = true;
+                    } else {
+                        auto errMsg = curl_easy_strerror(msg->data.result);
+                        request->mResponse.SetNetworkStatus(GetNetworkStatus(msg->data.result), errMsg);
+                        LOG_DEBUG(sLogger,
+                                  ("failed to send http request", "abort")("request address", request)(
+                                      "host", request->mHost)("try cnt", ToString(request->mTryCnt))("errMsg", errMsg));
+                        request->OnSendDone(request->mResponse);
+                    }
+                    break;
+            }
+
+            curl_multi_remove_handle(multiCurl, handler);
+            curl_easy_cleanup(handler);
+            if (!requestReused) {
+                if (request->mPrivateData) {
+                    
curl_slist_free_all((curl_slist*)request->mPrivateData); + } + delete request; + } + } + msg = curl_multi_info_read(multiCurl, &msgsLeft); + } +} + +void SendAsynRequests(CURLM* multiCurl) { + CURLMcode mc; + int runningHandlers = 0; + do { + if ((mc = curl_multi_perform(multiCurl, &runningHandlers)) != CURLM_OK) { + LOG_ERROR( + sLogger, + ("failed to call curl_multi_perform", "sleep 100ms and retry")("errMsg", curl_multi_strerror(mc))); + this_thread::sleep_for(chrono::milliseconds(100)); + continue; + } + HandleCompletedAsynRequests(multiCurl, runningHandlers); + + long curlTimeout = -1; + if ((mc = curl_multi_timeout(multiCurl, &curlTimeout)) != CURLM_OK) { + LOG_WARNING( + sLogger, + ("failed to call curl_multi_timeout", "use default timeout 1s")("errMsg", curl_multi_strerror(mc))); + } + struct timeval timeout { + 1, 0 + }; + if (curlTimeout >= 0) { + timeout.tv_sec = curlTimeout / 1000; + timeout.tv_usec = (curlTimeout % 1000) * 1000; + } + + int maxfd = -1; + fd_set fdread; + fd_set fdwrite; + fd_set fdexcep; + FD_ZERO(&fdread); + FD_ZERO(&fdwrite); + FD_ZERO(&fdexcep); + if ((mc = curl_multi_fdset(multiCurl, &fdread, &fdwrite, &fdexcep, &maxfd)) != CURLM_OK) { + LOG_ERROR(sLogger, ("failed to call curl_multi_fdset", "sleep 100ms")("errMsg", curl_multi_strerror(mc))); + } + if (maxfd == -1) { + // sleep min(timeout, 100ms) according to libcurl + int64_t sleepMs = (curlTimeout >= 0 && curlTimeout < 100) ? 
curlTimeout : 100; + this_thread::sleep_for(chrono::milliseconds(sleepMs)); + } else { + select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); + } + } while (runningHandlers); +} + } // namespace logtail diff --git a/core/common/http/Curl.h b/core/common/http/Curl.h index e1f311c017..ad21f876e8 100644 --- a/core/common/http/Curl.h +++ b/core/common/http/Curl.h @@ -28,6 +28,8 @@ namespace logtail { +NetworkCode GetNetworkStatus(CURLcode code); + CURL* CreateCurlHandler(const std::string& method, bool httpsFlag, const std::string& host, @@ -46,4 +48,8 @@ CURL* CreateCurlHandler(const std::string& method, bool SendHttpRequest(std::unique_ptr&& request, HttpResponse& response); +bool AddRequestToMultiCurlHandler(CURLM* multiCurl, std::unique_ptr&& request); +void SendAsynRequests(CURLM* multiCurl); +void HandleCompletedAsynRequests(CURLM* multiCurl, int& runningHandlers); + } // namespace logtail diff --git a/core/common/http/HttpRequest.cpp b/core/common/http/HttpRequest.cpp index 3f6fd215d7..c108032283 100644 --- a/core/common/http/HttpRequest.cpp +++ b/core/common/http/HttpRequest.cpp @@ -14,9 +14,45 @@ #include "common/http/HttpRequest.h" -DEFINE_FLAG_INT32(default_http_request_timeout_secs, "", 15); +DEFINE_FLAG_INT32(default_http_request_timeout_sec, "", 15); DEFINE_FLAG_INT32(default_http_request_max_try_cnt, "", 3); using namespace std; -namespace logtail {} // namespace logtail +namespace logtail { + +static unsigned char ToHex(unsigned char x) { + return x > 9 ? 
x + 55 : x + 48; +} + +static string UrlEncode(const string& str) { + string strTemp; + size_t length = str.length(); + for (size_t i = 0; i < length; i++) { + if (isalnum((unsigned char)str[i]) || (str[i] == '-') || (str[i] == '_') || (str[i] == '.') || (str[i] == '~')) + strTemp += str[i]; + else if (str[i] == ' ') + strTemp += "+"; + else { + strTemp += '%'; + strTemp += ToHex((unsigned char)str[i] >> 4); + strTemp += ToHex((unsigned char)str[i] % 16); + } + } + return strTemp; +} + +string GetQueryString(const map& parameters) { + string res; + for (auto it = parameters.begin(); it != parameters.end(); ++it) { + if (it != parameters.begin()) { + res.append("&"); + } + res.append(it->first); + res.append("="); + res.append(UrlEncode(it->second)); + } + return res; +} + +} // namespace logtail diff --git a/core/common/http/HttpRequest.h b/core/common/http/HttpRequest.h index 13ef2db70c..2218568344 100644 --- a/core/common/http/HttpRequest.h +++ b/core/common/http/HttpRequest.h @@ -25,7 +25,7 @@ #include "common/Flags.h" #include "common/http/HttpResponse.h" -DECLARE_FLAG_INT32(default_http_request_timeout_secs); +DECLARE_FLAG_INT32(default_http_request_timeout_sec); DECLARE_FLAG_INT32(default_http_request_max_try_cnt); namespace logtail { @@ -49,7 +49,7 @@ struct HttpRequest { std::string mBody; std::string mHost; int32_t mPort; - uint32_t mTimeout = static_cast(INT32_FLAG(default_http_request_timeout_secs)); + uint32_t mTimeout = static_cast(INT32_FLAG(default_http_request_timeout_sec)); uint32_t mMaxTryCnt = static_cast(INT32_FLAG(default_http_request_max_try_cnt)); bool mFollowRedirects = false; std::optional mTls = std::nullopt; @@ -65,7 +65,7 @@ struct HttpRequest { const std::string& query, const std::map& header, const std::string& body, - uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_secs)), + uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_sec)), uint32_t maxTryCnt = 
static_cast(INT32_FLAG(default_http_request_max_try_cnt)), bool followRedirects = false, std::optional tls = std::nullopt) @@ -98,17 +98,28 @@ struct AsynHttpRequest : public HttpRequest { const std::map& header, const std::string& body, HttpResponse&& response = HttpResponse(), - uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_secs)), + uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_sec)), uint32_t maxTryCnt = static_cast(INT32_FLAG(default_http_request_max_try_cnt)), bool followRedirects = false, std::optional tls = std::nullopt) - : HttpRequest( - method, httpsFlag, host, port, url, query, header, body, timeout, maxTryCnt, followRedirects, std::move(tls)), + : HttpRequest(method, + httpsFlag, + host, + port, + url, + query, + header, + body, + timeout, + maxTryCnt, + followRedirects, + std::move(tls)), mResponse(std::move(response)) {} virtual bool IsContextValid() const = 0; virtual void OnSendDone(HttpResponse& response) = 0; }; +std::string GetQueryString(const std::map& parameters); } // namespace logtail diff --git a/core/common/http/HttpResponse.h b/core/common/http/HttpResponse.h index c4282df428..88bdaab1ab 100644 --- a/core/common/http/HttpResponse.h +++ b/core/common/http/HttpResponse.h @@ -16,8 +16,7 @@ #pragma once -#include - +#include #include #include #include @@ -82,6 +81,8 @@ class HttpResponse { : mHeader(compareHeader), mBody(body, bodyDeleter), mWriteCallback(callback) {} int32_t GetStatusCode() const { return mStatusCode; } + void SetStatusCode(int32_t code) { mStatusCode = code; } + const std::map& GetHeader() const { return mHeader; } template @@ -94,67 +95,31 @@ class HttpResponse { return static_cast(mBody.get()); } - void SetStatusCode(int32_t code) { mStatusCode = code; } + void SetResponseTime(const std::chrono::milliseconds& time) { mResponseTime = time; } + std::chrono::milliseconds GetResponseTime() const { return mResponseTime; } - void SetNetworkStatus(CURLcode code) { - 
mNetworkStatus.mMessage = curl_easy_strerror(code); - // please refer to https://curl.se/libcurl/c/libcurl-errors.html - switch (code) { - case CURLE_OK: - mNetworkStatus.mCode = NetworkCode::Ok; - break; - case CURLE_COULDNT_CONNECT: - mNetworkStatus.mCode = NetworkCode::ConnectionFailed; - break; - case CURLE_LOGIN_DENIED: - case CURLE_REMOTE_ACCESS_DENIED: - mNetworkStatus.mCode = NetworkCode::RemoteAccessDenied; - break; - case CURLE_OPERATION_TIMEDOUT: - mNetworkStatus.mCode = NetworkCode::Timeout; - break; - case CURLE_SSL_CONNECT_ERROR: - mNetworkStatus.mCode = NetworkCode::SSLConnectError; - break; - case CURLE_SSL_CERTPROBLEM: - case CURLE_SSL_CACERT: - mNetworkStatus.mCode = NetworkCode::SSLCertError; - break; - case CURLE_SEND_ERROR: - case CURLE_SEND_FAIL_REWIND: - mNetworkStatus.mCode = NetworkCode::SendDataFailed; - break; - case CURLE_RECV_ERROR: - mNetworkStatus.mCode = NetworkCode::RecvDataFailed; - break; - case CURLE_SSL_PINNEDPUBKEYNOTMATCH: - case CURLE_SSL_INVALIDCERTSTATUS: - case CURLE_SSL_CACERT_BADFILE: - case CURLE_SSL_CIPHER: - case CURLE_SSL_ENGINE_NOTFOUND: - case CURLE_SSL_ENGINE_SETFAILED: - case CURLE_USE_SSL_FAILED: - case CURLE_SSL_ENGINE_INITFAILED: - case CURLE_SSL_CRL_BADFILE: - case CURLE_SSL_ISSUER_ERROR: - case CURLE_SSL_SHUTDOWN_FAILED: - mNetworkStatus.mCode = NetworkCode::SSLOtherProblem; - break; - case CURLE_FAILED_INIT: - default: - mNetworkStatus.mCode = NetworkCode::Other; - break; - } + const NetworkStatus& GetNetworkStatus() { return mNetworkStatus; } + void SetNetworkStatus(NetworkCode code, const std::string& msg) { + mNetworkStatus.mCode = code; + mNetworkStatus.mMessage = msg; } - const NetworkStatus& GetNetworkStatus() { return mNetworkStatus; } +#ifdef APSARA_UNIT_TEST_MAIN + template + void SetBody(const T& body) { + *mBody = body; + } + + void AddHeader(const std::string& key, const std::string& value) { mHeader[key] = value; } +#endif private: int32_t mStatusCode = 0; // 0 means no response from server - 
NetworkStatus mNetworkStatus; // 0 means no error + NetworkStatus mNetworkStatus; std::map mHeader; std::unique_ptr> mBody; size_t (*mWriteCallback)(char*, size_t, size_t, void*) = nullptr; + std::chrono::milliseconds mResponseTime = std::chrono::milliseconds::max(); #ifdef APSARA_UNIT_TEST_MAIN friend class HttpSinkMock; diff --git a/core/config/common_provider/CommonConfigProvider.cpp b/core/config/common_provider/CommonConfigProvider.cpp index e86808d7a2..2e278c6ecf 100644 --- a/core/config/common_provider/CommonConfigProvider.cpp +++ b/core/config/common_provider/CommonConfigProvider.cpp @@ -26,6 +26,8 @@ #include "common/StringTools.h" #include "common/UUIDUtil.h" #include "common/YamlUtil.h" +#include "common/http/Constant.h" +#include "common/http/Curl.h" #include "common/version.h" #include "config/ConfigUtil.h" #include "config/PipelineConfig.h" @@ -33,9 +35,6 @@ #include "constants/Constants.h" #include "logger/Logger.h" #include "monitor/Monitor.h" -#include "sdk/Common.h" -#include "sdk/CurlImp.h" -#include "sdk/Exception.h" using namespace std; @@ -43,7 +42,9 @@ DEFINE_FLAG_INT32(heartbeat_interval, "second", 10); namespace logtail { -std::string CommonConfigProvider::configVersion = "version"; +const string AGENT = "/Agent"; + +string CommonConfigProvider::configVersion = "version"; void CommonConfigProvider::Init(const string& dir) { sName = "common config provider"; @@ -300,7 +301,7 @@ configserver::proto::v2::HeartbeatRequest CommonConfigProvider::PrepareHeartbeat bool CommonConfigProvider::SendHeartbeat(const configserver::proto::v2::HeartbeatRequest& heartbeatReq, configserver::proto::v2::HeartbeatResponse& heartbeatResponse) { - string operation = sdk::CONFIGSERVERAGENT; + string operation = AGENT; operation.append("/").append("Heartbeat"); string reqBody; heartbeatReq.SerializeToString(&reqBody); @@ -323,31 +324,25 @@ bool CommonConfigProvider::SendHttpRequest(const string& operation, // LCOV_EXCL_START ConfigServerAddress configServerAddress = 
GetOneConfigServerAddress(false); map httpHeader; - httpHeader[sdk::CONTENT_TYPE] = sdk::TYPE_LOG_PROTOBUF; - sdk::HttpMessage httpResponse; - httpResponse.header[sdk::X_LOG_REQUEST_ID] = requestId; - sdk::CurlClient client; - - try { - client.Send(sdk::HTTP_POST, - configServerAddress.host, - configServerAddress.port, - operation, - "", - httpHeader, - reqBody, - INT32_FLAG(sls_client_send_timeout), - httpResponse, - "", - false); - resp.swap(httpResponse.content); - return true; - } catch (const sdk::LOGException& e) { + httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; + + HttpResponse httpResponse; + if (!logtail::SendHttpRequest(make_unique(HTTP_POST, + false, + configServerAddress.host, + configServerAddress.port, + operation, + "", + httpHeader, + reqBody), + httpResponse)) { LOG_WARNING(sLogger, - (configType, "fail")("reqBody", reqBody)("errCode", e.GetErrorCode())("errMsg", e.GetMessage())( - "host", configServerAddress.host)("port", configServerAddress.port)); + (configType, "fail")("reqBody", + reqBody)("host", configServerAddress.host)("port", configServerAddress.port)); return false; } + resp = *httpResponse.GetBody(); + return true; // LCOV_EXCL_STOP } @@ -498,7 +493,7 @@ bool CommonConfigProvider::FetchInstanceConfigFromServer( reqConfig->set_name(config.name()); reqConfig->set_version(config.version()); } - string operation = sdk::CONFIGSERVERAGENT; + string operation = AGENT; operation.append("/FetchInstanceConfig"); string reqBody; fetchConfigRequest.SerializeToString(&reqBody); @@ -525,7 +520,7 @@ bool CommonConfigProvider::FetchPipelineConfigFromServer( reqConfig->set_name(config.name()); reqConfig->set_version(config.version()); } - string operation = sdk::CONFIGSERVERAGENT; + string operation = AGENT; operation.append("/FetchPipelineConfig"); string reqBody; fetchConfigRequest.SerializeToString(&reqBody); diff --git a/core/config/common_provider/LegacyCommonConfigProvider.cpp b/core/config/common_provider/LegacyCommonConfigProvider.cpp index 
334b4dc85b..9231cee6db 100644 --- a/core/config/common_provider/LegacyCommonConfigProvider.cpp +++ b/core/config/common_provider/LegacyCommonConfigProvider.cpp @@ -22,14 +22,14 @@ #include "app_config/AppConfig.h" #include "application/Application.h" +#include "common/EncodingUtil.h" #include "common/LogtailCommonFlags.h" #include "common/StringTools.h" +#include "common/http/Constant.h" +#include "common/http/Curl.h" #include "common/version.h" #include "logger/Logger.h" #include "monitor/Monitor.h" -#include "sdk/Common.h" -#include "sdk/CurlImp.h" -#include "sdk/Exception.h" using namespace std; @@ -37,6 +37,8 @@ DEFINE_FLAG_INT32(config_update_interval, "second", 10); namespace logtail { +const string AGENT = "/Agent"; + void LegacyCommonConfigProvider::Init(const string& dir) { ConfigProvider::Init(dir); @@ -157,7 +159,7 @@ google::protobuf::RepeatedPtrField LegacyCommonConfigProvider::SendHeartbeat(const ConfigServerAddress& configServerAddress) { configserver::proto::HeartBeatRequest heartBeatReq; configserver::proto::AgentAttributes attributes; - string requestID = sdk::Base64Enconde(string("heartbeat").append(to_string(time(NULL)))); + string requestID = Base64Enconde(string("heartbeat").append(to_string(time(NULL)))); heartBeatReq.set_request_id(requestID); heartBeatReq.set_agent_id(Application::GetInstance()->GetInstanceId()); heartBeatReq.set_agent_type("iLogtail"); @@ -179,46 +181,41 @@ LegacyCommonConfigProvider::SendHeartbeat(const ConfigServerAddress& configServe } heartBeatReq.mutable_pipeline_configs()->MergeFrom(pipelineConfigs); - string operation = sdk::CONFIGSERVERAGENT; + string operation = AGENT; operation.append("/").append("HeartBeat"); map httpHeader; - httpHeader[sdk::CONTENT_TYPE] = sdk::TYPE_LOG_PROTOBUF; + httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; string reqBody; heartBeatReq.SerializeToString(&reqBody); - sdk::HttpMessage httpResponse; - httpResponse.header[sdk::X_LOG_REQUEST_ID] = "ConfigServer"; - sdk::CurlClient client; 
google::protobuf::RepeatedPtrField emptyResult; - try { - client.Send(sdk::HTTP_POST, - configServerAddress.host, - configServerAddress.port, - operation, - "", - httpHeader, - reqBody, - INT32_FLAG(sls_client_send_timeout), - httpResponse, - "", - false); - configserver::proto::HeartBeatResponse heartBeatResp; - heartBeatResp.ParseFromString(httpResponse.content); - - if (0 != strcmp(heartBeatResp.request_id().c_str(), requestID.c_str())) - return emptyResult; - - LOG_DEBUG(sLogger, - ("SendHeartbeat", "success")("reqBody", reqBody)("requestId", heartBeatResp.request_id())( - "statusCode", heartBeatResp.code())); - - return heartBeatResp.pipeline_check_results(); - } catch (const sdk::LOGException& e) { + HttpResponse httpResponse; + if (!logtail::SendHttpRequest(make_unique(HTTP_POST, + false, + configServerAddress.host, + configServerAddress.port, + operation, + "", + httpHeader, + reqBody), + httpResponse)) { LOG_WARNING(sLogger, - ("SendHeartbeat", "fail")("reqBody", reqBody)("errCode", e.GetErrorCode())( - "errMsg", e.GetMessage())("host", configServerAddress.host)("port", configServerAddress.port)); + ("SendHeartbeat", + "fail")("reqBody", reqBody)("host", configServerAddress.host)("port", configServerAddress.port)); return emptyResult; } + + configserver::proto::HeartBeatResponse heartBeatResp; + heartBeatResp.ParseFromString(*httpResponse.GetBody()); + + if (0 != strcmp(heartBeatResp.request_id().c_str(), requestID.c_str())) + return emptyResult; + + LOG_DEBUG(sLogger, + ("SendHeartbeat", "success")("reqBody", reqBody)("requestId", heartBeatResp.request_id())( + "statusCode", heartBeatResp.code())); + + return heartBeatResp.pipeline_check_results(); } google::protobuf::RepeatedPtrField LegacyCommonConfigProvider::FetchPipelineConfig( @@ -226,7 +223,7 @@ google::protobuf::RepeatedPtrField LegacyComm const google::protobuf::RepeatedPtrField& requestConfigs) { configserver::proto::FetchPipelineConfigRequest fetchConfigReq; string requestID - = 
sdk::Base64Enconde(Application::GetInstance()->GetInstanceId().append("_").append(to_string(time(NULL)))); + = Base64Enconde(Application::GetInstance()->GetInstanceId().append("_").append(to_string(time(NULL)))); fetchConfigReq.set_request_id(requestID); fetchConfigReq.set_agent_id(Application::GetInstance()->GetInstanceId()); @@ -242,47 +239,39 @@ google::protobuf::RepeatedPtrField LegacyComm } fetchConfigReq.mutable_req_configs()->MergeFrom(configInfos); - string operation = sdk::CONFIGSERVERAGENT; + string operation = AGENT; operation.append("/").append("FetchPipelineConfig"); map httpHeader; - httpHeader[sdk::CONTENT_TYPE] = sdk::TYPE_LOG_PROTOBUF; + httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; string reqBody; fetchConfigReq.SerializeToString(&reqBody); - sdk::HttpMessage httpResponse; - httpResponse.header[sdk::X_LOG_REQUEST_ID] = "ConfigServer"; - sdk::CurlClient client; google::protobuf::RepeatedPtrField emptyResult; - try { - client.Send(sdk::HTTP_POST, - configServerAddress.host, - configServerAddress.port, - operation, - "", - httpHeader, - reqBody, - INT32_FLAG(sls_client_send_timeout), - httpResponse, - "", - false); - - configserver::proto::FetchPipelineConfigResponse fetchConfigResp; - fetchConfigResp.ParseFromString(httpResponse.content); - - if (0 != strcmp(fetchConfigResp.request_id().c_str(), requestID.c_str())) - return emptyResult; - - LOG_DEBUG(sLogger, - ("GetConfigUpdateInfos", "success")("reqBody", reqBody)("requestId", fetchConfigResp.request_id())( - "statusCode", fetchConfigResp.code())); - - return fetchConfigResp.config_details(); - } catch (const sdk::LOGException& e) { - LOG_WARNING(sLogger, - ("GetConfigUpdateInfos", "fail")("reqBody", reqBody)("errCode", e.GetErrorCode())("errMsg", - e.GetMessage())); + HttpResponse httpResponse; + if (!logtail::SendHttpRequest(make_unique(HTTP_POST, + false, + configServerAddress.host, + configServerAddress.port, + operation, + "", + httpHeader, + reqBody), + httpResponse)) { + 
LOG_WARNING(sLogger, ("GetConfigUpdateInfos", "fail")("reqBody", reqBody)); return emptyResult; } + + configserver::proto::FetchPipelineConfigResponse fetchConfigResp; + fetchConfigResp.ParseFromString(*httpResponse.GetBody()); + + if (0 != strcmp(fetchConfigResp.request_id().c_str(), requestID.c_str())) + return emptyResult; + + LOG_DEBUG(sLogger, + ("GetConfigUpdateInfos", "success")("reqBody", reqBody)("requestId", fetchConfigResp.request_id())( + "statusCode", fetchConfigResp.code())); + + return fetchConfigResp.config_details(); } void LegacyCommonConfigProvider::UpdateRemoteConfig( diff --git a/core/config/watcher/PipelineConfigWatcher.cpp b/core/config/watcher/PipelineConfigWatcher.cpp index fb914c4d55..0cd8c00570 100644 --- a/core/config/watcher/PipelineConfigWatcher.cpp +++ b/core/config/watcher/PipelineConfigWatcher.cpp @@ -440,7 +440,7 @@ bool PipelineConfigWatcher::CheckUnchangedConfig(const std::string& configName, return false; } if (!IsConfigEnabled(configName, *detail)) { - LOG_INFO(sLogger, ("unchanged config found and disabled", "skip current object")("config", configName)); + LOG_DEBUG(sLogger, ("unchanged config found and disabled", "skip current object")("config", configName)); return false; } PipelineConfig config(configName, std::move(detail)); diff --git a/core/file_server/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp index 012d83967c..6f0dbacebf 100644 --- a/core/file_server/event_handler/LogInput.cpp +++ b/core/file_server/event_handler/LogInput.cpp @@ -39,9 +39,6 @@ #include "logger/Logger.h" #include "monitor/AlarmManager.h" #include "monitor/Monitor.h" -#ifdef __ENTERPRISE__ -#include "config/provider/EnterpriseConfigProvider.h" -#endif #include "file_server/FileServer.h" using namespace std; @@ -60,8 +57,6 @@ DEFINE_FLAG_BOOL(force_close_file_on_container_stopped, "whether close file handler immediately when associate container stopped", false); -DECLARE_FLAG_BOOL(send_prefer_real_ip); - namespace logtail { 
LogInput::LogInput() : mAccessMainThreadRWL(ReadWriteLock::PREFER_WRITER) { diff --git a/core/file_server/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp index 08b7693354..d5024ca48c 100644 --- a/core/file_server/reader/LogFileReader.cpp +++ b/core/file_server/reader/LogFileReader.cpp @@ -53,7 +53,6 @@ #include "pipeline/queue/QueueKeyManager.h" #include "plugin/processor/inner/ProcessorParseContainerLogNative.h" #include "rapidjson/document.h" -#include "sdk/Common.h" using namespace sls_logs; using namespace std; @@ -90,6 +89,8 @@ namespace logtail { size_t LogFileReader::BUFFER_SIZE = 1024 * 512; // 512KB +const int64_t kFirstHashKeySeqID = 1; + LogFileReader* LogFileReader::CreateLogFileReader(const string& hostLogPathDir, const string& hostLogPathFile, const DevInode& devInode, @@ -439,7 +440,7 @@ void LogFileReader::initExactlyOnce(uint32_t concurrency) { auto partitionRange = detail::getPartitionRange(partIdx, mEOOption->concurrency, kPartitionCount); auto partitionID = partitionRange.first + rand() % (partitionRange.second - partitionRange.first + 1); rangeCpt->data.set_hash_key(GenerateHashKey(baseHashKey, partitionID, kPartitionCount)); - rangeCpt->data.set_sequence_id(sdk::kFirstHashKeySeqID); + rangeCpt->data.set_sequence_id(kFirstHashKeySeqID); rangeCpt->data.set_committed(false); } LOG_DEBUG(sLogger, diff --git a/core/models/EventPool.cpp b/core/models/EventPool.cpp index 5e50091750..0c045438b5 100644 --- a/core/models/EventPool.cpp +++ b/core/models/EventPool.cpp @@ -19,7 +19,7 @@ #include "common/Flags.h" #include "logger/Logger.h" -DEFINE_FLAG_INT32(event_pool_gc_interval_secs, "", 60); +DEFINE_FLAG_INT32(event_pool_gc_interval_sec, "", 60); using namespace std; @@ -143,7 +143,7 @@ void DoGC(vector& pool, vector& poolBak, size_t& minUnusedCnt, mutex* mu } void EventPool::CheckGC() { - if (time(nullptr) - mLastGCTime > INT32_FLAG(event_pool_gc_interval_secs)) { + if (time(nullptr) - mLastGCTime > 
INT32_FLAG(event_pool_gc_interval_sec)) { if (mEnableLock) { lock_guard lock(mPoolMux); DoGC(mLogEventPool, mLogEventPoolBak, mMinUnusedLogEventsCnt, &mPoolBakMux, "log"); diff --git a/core/monitor/Monitor.cpp b/core/monitor/Monitor.cpp index 34fabe9059..b00bbb9594 100644 --- a/core/monitor/Monitor.cpp +++ b/core/monitor/Monitor.cpp @@ -41,7 +41,6 @@ #include "plugin/flusher/sls/FlusherSLS.h" #include "protobuf/sls/sls_logs.pb.h" #include "runner/FlusherRunner.h" -#include "sdk/Common.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -52,7 +51,6 @@ using namespace std; using namespace sls_logs; DEFINE_FLAG_BOOL(logtail_dump_monitor_info, "enable to dump Logtail monitor info (CPU, mem)", false); -DECLARE_FLAG_BOOL(send_prefer_real_ip); DECLARE_FLAG_BOOL(check_profile_region); namespace logtail { diff --git a/core/monitor/profile_sender/ProfileSender.cpp b/core/monitor/profile_sender/ProfileSender.cpp index 2c6b942c67..ec2d50ea30 100644 --- a/core/monitor/profile_sender/ProfileSender.cpp +++ b/core/monitor/profile_sender/ProfileSender.cpp @@ -27,7 +27,6 @@ #endif #include "app_config/AppConfig.h" #include "plugin/flusher/sls/SLSClientManager.h" -#include "sdk/Exception.h" // TODO: temporarily used #include "common/compression/CompressorFactory.h" @@ -153,7 +152,7 @@ void ProfileSender::SendRunningStatus(sls_logs::LogGroup& logGroup) { string region = "cn-shanghai"; string project = "ilogtail-community-edition"; string logstore = "ilogtail-online"; - string endpoint = region + ".log.aliyuncs.com"; + string host = project + "." 
+ region + ".log.aliyuncs.com"; Json::Value logtailStatus; logtailStatus["__topic__"] = "logtail_status_profile"; @@ -171,26 +170,14 @@ void ProfileSender::SendRunningStatus(sls_logs::LogGroup& logGroup) { } logtailStatus["__logs__"][0] = status; string logBody = logtailStatus.toStyledString(); - sdk::Client client("", endpoint, INT32_FLAG(sls_client_send_timeout)); - client.SetPort(AppConfig::GetInstance()->GetDataServerPort()); - try { - string res; - if (!CompressLz4(logBody, res)) { - LOG_ERROR(sLogger, ("lz4 compress data", "fail")); - return; - } - - sdk::PostLogStoreLogsResponse resp - = client.PostLogUsingWebTracking(project, logstore, sls_logs::SLS_CMP_LZ4, res, logBody.size()); - LOG_DEBUG(sLogger, - ("SendToProfileProject", - "success")("logBody", logBody)("requestId", resp.requestId)("statusCode", resp.statusCode)); - } catch (const sdk::LOGException& e) { - LOG_DEBUG(sLogger, - ("SendToProfileProject", "fail")("logBody", logBody)("errCode", e.GetErrorCode())("errMsg", - e.GetMessage())); + string res; + if (!CompressLz4(logBody, res)) { + LOG_ERROR(sLogger, ("lz4 compress data", "fail")); + return; } + + PutWebTracking(host, true, logstore, "lz4", res, logBody.size()); } } // namespace logtail diff --git a/core/pipeline/GlobalConfig.cpp b/core/pipeline/GlobalConfig.cpp index 15e98c10bd..91a3ed43bd 100644 --- a/core/pipeline/GlobalConfig.cpp +++ b/core/pipeline/GlobalConfig.cpp @@ -94,7 +94,7 @@ bool GlobalConfig::Init(const Json::Value& config, const PipelineContext& ctx, J } // Priority - uint32_t priority = 0; + uint32_t priority = 1; if (!GetOptionalUIntParam(config, "Priority", priority, errorMsg)) { PARAM_WARNING_DEFAULT(ctx.GetLogger(), ctx.GetAlarm(), diff --git a/core/pipeline/plugin/interface/HttpFlusher.h b/core/pipeline/plugin/interface/HttpFlusher.h index a8bba21296..f68eab2bd2 100644 --- a/core/pipeline/plugin/interface/HttpFlusher.h +++ b/core/pipeline/plugin/interface/HttpFlusher.h @@ -16,6 +16,8 @@ #pragma once +#include + #include 
"common/http/HttpResponse.h" #include "pipeline/plugin/interface/Flusher.h" #include "pipeline/queue/SenderQueueItem.h" @@ -27,7 +29,7 @@ class HttpFlusher : public Flusher { public: virtual ~HttpFlusher() = default; - virtual bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const = 0; + virtual bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem, std::string* errMsg) = 0; virtual void OnSendDone(const HttpResponse& response, SenderQueueItem* item) = 0; virtual SinkType GetSinkType() override { return SinkType::HTTP; } diff --git a/core/pipeline/queue/SLSSenderQueueItem.h b/core/pipeline/queue/SLSSenderQueueItem.h index 1d34efe40f..2ee185dbb0 100644 --- a/core/pipeline/queue/SLSSenderQueueItem.h +++ b/core/pipeline/queue/SLSSenderQueueItem.h @@ -32,7 +32,7 @@ struct SLSSenderQueueItem : public SenderQueueItem { std::string mLogstore; RangeCheckpointPtr mExactlyOnceCheckpoint; - std::string mCurrentEndpoint; + std::string mCurrentHost; bool mRealIpFlag = false; int32_t mLastLogWarningTime = 0; // temporaily used diff --git a/core/plugin/flusher/sls/DiskBufferWriter.cpp b/core/plugin/flusher/sls/DiskBufferWriter.cpp index 2d8e6e22f4..f613da5f30 100644 --- a/core/plugin/flusher/sls/DiskBufferWriter.cpp +++ b/core/plugin/flusher/sls/DiskBufferWriter.cpp @@ -28,10 +28,14 @@ #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SLSSenderQueueItem.h" #include "plugin/flusher/sls/FlusherSLS.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif #include "plugin/flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSConstant.h" +#include "plugin/flusher/sls/SendResult.h" #include "protobuf/sls/sls_logs.pb.h" #include "provider/Provider.h" -#include "sdk/Exception.h" DEFINE_FLAG_INT32(write_secondary_wait_timeout, "interval of dump seconary buffer from memory to file, seconds", 2); DEFINE_FLAG_INT32(buffer_file_alive_interval, "the max alive time of a 
bufferfile, 5 minutes", 300); @@ -41,13 +45,60 @@ DEFINE_FLAG_INT32(secondary_buffer_count_limit, "data ready for write buffer fil DEFINE_FLAG_INT32(send_retry_sleep_interval, "sleep microseconds when sync send fail, 50ms", 50000); DEFINE_FLAG_INT32(buffer_check_period, "check logtail local storage buffer period", 60); DEFINE_FLAG_INT32(unauthorized_wait_interval, "", 1); +DEFINE_FLAG_INT32(send_retrytimes, "how many times should retry if PostLogStoreLogs operation fail", 3); DECLARE_FLAG_INT32(discard_send_fail_interval); using namespace std; - namespace logtail { +#ifdef __ENTERPRISE__ +static EndpointMode GetEndpointMode(sls_logs::EndpointMode mode) { + switch (mode) { + case sls_logs::EndpointMode::DEFAULT: + return EndpointMode::DEFAULT; + case sls_logs::EndpointMode::ACCELERATE: + return EndpointMode::ACCELERATE; + case sls_logs::EndpointMode::CUSTOM: + return EndpointMode::CUSTOM; + } + return EndpointMode::DEFAULT; +} + +static sls_logs::EndpointMode GetEndpointMode(EndpointMode mode) { + switch (mode) { + case EndpointMode::DEFAULT: + return sls_logs::EndpointMode::DEFAULT; + case EndpointMode::ACCELERATE: + return sls_logs::EndpointMode::ACCELERATE; + case EndpointMode::CUSTOM: + return sls_logs::EndpointMode::CUSTOM; + } + return sls_logs::EndpointMode::DEFAULT; +} + +static const string kAKErrorMsg = "can not get valid access key"; +#endif + +static const string kNoHostErrorMsg = "can not get available host"; + +static const string& GetSLSCompressTypeString(sls_logs::SlsCompressType compressType) { + switch (compressType) { + case sls_logs::SLS_CMP_NONE: { + static string none = ""; + return none; + } + case sls_logs::SLS_CMP_ZSTD: { + static string zstd = "zstd"; + return zstd; + } + default: { + static string lz4 = "lz4"; + return lz4; + } + } +} + const int32_t DiskBufferWriter::BUFFER_META_BASE_SIZE = 65536; void DiskBufferWriter::Init() { @@ -146,13 +197,6 @@ void DiskBufferWriter::BufferSenderThread() { LOG_INFO(sLogger, ("disk buffer sender", 
"started")); unique_lock lock(mBufferSenderThreadRunningMux); while (mIsSendBufferThreadRunning) { - if (!SLSClientManager::GetInstance()->HasNetworkAvailable()) { - if (mStopCV.wait_for( - lock, chrono::seconds(mCheckPeriod), [this]() { return !mIsSendBufferThreadRunning; })) { - break; - } - continue; - } vector filesToSend; if (!LoadFileToSend(mBufferDivideTime, filesToSend)) { if (mStopCV.wait_for( @@ -200,6 +244,9 @@ void DiskBufferWriter::BufferSenderThread() { "check header of buffer file failed, delete file: " + fileName); } } +#ifdef __ENTERPRISE__ + mCandidateHostsInfos.clear(); +#endif // mIsSendingBuffer = false; lock.lock(); if (mStopCV.wait_for(lock, chrono::seconds(mCheckPeriod), [this]() { return !mIsSendBufferThreadRunning; })) { @@ -386,7 +433,7 @@ bool DiskBufferWriter::ReadNextEncryption(int32_t& pos, } } else { bufferMeta.set_project(encodedInfo); - bufferMeta.set_endpoint(FlusherSLS::GetDefaultRegion()); // new mode + bufferMeta.set_region(FlusherSLS::GetDefaultRegion()); // new mode bufferMeta.set_aliuid(""); } if (!bufferMeta.has_compresstype()) { @@ -395,6 +442,14 @@ bool DiskBufferWriter::ReadNextEncryption(int32_t& pos, if (!bufferMeta.has_telemetrytype()) { bufferMeta.set_telemetrytype(sls_logs::SLS_TELEMETRY_TYPE_LOGS); } +#ifdef __ENTERPRISE__ + if (!bufferMeta.has_endpointmode()) { + bufferMeta.set_endpointmode(sls_logs::EndpointMode::DEFAULT); + } +#endif + if (!bufferMeta.has_endpoint()) { + bufferMeta.set_endpoint(""); + } buffer = new char[meta.mEncryptionSize + 1]; nbytes = fread(buffer, sizeof(char), meta.mEncryptionSize, fin); @@ -480,20 +535,72 @@ void DiskBufferWriter::SendEncryptionBuffer(const std::string& filename, int32_t } } if (!sendResult) { - string errorCode; - SendResult res = SendBufferFileData(bufferMeta, logData, errorCode); - if (res == SEND_OK) - sendResult = true; - else if (res == SEND_DISCARD_ERROR || res == SEND_PARAMETER_INVALID) { - AlarmManager::GetInstance()->SendAlarm(SEND_DATA_FAIL_ALARM, - 
string("send buffer file fail, rawsize:") - + ToString(bufferMeta.rawsize()) - + "errorCode: " + errorCode, - bufferMeta.project(), - bufferMeta.logstore(), - ""); - sendResult = true; - discardCount++; + time_t beginTime = time(nullptr); + while (true) { + string host; + auto response = SendBufferFileData(bufferMeta, logData, host); + SendResult sendRes = SEND_OK; + if (response.mStatusCode != 200) { + sendRes = ConvertErrorCode(response.mErrorCode); + } + switch (sendRes) { + case SEND_OK: + sendResult = true; + break; + case SEND_NETWORK_ERROR: + case SEND_SERVER_ERROR: + if (response.mErrorMsg != kNoHostErrorMsg) { + LOG_WARNING( + sLogger, + ("send data to SLS fail", "retry later")("request id", response.mRequestId)( + "error_code", response.mErrorCode)("error_message", response.mErrorMsg)( + "endpoint", host)("projectName", bufferMeta.project())( + "logstore", bufferMeta.logstore())("rawsize", bufferMeta.rawsize())); + } + usleep(INT32_FLAG(send_retry_sleep_interval)); + break; + case SEND_QUOTA_EXCEED: + AlarmManager::GetInstance()->SendAlarm(SEND_QUOTA_EXCEED_ALARM, + "error_code: " + response.mErrorCode + + ", error_message: " + response.mErrorMsg, + bufferMeta.project(), + bufferMeta.logstore(), + ""); + // no region + if (!GetProfileSender()->IsProfileData("", bufferMeta.project(), bufferMeta.logstore())) + LOG_WARNING( + sLogger, + ("send data to SLS fail", "retry later")("request id", response.mRequestId)( + "error_code", response.mErrorCode)("error_message", response.mErrorMsg)( + "endpoint", host)("projectName", bufferMeta.project())( + "logstore", bufferMeta.logstore())("rawsize", bufferMeta.rawsize())); + usleep(INT32_FLAG(quota_exceed_wait_interval)); + break; + case SEND_UNAUTHORIZED: + usleep(INT32_FLAG(unauthorized_wait_interval)); + break; + default: + sendResult = true; + discardCount++; + break; + } +#ifdef __ENTERPRISE__ + if (sendRes != SEND_NETWORK_ERROR && sendRes != SEND_SERVER_ERROR) { + bool hasAuthError + = sendRes == 
SEND_UNAUTHORIZED && response.mErrorMsg != kAKErrorMsg; + EnterpriseSLSClientManager::GetInstance()->UpdateAccessKeyStatus(bufferMeta.aliuid(), + !hasAuthError); + EnterpriseSLSClientManager::GetInstance()->UpdateProjectAnonymousWriteStatus( + bufferMeta.project(), !hasAuthError); + } +#endif + if (time(nullptr) - beginTime >= INT32_FLAG(discard_send_fail_interval)) { + sendResult = true; + discardCount++; + } + if (sendResult) { + break; + } } } } @@ -656,7 +763,7 @@ bool DiskBufferWriter::SendToBufferFile(SenderQueueItem* dataPtr) { sls_logs::LogtailBufferMeta bufferMeta; bufferMeta.set_project(flusher->mProject); - bufferMeta.set_endpoint(flusher->mRegion); + bufferMeta.set_region(flusher->mRegion); bufferMeta.set_aliuid(flusher->mAliuid); bufferMeta.set_logstore(data->mLogstore); bufferMeta.set_datatype(int32_t(data->mType)); @@ -664,6 +771,10 @@ bool DiskBufferWriter::SendToBufferFile(SenderQueueItem* dataPtr) { bufferMeta.set_shardhashkey(data->mShardHashKey); bufferMeta.set_compresstype(ConvertCompressType(flusher->GetCompressType())); bufferMeta.set_telemetrytype(flusher->mTelemetryType); +#ifdef __ENTERPRISE__ + bufferMeta.set_endpointmode(GetEndpointMode(flusher->mEndpointMode)); +#endif + bufferMeta.set_endpoint(flusher->mEndpoint); string encodedInfo; bufferMeta.SerializeToString(&encodedInfo); @@ -702,138 +813,85 @@ bool DiskBufferWriter::SendToBufferFile(SenderQueueItem* dataPtr) { return true; } -SendResult DiskBufferWriter::SendBufferFileData(const sls_logs::LogtailBufferMeta& bufferMeta, - const std::string& logData, - std::string& errorCode) { +SLSResponse DiskBufferWriter::SendBufferFileData(const sls_logs::LogtailBufferMeta& bufferMeta, + const std::string& logData, + std::string& host) { RateLimiter::FlowControl(bufferMeta.rawsize(), mSendLastTime, mSendLastByte, false); - string region = bufferMeta.endpoint(); - if (region.find("http://") == 0) // old buffer file which record the endpoint - region = 
SLSClientManager::GetInstance()->GetRegionFromEndpoint(region); - - sdk::Client* sendClient = SLSClientManager::GetInstance()->GetClient(region, bufferMeta.aliuid()); - SendResult sendRes; - const string& endpoint = sendClient->GetRawSlsHost(); - if (endpoint.empty()) - sendRes = SEND_NETWORK_ERROR; - else { - sendRes = SendToNetSync(sendClient, region, endpoint, bufferMeta, logData, errorCode); - } - return sendRes; -} + string region = bufferMeta.region(); +#ifdef __ENTERPRISE__ + // old buffer file which record the endpoint + if (region.find("http://") == 0) { + region = EnterpriseSLSClientManager::GetInstance()->GetRegionFromEndpoint(region); + } +#endif -SendResult DiskBufferWriter::SendToNetSync(sdk::Client* sendClient, - const std::string& region, - const std::string& endpoint, - const sls_logs::LogtailBufferMeta& bufferMeta, - const std::string& logData, - std::string& errorCode) { - int32_t retryTimes = 0; - time_t beginTime = time(NULL); - while (true) { - ++retryTimes; - try { - if (bufferMeta.datatype() == int(RawDataType::EVENT_GROUP)) { - if (bufferMeta.has_telemetrytype() - && bufferMeta.telemetrytype() == sls_logs::SLS_TELEMETRY_TYPE_METRICS) { - sendClient->PostMetricStoreLogs(bufferMeta.project(), - bufferMeta.logstore(), - bufferMeta.compresstype(), - logData, - bufferMeta.rawsize()); - } else if (bufferMeta.has_shardhashkey() && !bufferMeta.shardhashkey().empty()) - sendClient->PostLogStoreLogs(bufferMeta.project(), - bufferMeta.logstore(), - bufferMeta.compresstype(), - logData, - bufferMeta.rawsize(), - bufferMeta.shardhashkey()); - else - sendClient->PostLogStoreLogs(bufferMeta.project(), - bufferMeta.logstore(), - bufferMeta.compresstype(), - logData, - bufferMeta.rawsize()); - } else { - if (bufferMeta.has_shardhashkey() && !bufferMeta.shardhashkey().empty()) - sendClient->PostLogStoreLogPackageList(bufferMeta.project(), - bufferMeta.logstore(), - bufferMeta.compresstype(), - logData, - bufferMeta.shardhashkey()); - else - 
sendClient->PostLogStoreLogPackageList( - bufferMeta.project(), bufferMeta.logstore(), bufferMeta.compresstype(), logData); - } - return SEND_OK; - } catch (sdk::LOGException& ex) { - errorCode = ex.GetErrorCode(); - SendResult sendRes = ConvertErrorCode(errorCode); - bool hasAuthError = false; - switch (sendRes) { - case SEND_NETWORK_ERROR: - case SEND_SERVER_ERROR: - SLSClientManager::GetInstance()->UpdateEndpointStatus(region, endpoint, false); - SLSClientManager::GetInstance()->ResetClientEndpoint(bufferMeta.aliuid(), region, time(NULL)); - LOG_WARNING(sLogger, - ("send data to SLS fail", "retry later")("error_code", errorCode)( - "error_message", ex.GetMessage())("endpoint", sendClient->GetRawSlsHost())( - "projectName", bufferMeta.project())("logstore", bufferMeta.logstore())( - "RetryTimes", retryTimes)("rawsize", bufferMeta.rawsize())); - usleep(INT32_FLAG(send_retry_sleep_interval)); - break; - case SEND_QUOTA_EXCEED: - AlarmManager::GetInstance()->SendAlarm(SEND_QUOTA_EXCEED_ALARM, - "error_code: " + errorCode - + ", error_message: " + ex.GetMessage(), - bufferMeta.project(), - bufferMeta.logstore(), - ""); - // no region - if (!GetProfileSender()->IsProfileData("", bufferMeta.project(), bufferMeta.logstore())) - LOG_WARNING(sLogger, - ("send data to SLS fail, error_code", errorCode)("error_message", ex.GetMessage())( - "endpoint", sendClient->GetRawSlsHost())("projectName", bufferMeta.project())( - "logstore", bufferMeta.logstore())("RetryTimes", retryTimes)( - "rawsize", bufferMeta.rawsize())); - usleep(INT32_FLAG(quota_exceed_wait_interval)); - break; - case SEND_UNAUTHORIZED: - hasAuthError = true; - usleep(INT32_FLAG(unauthorized_wait_interval)); - break; - default: - break; - } - SLSClientManager::GetInstance()->UpdateAccessKeyStatus(bufferMeta.aliuid(), !hasAuthError); - if (time(nullptr) - beginTime >= INT32_FLAG(discard_send_fail_interval)) { - sendRes = SEND_DISCARD_ERROR; - } - if (sendRes != SEND_NETWORK_ERROR && sendRes != SEND_SERVER_ERROR 
&& sendRes != SEND_QUOTA_EXCEED - && sendRes != SEND_UNAUTHORIZED) { - return sendRes; - } - { - lock_guard lock(mBufferSenderThreadRunningMux); - if (!mIsSendBufferThreadRunning) { - return sendRes; - } - } - } catch (...) { - if (retryTimes >= INT32_FLAG(send_retrytimes)) { - LOG_ERROR(sLogger, - ("send data fail", "unknown excepiton")("endpoint", sendClient->GetRawSlsHost())( - "projectName", bufferMeta.project())("logstore", bufferMeta.logstore())( - "rawsize", bufferMeta.rawsize())); - return SEND_DISCARD_ERROR; - } else { - LOG_DEBUG(sLogger, - ("send data fail", "unknown excepiton, retry later")("endpoint", sendClient->GetRawSlsHost())( - "projectName", bufferMeta.project())("logstore", bufferMeta.logstore())( - "rawsize", bufferMeta.rawsize())); - usleep(INT32_FLAG(send_retry_sleep_interval)); - } + SLSClientManager::AuthType type; + string accessKeyId, accessKeySecret; + if (!SLSClientManager::GetInstance()->GetAccessKey(bufferMeta.aliuid(), type, accessKeyId, accessKeySecret)) { +#ifdef __ENTERPRISE__ + if (!EnterpriseSLSClientManager::GetInstance()->GetAccessKeyIfProjectSupportsAnonymousWrite( + bufferMeta.project(), type, accessKeyId, accessKeySecret)) { + SLSResponse response; + response.mErrorCode = LOGE_UNAUTHORIZED; + response.mErrorMsg = kAKErrorMsg; + return response; } +#endif + } + +#ifdef __ENTERPRISE__ + if (bufferMeta.endpointmode() == sls_logs::EndpointMode::DEFAULT) { + EnterpriseSLSClientManager::GetInstance()->UpdateRemoteRegionEndpoints( + region, {bufferMeta.endpoint()}, EnterpriseSLSClientManager::RemoteEndpointUpdateAction::CREATE); + } + auto info = EnterpriseSLSClientManager::GetInstance()->GetCandidateHostsInfo( + region, bufferMeta.project(), GetEndpointMode(bufferMeta.endpointmode())); + mCandidateHostsInfos.insert(info); + + host = info->GetCurrentHost(); + if (host.empty()) { + SLSResponse response; + response.mErrorCode = LOGE_REQUEST_ERROR; + response.mErrorMsg = kNoHostErrorMsg; + return response; + } +#else + host = 
bufferMeta.project() + "." + bufferMeta.endpoint(); +#endif + + bool httpsFlag = SLSClientManager::GetInstance()->UsingHttps(region); + + RawDataType dataType; + if (bufferMeta.datatype() == 0) { + dataType = RawDataType::EVENT_GROUP_LIST; + } else { + dataType = RawDataType::EVENT_GROUP; + } + if (bufferMeta.has_telemetrytype() && bufferMeta.telemetrytype() == sls_logs::SLS_TELEMETRY_TYPE_METRICS) { + return PostMetricStoreLogs(accessKeyId, + accessKeySecret, + type, + host, + httpsFlag, + bufferMeta.project(), + bufferMeta.logstore(), + GetSLSCompressTypeString(bufferMeta.compresstype()), + logData, + bufferMeta.rawsize()); + } else { + return PostLogStoreLogs(accessKeyId, + accessKeySecret, + type, + host, + httpsFlag, + bufferMeta.project(), + bufferMeta.logstore(), + GetSLSCompressTypeString(bufferMeta.compresstype()), + dataType, + logData, + bufferMeta.rawsize(), + bufferMeta.has_shardhashkey() ? bufferMeta.shardhashkey() : ""); } } -} // namespace logtail \ No newline at end of file +} // namespace logtail diff --git a/core/plugin/flusher/sls/DiskBufferWriter.h b/core/plugin/flusher/sls/DiskBufferWriter.h index 0ed9367c5e..6075ffa763 100644 --- a/core/plugin/flusher/sls/DiskBufferWriter.h +++ b/core/plugin/flusher/sls/DiskBufferWriter.h @@ -16,18 +16,27 @@ #pragma once +#include #include #include +#include #include +#include #include #include +#ifdef __ENTERPRIRSE__ +#include +#endif #include #include "common/SafeQueue.h" #include "pipeline/queue/SenderQueueItem.h" -#include "plugin/flusher/sls/SendResult.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif +#include "plugin/flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSResponse.h" #include "protobuf/sls/logtail_buffer_meta.pb.h" -#include "sdk/Client.h" namespace logtail { @@ -64,15 +73,8 @@ class DiskBufferWriter { void BufferWriterThread(); void BufferSenderThread(); - SendResult SendToNetSync(sdk::Client* sendClient, - const std::string& 
region, - const std::string& endpoint, - const sls_logs::LogtailBufferMeta& bufferMeta, - const std::string& logData, - std::string& errorCode); - SendResult SendBufferFileData(const sls_logs::LogtailBufferMeta& bufferMeta, - const std::string& logData, - std::string& errorCode); + SLSResponse + SendBufferFileData(const sls_logs::LogtailBufferMeta& bufferMeta, const std::string& logData, std::string& host); bool SendToBufferFile(SenderQueueItem* dataPtr); bool LoadFileToSend(time_t timeLine, std::vector& filesToSend); bool CreateNewFile(); @@ -100,12 +102,28 @@ class DiskBufferWriter { bool mIsSendBufferThreadRunning = true; mutable std::condition_variable mStopCV; +#ifdef __ENTERPRISE__ + struct PointerHash { + std::size_t operator()(const std::shared_ptr& ptr) const { + return std::hash()(ptr.get()); + } + }; + + struct PointerEqual { + bool operator()(const std::shared_ptr& lhs, + const std::shared_ptr& rhs) const { + return lhs.get() == rhs.get(); + } + }; + + std::unordered_set, PointerHash, PointerEqual> mCandidateHostsInfos; +#endif + mutable std::mutex mBufferFileLock; std::string mBufferFilePath; std::string mBufferFileName; volatile time_t mBufferDivideTime = 0; - // volatile bool mIsSendingBuffer = false; int64_t mCheckPeriod = 0; int64_t mSendLastTime = 0; diff --git a/core/sdk/Exception.h b/core/plugin/flusher/sls/Exception.h similarity index 100% rename from core/sdk/Exception.h rename to core/plugin/flusher/sls/Exception.h diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index 765318b77f..614f121700 100644 --- a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -21,6 +21,7 @@ #include "common/ParamExtractor.h" #include "common/TimeUtil.h" #include "common/compression/CompressorFactory.h" +#include "common/http/Constant.h" #include "sls_logs.pb.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" @@ -30,13 +31,17 @@ #include 
"pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SLSSenderQueueItem.h" #include "pipeline/queue/SenderQueueManager.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif #include "plugin/flusher/sls/PackIdManager.h" #include "plugin/flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSConstant.h" #include "plugin/flusher/sls/SLSResponse.h" +#include "plugin/flusher/sls/SLSUtil.h" #include "plugin/flusher/sls/SendResult.h" #include "provider/Provider.h" #include "runner/FlusherRunner.h" -#include "sdk/Common.h" // TODO: temporarily used here #include "pipeline/PipelineManager.h" #include "plugin/flusher/sls/DiskBufferWriter.h" @@ -58,7 +63,6 @@ DEFINE_FLAG_INT32(unauthorized_allowed_delay_after_reset, "allowed delay to retr DEFINE_FLAG_INT32(discard_send_fail_interval, "discard data when send fail after 6 * 3600 seconds", 6 * 3600); DEFINE_FLAG_INT32(profile_data_send_retrytimes, "how many times should retry if profile data send fail", 5); DEFINE_FLAG_INT32(unknow_error_try_max, "discard data when try times > this value", 5); -DEFINE_FLAG_BOOL(global_network_success, "global network success flag, default false", false); DEFINE_FLAG_BOOL(enable_metricstore_channel, "only works for metrics data for enhance metrics query performance", true); DEFINE_FLAG_INT32(max_send_log_group_size, "bytes", 10 * 1024 * 1024); DEFINE_FLAG_DOUBLE(sls_serialize_size_expansion_ratio, "", 1.2); @@ -71,6 +75,8 @@ enum class OperationOnFail { RETRY_IMMEDIATELY, RETRY_LATER, DISCARD }; static const int ON_FAIL_LOG_WARNING_INTERVAL_SECOND = 10; +static constexpr int64_t kInvalidHashKeySeqID = 0; + static const char* GetOperationString(OperationOnFail op) { switch (op) { case OperationOnFail::RETRY_IMMEDIATELY: @@ -225,7 +231,6 @@ void FlusherSLS::SetDefaultRegion(const string& region) { mutex FlusherSLS::sProjectRegionMapLock; unordered_map FlusherSLS::sProjectRefCntMap; -unordered_map FlusherSLS::sRegionRefCntMap; unordered_map 
FlusherSLS::sProjectRegionMap; string FlusherSLS::GetAllProjects() { @@ -237,11 +242,6 @@ string FlusherSLS::GetAllProjects() { return result; } -bool FlusherSLS::IsRegionContainingConfig(const string& region) { - lock_guard lock(sProjectRegionMapLock); - return sRegionRefCntMap.find(region) != sRegionRefCntMap.end(); -} - std::string FlusherSLS::GetProjectRegion(const std::string& project) { lock_guard lock(sProjectRegionMapLock); auto iter = sProjectRegionMap.find(project); @@ -254,7 +254,6 @@ std::string FlusherSLS::GetProjectRegion(const std::string& project) { void FlusherSLS::IncreaseProjectRegionReferenceCnt(const string& project, const string& region) { lock_guard lock(sProjectRegionMapLock); ++sProjectRefCntMap[project]; - ++sRegionRefCntMap[region]; sProjectRegionMap[project] = region; } @@ -267,32 +266,6 @@ void FlusherSLS::DecreaseProjectRegionReferenceCnt(const string& project, const sProjectRegionMap.erase(project); } } - - auto regionRefCnt = sRegionRefCntMap.find(region); - if (regionRefCnt != sRegionRefCntMap.end()) { - if (--regionRefCnt->second == 0) { - sRegionRefCntMap.erase(regionRefCnt); - } - } -} - -mutex FlusherSLS::sRegionStatusLock; -unordered_map FlusherSLS::sAllRegionStatus; - -void FlusherSLS::UpdateRegionStatus(const string& region, bool status) { - LOG_DEBUG(sLogger, ("update region status, region", region)("is network in good condition", ToString(status))); - lock_guard lock(sRegionStatusLock); - sAllRegionStatus[region] = status; -} - -bool FlusherSLS::GetRegionStatus(const string& region) { - lock_guard lock(sRegionStatusLock); - auto rst = sAllRegionStatus.find(region); - if (rst == sAllRegionStatus.end()) { - return true; - } else { - return rst->second; - } } bool FlusherSLS::sIsResourceInited = false; @@ -303,10 +276,10 @@ const unordered_set FlusherSLS::sNativeParam = {"Project", "Logstore", "Region", "Endpoint", + "EndpointMode", "Aliuid", "CompressType", "TelemetryType", - "FlowControlExpireTime", "MaxSendRate", 
"ShardHashKeys", "Batch"}; @@ -341,26 +314,66 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline mContext->GetRegion()); } + // Region + if ( #ifdef __ENTERPRISE__ - if (EnterpriseConfigProvider::GetInstance()->IsDataServerPrivateCloud()) { - mRegion = STRING_FLAG(default_region_name); - } else { + !EnterpriseConfigProvider::GetInstance()->IsDataServerPrivateCloud() && #endif - // Region - if (!GetOptionalStringParam(config, "Region", mRegion, errorMsg)) { - PARAM_WARNING_DEFAULT(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - mRegion, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); - } + !GetOptionalStringParam(config, "Region", mRegion, errorMsg)) { + PARAM_WARNING_DEFAULT(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + mRegion, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } - // Endpoint #ifdef __ENTERPRISE__ + // Aliuid + if (!GetOptionalStringParam(config, "Aliuid", mAliuid, errorMsg)) { + PARAM_WARNING_IGNORE(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + + // EndpointMode + string endpointMode = "default"; + if (!GetOptionalStringParam(config, "EndpointMode", endpointMode, errorMsg)) { + PARAM_WARNING_DEFAULT(mContext->GetLogger(), + mContext->GetAlarm(), + errorMsg, + endpointMode, + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + if (endpointMode == "accelerate") { + mEndpointMode = EndpointMode::ACCELERATE; + } else if (endpointMode != "default") { + PARAM_WARNING_DEFAULT(mContext->GetLogger(), + mContext->GetAlarm(), + "string param EndpointMode is not valid", + "default", + sName, + mContext->GetConfigName(), + 
mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); + } + if (mEndpointMode == EndpointMode::DEFAULT) { + // for local pipeline whose flusher region is neither specified in local info nor included by config provider, + // param Endpoint should be used, and the mode is set to default. + // warning: if inconsistency exists among configs, only the first config would be considered in this situation. if (!GetOptionalStringParam(config, "Endpoint", mEndpoint, errorMsg)) { PARAM_WARNING_IGNORE(mContext->GetLogger(), mContext->GetAlarm(), @@ -370,8 +383,17 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline mContext->GetProjectName(), mContext->GetLogstoreName(), mContext->GetRegion()); - } else { + } + EnterpriseSLSClientManager::GetInstance()->UpdateRemoteRegionEndpoints( + mRegion, {mEndpoint}, EnterpriseSLSClientManager::RemoteEndpointUpdateAction::CREATE); + } + mCandidateHostsInfo + = EnterpriseSLSClientManager::GetInstance()->GetCandidateHostsInfo(mRegion, mProject, mEndpointMode); + LOG_INFO(mContext->GetLogger(), + ("get candidate hosts info, region", mRegion)("project", mProject)("logstore", mLogstore)( + "endpoint mode", EndpointModeToString(mCandidateHostsInfo->GetMode()))); #else + // Endpoint if (!GetMandatoryStringParam(config, "Endpoint", mEndpoint, errorMsg)) { PARAM_ERROR_RETURN(mContext->GetLogger(), mContext->GetAlarm(), @@ -381,35 +403,22 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline mContext->GetProjectName(), mContext->GetLogstoreName(), mContext->GetRegion()); - } else { -#endif - mEndpoint = TrimString(mEndpoint); - if (!mEndpoint.empty()) { - SLSClientManager::GetInstance()->AddEndpointEntry(mRegion, - StandardizeEndpoint(mEndpoint, mEndpoint), - false, - SLSClientManager::EndpointSourceType::REMOTE); - } - } -#ifdef __ENTERPRISE__ } - - // Aliuid - if (!GetOptionalStringParam(config, "Aliuid", mAliuid, errorMsg)) { - 
PARAM_WARNING_IGNORE(mContext->GetLogger(), - mContext->GetAlarm(), - errorMsg, - sName, - mContext->GetConfigName(), - mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion()); + mEndpoint = TrimString(mEndpoint); + if (mEndpoint.empty()) { + PARAM_ERROR_RETURN(mContext->GetLogger(), + mContext->GetAlarm(), + "param Endpoint is empty", + sName, + mContext->GetConfigName(), + mContext->GetProjectName(), + mContext->GetLogstoreName(), + mContext->GetRegion()); } #endif // TelemetryType string telemetryType; - if (!GetOptionalStringParam(config, "TelemetryType", telemetryType, errorMsg)) { PARAM_WARNING_DEFAULT(mContext->GetLogger(), mContext->GetAlarm(), @@ -554,10 +563,8 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline bool FlusherSLS::Start() { Flusher::Start(); - InitResource(); IncreaseProjectRegionReferenceCnt(mProject, mRegion); - SLSClientManager::GetInstance()->IncreaseAliuidReferenceCntForRegion(mRegion, mAliuid); return true; } @@ -565,7 +572,6 @@ bool FlusherSLS::Stop(bool isPipelineRemoving) { Flusher::Stop(isPipelineRemoving); DecreaseProjectRegionReferenceCnt(mProject, mRegion); - SLSClientManager::GetInstance()->DecreaseAliuidReferenceCntForRegion(mRegion, mAliuid); return true; } @@ -592,71 +598,82 @@ bool FlusherSLS::FlushAll() { return SerializeAndPush(std::move(res)); } -bool FlusherSLS::BuildRequest(SenderQueueItem* item, unique_ptr& req, bool* keepItem) const { - auto data = static_cast(item); - sdk::Client* sendClient = SLSClientManager::GetInstance()->GetClient(mRegion, mAliuid); - - int32_t curTime = time(NULL); - static int32_t lastResetEndpointTime = 0; - data->mCurrentEndpoint = sendClient->GetRawSlsHost(); - if (data->mCurrentEndpoint.empty()) { - if (curTime - lastResetEndpointTime >= 30) { - SLSClientManager::GetInstance()->ResetClientEndpoint(mAliuid, mRegion, curTime); - data->mCurrentEndpoint = sendClient->GetRawSlsHost(); - lastResetEndpointTime = curTime; - } - } +bool 
FlusherSLS::BuildRequest(SenderQueueItem* item, unique_ptr& req, bool* keepItem, string* errMsg) { if (mSendCnt) { mSendCnt->Add(1); } - if (BOOL_FLAG(send_prefer_real_ip)) { - if (curTime - sendClient->GetSlsRealIpUpdateTime() >= INT32_FLAG(send_check_real_ip_interval)) { - SLSClientManager::GetInstance()->UpdateSendClientRealIp(sendClient, mRegion); + + SLSClientManager::AuthType type; + string accessKeyId, accessKeySecret; + if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { +#ifdef __ENTERPRISE__ + if (!EnterpriseSLSClientManager::GetInstance()->GetAccessKeyIfProjectSupportsAnonymousWrite( + mProject, type, accessKeyId, accessKeySecret)) { + *keepItem = true; + *errMsg = "failed to get access key"; + return false; } - data->mRealIpFlag = sendClient->GetRawSlsHostFlag(); +#endif } - if (data->mType == RawDataType::EVENT_GROUP) { - if (mTelemetryType == sls_logs::SLS_TELEMETRY_TYPE_METRICS) { - req = sendClient->CreatePostMetricStoreLogsRequest( - mProject, data->mLogstore, ConvertCompressType(GetCompressType()), data->mData, data->mRawSize, item); - } else { - if (data->mShardHashKey.empty()) { - req = sendClient->CreatePostLogStoreLogsRequest(mProject, - data->mLogstore, - ConvertCompressType(GetCompressType()), - data->mData, - data->mRawSize, - item); - } else { - auto& exactlyOnceCpt = data->mExactlyOnceCheckpoint; - int64_t hashKeySeqID = exactlyOnceCpt ? 
exactlyOnceCpt->data.sequence_id() : sdk::kInvalidHashKeySeqID; - req = sendClient->CreatePostLogStoreLogsRequest(mProject, - data->mLogstore, - ConvertCompressType(GetCompressType()), - data->mData, - data->mRawSize, - item, - data->mShardHashKey, - hashKeySeqID); + auto data = static_cast(item); +#ifdef __ENTERPRISE__ + if (BOOL_FLAG(send_prefer_real_ip)) { + data->mCurrentHost = EnterpriseSLSClientManager::GetInstance()->GetRealIp(mRegion); + if (data->mCurrentHost.empty()) { + auto info + = EnterpriseSLSClientManager::GetInstance()->GetCandidateHostsInfo(mRegion, mProject, mEndpointMode); + if (mCandidateHostsInfo.get() != info.get()) { + LOG_INFO(sLogger, + ("update candidate hosts info, region", mRegion)("project", mProject)("logstore", mLogstore)( + "from", EndpointModeToString(mCandidateHostsInfo->GetMode()))( + "to", EndpointModeToString(info->GetMode()))); + mCandidateHostsInfo = info; } + data->mCurrentHost = mCandidateHostsInfo->GetCurrentHost(); + data->mRealIpFlag = false; + } else { + data->mRealIpFlag = true; } } else { - if (data->mShardHashKey.empty()) - req = sendClient->CreatePostLogStoreLogPackageListRequest( - mProject, data->mLogstore, ConvertCompressType(GetCompressType()), data->mData, item); - else - req = sendClient->CreatePostLogStoreLogPackageListRequest(mProject, - data->mLogstore, - ConvertCompressType(GetCompressType()), - data->mData, - item, - data->mShardHashKey); + // in case local region endpoint mode is changed, we should always check before sending + auto info = EnterpriseSLSClientManager::GetInstance()->GetCandidateHostsInfo(mRegion, mProject, mEndpointMode); + if (mCandidateHostsInfo == nullptr) { + // TODO: temporarily used here, for send logtail alarm only, should be removed after alarm is refactored + mCandidateHostsInfo = info; + } + if (mCandidateHostsInfo.get() != info.get()) { + LOG_INFO(sLogger, + ("update candidate hosts info, region", mRegion)("project", mProject)("logstore", mLogstore)( + "from", 
EndpointModeToString(mCandidateHostsInfo->GetMode()))( + "to", EndpointModeToString(info->GetMode()))); + mCandidateHostsInfo = info; + } + data->mCurrentHost = mCandidateHostsInfo->GetCurrentHost(); } - if (!req) { + if (data->mCurrentHost.empty()) { + if (mCandidateHostsInfo->IsInitialized()) { + GetRegionConcurrencyLimiter(mRegion)->OnFail(chrono::system_clock::now()); + } + *errMsg = "failed to get available host"; *keepItem = true; return false; } +#else + static string host = mProject + "." + mEndpoint; + data->mCurrentHost = host; +#endif + + switch (mTelemetryType) { + case sls_logs::SLS_TELEMETRY_TYPE_LOGS: + req = CreatePostLogStoreLogsRequest(accessKeyId, accessKeySecret, type, data); + break; + case sls_logs::SLS_TELEMETRY_TYPE_METRICS: + req = CreatePostMetricStoreLogsRequest(accessKeyId, accessKeySecret, type, data); + break; + default: + break; + } return true; } @@ -664,31 +681,14 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) if (mSendDoneCnt) { mSendDoneCnt->Add(1); } - SLSResponse slsResponse; - if (AppConfig::GetInstance()->IsResponseVerificationEnabled() && !IsSLSResponse(response)) { - slsResponse.mStatusCode = 0; - slsResponse.mErrorCode = sdk::LOGE_REQUEST_ERROR; - slsResponse.mErrorMsg = "invalid response body"; - } else { - slsResponse.Parse(response); - - if (AppConfig::GetInstance()->EnableLogTimeAutoAdjust()) { - static uint32_t sCount = 0; - if (sCount++ % 10000 == 0 || slsResponse.mErrorCode == sdk::LOGE_REQUEST_TIME_EXPIRED) { - time_t serverTime = GetServerTime(response); - if (serverTime > 0) { - UpdateTimeDelta(serverTime); - } - } - } - } + SLSResponse slsResponse = ParseHttpResponse(response); auto data = static_cast(item); string configName = HasContext() ? 
GetContext().GetConfigName() : ""; bool isProfileData = GetProfileSender()->IsProfileData(mRegion, mProject, data->mLogstore); int32_t curTime = time(NULL); auto curSystemTime = chrono::system_clock::now(); - bool hasAuthError = false; + SendResult sendResult = SEND_OK; if (slsResponse.mStatusCode == 200) { auto& cpt = data->mExactlyOnceCheckpoint; if (cpt) { @@ -704,8 +704,8 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) + "ms")( "total send time", ToString(chrono::duration_cast(curSystemTime - item->mFirstEnqueTime).count()) - + "ms")("try cnt", data->mTryCnt)("endpoint", data->mCurrentEndpoint)("is profile data", - isProfileData)); + + "ms")("try cnt", data->mTryCnt)("endpoint", data->mCurrentHost)("is profile data", + isProfileData)); GetRegionConcurrencyLimiter(mRegion)->OnSuccess(curSystemTime); GetProjectConcurrencyLimiter(mProject)->OnSuccess(curSystemTime); GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(curSystemTime); @@ -716,9 +716,8 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) DealSenderQueueItemAfterSend(item, false); } else { OperationOnFail operation; - SendResult sendResult = ConvertErrorCode(slsResponse.mErrorCode); + sendResult = ConvertErrorCode(slsResponse.mErrorCode); ostringstream failDetail, suggestion; - string failEndpoint = data->mCurrentEndpoint; if (sendResult == SEND_NETWORK_ERROR || sendResult == SEND_SERVER_ERROR) { if (sendResult == SEND_NETWORK_ERROR) { failDetail << "network error"; @@ -732,29 +731,19 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) } } suggestion << "check network connection to endpoint"; - if (BOOL_FLAG(send_prefer_real_ip) && data->mRealIpFlag) { +#ifdef __ENTERPRISE__ + if (data->mRealIpFlag) { // connect refused, use vip directly failDetail << ", real ip may be stale, force update"; - // just set force update flag - SLSClientManager::GetInstance()->ForceUpdateRealIp(mRegion); - } - if 
(sendResult == SEND_NETWORK_ERROR) { - // only set network stat when no real ip - if (!BOOL_FLAG(send_prefer_real_ip) || !data->mRealIpFlag) { - SLSClientManager::GetInstance()->UpdateEndpointStatus(mRegion, data->mCurrentEndpoint, false); - if (SLSClientManager::GetInstance()->GetServerSwitchPolicy() - == SLSClientManager::EndpointSwitchPolicy::DESIGNATED_FIRST) { - SLSClientManager::GetInstance()->ResetClientEndpoint(mAliuid, mRegion, curTime); - } - } + EnterpriseSLSClientManager::GetInstance()->UpdateOutdatedRealIpRegions(mRegion); } +#endif operation = data->mBufferOrNot ? OperationOnFail::RETRY_LATER : OperationOnFail::DISCARD; GetRegionConcurrencyLimiter(mRegion)->OnFail(curSystemTime); GetProjectConcurrencyLimiter(mProject)->OnSuccess(curSystemTime); GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(curSystemTime); } else if (sendResult == SEND_QUOTA_EXCEED) { - BOOL_FLAG(global_network_success) = true; - if (slsResponse.mErrorCode == sdk::LOGE_SHARD_WRITE_QUOTA_EXCEED) { + if (slsResponse.mErrorCode == LOGE_SHARD_WRITE_QUOTA_EXCEED) { failDetail << "shard write quota exceed"; suggestion << "Split logstore shards. 
https://help.aliyun.com/zh/sls/user-guide/expansion-of-resources"; GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnFail(curSystemTime); @@ -786,8 +775,6 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) failDetail << "write unauthorized"; suggestion << "check access keys provided"; operation = OperationOnFail::RETRY_LATER; - BOOL_FLAG(global_network_success) = true; - hasAuthError = true; if (mUnauthErrorCnt) { mUnauthErrorCnt->Add(1); } @@ -837,7 +824,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) cpt->IncreaseSequenceID(); } while (0); } else if (AppConfig::GetInstance()->EnableLogTimeAutoAdjust() - && sdk::LOGE_REQUEST_TIME_EXPIRED == slsResponse.mErrorCode) { + && LOGE_REQUEST_TIME_EXPIRED == slsResponse.mErrorCode) { failDetail << "write request expired, will retry"; suggestion << "check local system time"; operation = OperationOnFail::RETRY_IMMEDIATELY; @@ -874,7 +861,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) ToString(chrono::duration_cast(curSystemTime - data->mLastSendTime).count()) \ + "ms")("total send time", \ ToString(chrono::duration_cast(curSystemTime - data->mFirstEnqueTime).count()) \ - + "ms")("endpoint", data->mCurrentEndpoint)("is profile data", isProfileData) + + "ms")("endpoint", data->mCurrentHost)("is profile data", isProfileData) switch (operation) { case OperationOnFail::RETRY_IMMEDIATELY: @@ -882,7 +869,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) FlusherRunner::GetInstance()->PushToHttpSink(item, false); break; case OperationOnFail::RETRY_LATER: - if (slsResponse.mErrorCode == sdk::LOGE_REQUEST_TIMEOUT + if (slsResponse.mErrorCode == LOGE_REQUEST_TIMEOUT || curTime - data->mLastLogWarningTime > ON_FAIL_LOG_WARNING_INTERVAL_SECOND) { LOG_WARNING(sLogger, LOG_PATTERN); data->mLastLogWarningTime = curTime; @@ -903,7 +890,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& 
response, SenderQueueItem* item) + "\trequestId: " + slsResponse.mRequestId + "\tstatusCode: " + ToString(slsResponse.mStatusCode) + "\terrorCode: " + slsResponse.mErrorCode + "\terrorMessage: " + slsResponse.mErrorMsg - + "\tconfig: " + configName + "\tendpoint: " + data->mCurrentEndpoint, + + "\tconfig: " + configName + "\tendpoint: " + data->mCurrentHost, mProject, data->mLogstore, mRegion); @@ -913,10 +900,17 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) break; } } - SLSClientManager::GetInstance()->UpdateAccessKeyStatus(mAliuid, !hasAuthError); #ifdef __ENTERPRISE__ - static auto manager = static_cast(SLSClientManager::GetInstance()); - manager->UpdateProjectAnonymousWriteStatus(mProject, !hasAuthError); + bool hasNetworkError = (sendResult == SEND_NETWORK_ERROR || sendResult == SEND_SERVER_ERROR); + EnterpriseSLSClientManager::GetInstance()->UpdateHostStatus( + mProject, mCandidateHostsInfo->GetMode(), data->mCurrentHost, !hasNetworkError); + mCandidateHostsInfo->SelectBestHost(); + + if (!hasNetworkError) { + bool hasAuthError = sendResult == SEND_UNAUTHORIZED; + EnterpriseSLSClientManager::GetInstance()->UpdateAccessKeyStatus(mAliuid, !hasAuthError); + EnterpriseSLSClientManager::GetInstance()->UpdateProjectAnonymousWriteStatus(mProject, !hasAuthError); + } #endif } @@ -1171,7 +1165,7 @@ string FlusherSLS::GetShardHashKey(const BatchedEvents& g) const { key += "_"; } } - return sdk::CalcMD5(key); + return CalcMD5(key); } void FlusherSLS::AddPackId(BatchedEvents& g) const { @@ -1183,6 +1177,78 @@ void FlusherSLS::AddPackId(BatchedEvents& g) const { g.mTags.Insert(LOG_RESERVED_KEY_PACKAGE_ID, StringView(packId.data, packId.size)); } +unique_ptr FlusherSLS::CreatePostLogStoreLogsRequest(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + SLSSenderQueueItem* item) const { + optional seqId; + if (item->mExactlyOnceCheckpoint) { + seqId = 
item->mExactlyOnceCheckpoint->data.sequence_id(); + } + string path, query; + map header; + PreparePostLogStoreLogsRequest(accessKeyId, + accessKeySecret, + type, + item->mCurrentHost, + item->mRealIpFlag, + mProject, + item->mLogstore, + CompressTypeToString(mCompressor->GetCompressType()), + item->mType, + item->mData, + item->mRawSize, + item->mShardHashKey, + seqId, + path, + query, + header); + bool httpsFlag = SLSClientManager::GetInstance()->UsingHttps(mRegion); + return make_unique(HTTP_POST, + httpsFlag, + item->mCurrentHost, + httpsFlag ? 443 : 80, + path, + query, + header, + item->mData, + item, + INT32_FLAG(default_http_request_timeout_sec), + 1); +} + +unique_ptr FlusherSLS::CreatePostMetricStoreLogsRequest(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + SLSSenderQueueItem* item) const { + string path; + map header; + PreparePostMetricStoreLogsRequest(accessKeyId, + accessKeySecret, + type, + item->mCurrentHost, + item->mRealIpFlag, + mProject, + item->mLogstore, + CompressTypeToString(mCompressor->GetCompressType()), + item->mData, + item->mRawSize, + path, + header); + bool httpsFlag = SLSClientManager::GetInstance()->UsingHttps(mRegion); + return make_unique(HTTP_POST, + httpsFlag, + item->mCurrentHost, + httpsFlag ? 
443 : 80, + path, + "", + header, + item->mData, + item, + INT32_FLAG(default_http_request_timeout_sec), + 1); +} + sls_logs::SlsCompressType ConvertCompressType(CompressType type) { sls_logs::SlsCompressType compressType = sls_logs::SLS_CMP_NONE; switch (type) { diff --git a/core/plugin/flusher/sls/FlusherSLS.h b/core/plugin/flusher/sls/FlusherSLS.h index 25291a7cc4..24c69629b9 100644 --- a/core/plugin/flusher/sls/FlusherSLS.h +++ b/core/plugin/flusher/sls/FlusherSLS.h @@ -30,7 +30,12 @@ #include "pipeline/batch/Batcher.h" #include "pipeline/limiter/ConcurrencyLimiter.h" #include "pipeline/plugin/interface/HttpFlusher.h" +#include "pipeline/queue/SLSSenderQueueItem.h" #include "pipeline/serializer/SLSSerializer.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif +#include "plugin/flusher/sls/SLSClientManager.h" #include "protobuf/sls/sls_logs.pb.h" namespace logtail { @@ -43,18 +48,14 @@ class FlusherSLS : public HttpFlusher { static std::shared_ptr GetRegionConcurrencyLimiter(const std::string& region); static void ClearInvalidConcurrencyLimiters(); + static void InitResource(); static void RecycleResourceIfNotUsed(); static std::string GetDefaultRegion(); static void SetDefaultRegion(const std::string& region); static std::string GetAllProjects(); - static bool IsRegionContainingConfig(const std::string& region); static std::string GetProjectRegion(const std::string& project); - // TODO: should be moved to enterprise config provider - static bool GetRegionStatus(const std::string& region); - static void UpdateRegionStatus(const std::string& region, bool status); - static const std::string sName; FlusherSLS(); @@ -66,7 +67,7 @@ class FlusherSLS : public HttpFlusher { bool Send(PipelineEventGroup&& g) override; bool Flush(size_t key) override; bool FlushAll() override; - bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const override; + bool BuildRequest(SenderQueueItem* item, std::unique_ptr& 
req, bool* keepItem, std::string* errMsg) override; void OnSendDone(const HttpResponse& response, SenderQueueItem* item) override; CompressType GetCompressType() const { return mCompressor ? mCompressor->GetCompressType() : CompressType::NONE; } @@ -77,8 +78,11 @@ class FlusherSLS : public HttpFlusher { std::string mProject; std::string mLogstore; std::string mRegion; - std::string mEndpoint; std::string mAliuid; +#ifdef __ENTERPRISE__ + EndpointMode mEndpointMode = EndpointMode::DEFAULT; +#endif + std::string mEndpoint; sls_logs::SlsTelemetryType mTelemetryType = sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS; std::vector mShardHashKeys; uint32_t mMaxSendRate = 0; // preserved only for exactly once @@ -87,10 +91,6 @@ class FlusherSLS : public HttpFlusher { std::unique_ptr mCompressor; private: - static const std::unordered_set sNativeParam; - - static void InitResource(); - static void IncreaseProjectRegionReferenceCnt(const std::string& project, const std::string& region); static void DecreaseProjectRegionReferenceCnt(const std::string& project, const std::string& region); @@ -99,18 +99,15 @@ class FlusherSLS : public HttpFlusher { static std::unordered_map> sRegionConcurrencyLimiterMap; static std::unordered_map> sLogstoreConcurrencyLimiterMap; + static const std::unordered_set sNativeParam; + static std::mutex sDefaultRegionLock; static std::string sDefaultRegion; static std::mutex sProjectRegionMapLock; static std::unordered_map sProjectRefCntMap; - static std::unordered_map sRegionRefCntMap; static std::unordered_map sProjectRegionMap; - // TODO: should be moved to enterprise config provider - static std::mutex sRegionStatusLock; - static std::unordered_map sAllRegionStatus; - static bool sIsResourceInited; void GenerateGoPlugin(const Json::Value& config, Json::Value& res) const; @@ -121,9 +118,23 @@ class FlusherSLS : public HttpFlusher { std::string GetShardHashKey(const BatchedEvents& g) const; void AddPackId(BatchedEvents& g) const; + std::unique_ptr 
CreatePostLogStoreLogsRequest(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + SLSSenderQueueItem* item) const; + std::unique_ptr CreatePostMetricStoreLogsRequest(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + SLSSenderQueueItem* item) const; + Batcher mBatcher; std::unique_ptr mGroupSerializer; std::unique_ptr>> mGroupListSerializer; +#ifdef __ENTERPRISE__ + // This may not be cached. However, this provides a simple way to control the lifetime of a CandidateHostsInfo. + // Otherwise, timeout machanisim must be emplyed to clean up unused CandidateHostsInfo. + std::shared_ptr mCandidateHostsInfo; +#endif CounterPtr mSendCnt; CounterPtr mSendDoneCnt; diff --git a/core/plugin/flusher/sls/SLSClientManager.cpp b/core/plugin/flusher/sls/SLSClientManager.cpp index 7e7930a547..be9e57291e 100644 --- a/core/plugin/flusher/sls/SLSClientManager.cpp +++ b/core/plugin/flusher/sls/SLSClientManager.cpp @@ -18,658 +18,52 @@ #include #endif -#include - #include "app_config/AppConfig.h" -#include "common/EndpointUtil.h" #include "common/Flags.h" -#include "common/LogtailCommonFlags.h" +#include "common/HashUtil.h" #include "common/StringTools.h" -#include "common/TimeUtil.h" +#include "common/http/Constant.h" +#include "common/http/Curl.h" #include "common/version.h" #include "logger/Logger.h" #include "monitor/Monitor.h" #ifdef __ENTERPRISE__ #include "plugin/flusher/sls/EnterpriseSLSClientManager.h" #endif -#include "plugin/flusher/sls/FlusherSLS.h" -#include "plugin/flusher/sls/SendResult.h" -#include "sdk/Exception.h" - -// for windows compatability, to avoid conflict with the same function defined in windows.h -#ifdef SetPort -#undef SetPort -#endif +#include "plugin/flusher/sls/SLSConstant.h" +#include "plugin/flusher/sls/SLSUtil.h" -DEFINE_FLAG_STRING(data_endpoint_policy, - "policy for switching between data server endpoints, possible options include " - 
"'designated_first'(default) and 'designated_locked'", - "designated_first"); -DEFINE_FLAG_INT32(sls_host_update_interval, "seconds", 5); -DEFINE_FLAG_INT32(send_client_timeout_interval, "recycle clients avoid memory increment", 12 * 3600); -DEFINE_FLAG_INT32(test_network_normal_interval, "if last check is normal, test network again after seconds ", 30); -DEFINE_FLAG_INT32(test_unavailable_endpoint_interval, "test unavailable endpoint interval", 60); -DEFINE_FLAG_INT32(send_switch_real_ip_interval, "seconds", 60); -DEFINE_FLAG_BOOL(send_prefer_real_ip, "use real ip to send data", false); +DEFINE_FLAG_STRING(custom_user_agent, "custom user agent appended at the end of the exsiting ones", ""); DEFINE_FLAG_STRING(default_access_key_id, "", ""); DEFINE_FLAG_STRING(default_access_key, "", ""); -DEFINE_FLAG_STRING(custom_user_agent, "custom user agent appended at the end of the exsiting ones", ""); using namespace std; namespace logtail { -bool SLSClientManager::RegionEndpointsInfo::AddDefaultEndpoint(const std::string& endpoint, - const EndpointSourceType& endpointType, - bool& isDefault) { - if (mDefaultEndpoint.empty() - || (endpointType == EndpointSourceType::LOCAL && mDefaultEndpointType == EndpointSourceType::REMOTE)) { - mDefaultEndpoint = endpoint; - mDefaultEndpointType = endpointType; - isDefault = true; - } - return AddEndpoint(endpoint, true, false); -} - -bool SLSClientManager::RegionEndpointsInfo::AddEndpoint(const std::string& endpoint, bool status, bool proxy) { - if (mEndpointInfoMap.find(endpoint) == mEndpointInfoMap.end()) { - mEndpointInfoMap.emplace(std::make_pair(endpoint, EndpointInfo(status, proxy))); - return true; - } - return false; -} - -void SLSClientManager::RegionEndpointsInfo::UpdateEndpointInfo(const std::string& endpoint, - bool status, - std::optional latency, - bool createFlag) { - auto iter = mEndpointInfoMap.find(endpoint); - if (iter == mEndpointInfoMap.end()) { - if (createFlag) { - AddEndpoint(endpoint, status); - } - } else { - 
(iter->second).UpdateInfo(status, latency); - } -} - -void SLSClientManager::RegionEndpointsInfo::RemoveEndpoint(const std::string& endpoint) { - mEndpointInfoMap.erase(endpoint); - if (mDefaultEndpoint == endpoint) { - mDefaultEndpoint.clear(); - } -} - -std::string SLSClientManager::RegionEndpointsInfo::GetAvailableEndpointWithTopPriority() const { - if (!mDefaultEndpoint.empty()) { - auto iter = mEndpointInfoMap.find(mDefaultEndpoint); - if (iter != mEndpointInfoMap.end() && (iter->second).mValid) { - return mDefaultEndpoint; - } - } - std::string proxyEndpoint; - for (auto iter = mEndpointInfoMap.begin(); iter != mEndpointInfoMap.end(); ++iter) { - if (!(iter->second).mValid) { - continue; - } - if ((iter->second).mValid && !(iter->second).mProxy) { - return iter->first; - } - proxyEndpoint = iter->first; - } - if (!proxyEndpoint.empty()) { - return proxyEndpoint; - } - if (!mDefaultEndpoint.empty()) { - return mDefaultEndpoint; - } - if (!mEndpointInfoMap.empty()) { - return mEndpointInfoMap.begin()->first; - } - return mDefaultEndpoint; -} - SLSClientManager* SLSClientManager::GetInstance() { #ifdef __ENTERPRISE__ - static auto ptr = unique_ptr(new EnterpriseSLSClientManager()); + return EnterpriseSLSClientManager::GetInstance(); #else - static auto ptr = unique_ptr(new SLSClientManager()); + static SLSClientManager instance; + return &instance; #endif - return ptr.get(); } void SLSClientManager::Init() { - InitEndpointSwitchPolicy(); GenerateUserAgent(); - if (mDataServerSwitchPolicy == EndpointSwitchPolicy::DESIGNATED_FIRST) { - mProbeNetworkClient.reset(new sdk::Client("", - "", - INT32_FLAG(sls_client_send_timeout))); - mProbeNetworkClient->SetPort(AppConfig::GetInstance()->GetDataServerPort()); - mProbeNetworkThreadRes = async(launch::async, &SLSClientManager::ProbeNetworkThread, this); - } - if (BOOL_FLAG(send_prefer_real_ip)) { - mUpdateRealIpClient.reset(new sdk::Client("", - "", - INT32_FLAG(sls_client_send_timeout))); - 
mUpdateRealIpClient->SetPort(AppConfig::GetInstance()->GetDataServerPort()); - mUpdateRealIpThreadRes = async(launch::async, &SLSClientManager::UpdateRealIpThread, this); - } -} - -void SLSClientManager::Stop() { - if (mDataServerSwitchPolicy == EndpointSwitchPolicy::DESIGNATED_FIRST) { - lock_guard lock(mProbeNetworkThreadRunningMux); - mIsProbeNetworkThreadRunning = false; - } - if (BOOL_FLAG(send_prefer_real_ip)) { - lock_guard lock(mUpdateRealIpThreadRunningMux); - mIsUpdateRealIpThreadRunning = false; - } - mStopCV.notify_all(); - if (mDataServerSwitchPolicy == EndpointSwitchPolicy::DESIGNATED_FIRST && mProbeNetworkThreadRes.valid()) { - future_status s = mProbeNetworkThreadRes.wait_for(chrono::seconds(1)); - if (s == future_status::ready) { - LOG_INFO(sLogger, ("sls endpoint probe", "stopped successfully")); - } else { - LOG_WARNING(sLogger, ("sls endpoint probe", "forced to stopped")); - } - } - if (BOOL_FLAG(send_prefer_real_ip) && mUpdateRealIpThreadRes.valid()) { - future_status s = mUpdateRealIpThreadRes.wait_for(chrono::seconds(1)); - if (s == future_status::ready) { - LOG_INFO(sLogger, ("sls real ip update", "stopped successfully")); - } else { - LOG_WARNING(sLogger, ("sls real ip update", "forced to stopped")); - } - } -} - -void SLSClientManager::InitEndpointSwitchPolicy() { - if (STRING_FLAG(data_endpoint_policy) == "designated_locked") { - mDataServerSwitchPolicy = EndpointSwitchPolicy::DESIGNATED_LOCKED; - } else if (STRING_FLAG(data_endpoint_policy) == "designated_first") { - mDataServerSwitchPolicy = EndpointSwitchPolicy::DESIGNATED_FIRST; - } else { - LOG_WARNING(sLogger, - ("param data_endpoint_policy is invalid, action", "use default value instead")("default value", - "designated_first")); - } -} - -vector SLSClientManager::GetRegionAliuids(const std::string& region) { - lock_guard lock(mRegionAliuidRefCntMapLock); - vector aliuids; - for (const auto& item : mRegionAliuidRefCntMap[region]) { - aliuids.push_back(item.first); - } - return 
aliuids; -} - -void SLSClientManager::IncreaseAliuidReferenceCntForRegion(const std::string& region, const std::string& aliuid) { - lock_guard lock(mRegionAliuidRefCntMapLock); - ++mRegionAliuidRefCntMap[region][aliuid]; -} - -void SLSClientManager::DecreaseAliuidReferenceCntForRegion(const std::string& region, const std::string& aliuid) { - lock_guard lock(mRegionAliuidRefCntMapLock); - auto outerIter = mRegionAliuidRefCntMap.find(region); - if (outerIter == mRegionAliuidRefCntMap.end()) { - // should not happen - return; - } - auto innerIter = outerIter->second.find(aliuid); - if (innerIter == outerIter->second.end()) { - // should not happen - return; - } - if (--innerIter->second == 0) { - outerIter->second.erase(innerIter); - } - if (outerIter->second.empty()) { - mRegionAliuidRefCntMap.erase(outerIter); - } -} - -sdk::Client* SLSClientManager::GetClient(const string& region, const string& aliuid, bool createIfNotFound) { - string key = region + "_" + aliuid; - { - lock_guard lock(mClientMapMux); - auto iter = mClientMap.find(key); - if (iter != mClientMap.end()) { - (iter->second).second = time(NULL); - return (iter->second).first.get(); - } - } - if (!createIfNotFound) { - return nullptr; - } - - string endpoint = GetAvailableEndpointWithTopPriority(region); - auto client = make_unique(aliuid, - endpoint, - INT32_FLAG(sls_client_send_timeout)); - ResetClientPort(region, client.get()); - LOG_INFO(sLogger, - ("init endpoint for sender, region", region)("uid", aliuid)("hostname", GetHostFromEndpoint(endpoint))( - "use https", ToString(client->IsUsingHTTPS()))); - auto ptr = client.get(); - { - lock_guard lock(mClientMapMux); - mClientMap.insert(make_pair(key, make_pair(std::move(client), time(nullptr)))); - } - return ptr; -} - -bool SLSClientManager::ResetClientEndpoint(const string& aliuid, const string& region, time_t curTime) { - sdk::Client* sendClient = GetClient(region, aliuid, false); - if (sendClient == nullptr) { - return false; - } - if (curTime - 
sendClient->GetSlsHostUpdateTime() < INT32_FLAG(sls_host_update_interval)) { - return false; - } - sendClient->SetSlsHostUpdateTime(curTime); - string endpoint = GetAvailableEndpointWithTopPriority(region); - if (endpoint.empty()) { - return false; - } - string originalEndpoint = sendClient->GetRawSlsHost(); - if (originalEndpoint == endpoint) { - return false; - } - sendClient->SetSlsHost(endpoint); - ResetClientPort(region, sendClient); - LOG_INFO( - sLogger, - ("reset endpoint for sender, region", region)("uid", aliuid)("from", GetHostFromEndpoint(originalEndpoint))( - "to", GetHostFromEndpoint(endpoint))("use https", ToString(sendClient->IsUsingHTTPS()))); - return true; -} - -void SLSClientManager::ResetClientPort(const string& region, sdk::Client* sendClient) { - sendClient->SetPort(AppConfig::GetInstance()->GetDataServerPort()); - if (AppConfig::GetInstance()->GetDataServerPort() == 80) { - lock_guard lock(mRegionEndpointEntryMapLock); - auto iter = mRegionEndpointEntryMap.find(region); - if (iter != mRegionEndpointEntryMap.end()) { - const string& defaultEndpoint = iter->second.mDefaultEndpoint; - if (!defaultEndpoint.empty()) { - if (IsHttpsEndpoint(defaultEndpoint)) { - sendClient->SetPort(443); - } - } else { - if (IsHttpsEndpoint(sendClient->GetRawSlsHost())) { - sendClient->SetPort(443); - } - } - } - } -} - -void SLSClientManager::CleanTimeoutClient() { - lock_guard lock(mClientMapMux); - time_t curTime = time(nullptr); - for (auto iter = mClientMap.begin(); iter != mClientMap.end();) { - if ((curTime - (iter->second).second) > INT32_FLAG(send_client_timeout_interval)) { - iter = mClientMap.erase(iter); - } else { - ++iter; - } - } } -bool SLSClientManager::GetAccessKey(const std::string& aliuid, +bool SLSClientManager::GetAccessKey(const string& aliuid, AuthType& type, - std::string& accessKeyId, - std::string& accessKeySecret) { + string& accessKeyId, + string& accessKeySecret) { accessKeyId = STRING_FLAG(default_access_key_id); accessKeySecret = 
STRING_FLAG(default_access_key); type = AuthType::AK; return true; } -void SLSClientManager::AddEndpointEntry(const string& region, - const string& endpoint, - bool isProxy, - const EndpointSourceType& endpointType) { - lock_guard lock(mRegionEndpointEntryMapLock); - RegionEndpointsInfo& info = mRegionEndpointEntryMap[region]; - if (!isProxy) { - bool isDefault = false; - if (info.AddDefaultEndpoint(endpoint, endpointType, isDefault)) { - LOG_INFO(sLogger, - ("add data server endpoint, region", region)("endpoint", endpoint)( - "isDefault", isDefault ? "yes" : "no")("isProxy", "false")("#endpoint", - info.mEndpointInfoMap.size())); - } - } else { - if (info.AddEndpoint(endpoint, true, isProxy)) { - LOG_INFO(sLogger, - ("add data server endpoint, region", region)("endpoint", endpoint)("isProxy", ToString(isProxy))( - "#endpoint", info.mEndpointInfoMap.size())); - } - } -} - -void SLSClientManager::UpdateEndpointStatus(const string& region, - const string& endpoint, - bool status, - optional latency) { - lock_guard lock(mRegionEndpointEntryMapLock); - auto iter = mRegionEndpointEntryMap.find(region); - if (iter != mRegionEndpointEntryMap.end()) { - (iter->second).UpdateEndpointInfo(endpoint, status, latency, false); - } -} - -string SLSClientManager::GetAvailableEndpointWithTopPriority(const string& region) const { - static string emptyStr = ""; - lock_guard lock(mRegionEndpointEntryMapLock); - auto iter = mRegionEndpointEntryMap.find(region); - if (iter != mRegionEndpointEntryMap.end()) { - return (iter->second).GetAvailableEndpointWithTopPriority(); - } - return emptyStr; -} - -string SLSClientManager::GetRegionFromEndpoint(const string& endpoint) { - lock_guard lock(mRegionEndpointEntryMapLock); - for (auto iter = mRegionEndpointEntryMap.begin(); iter != mRegionEndpointEntryMap.end(); ++iter) { - for (auto epIter = ((iter->second).mEndpointInfoMap).begin(); epIter != ((iter->second).mEndpointInfoMap).end(); - ++epIter) { - if (epIter->first == endpoint) - return 
iter->first; - } - } - return STRING_FLAG(default_region_name); -} - -bool SLSClientManager::HasNetworkAvailable() { - static time_t lastCheckTime = time(nullptr); - time_t curTime = time(nullptr); - if (curTime - lastCheckTime >= 3600) { - lastCheckTime = curTime; - return true; - } - { - lock_guard lock(mRegionEndpointEntryMapLock); - for (auto iter = mRegionEndpointEntryMap.begin(); iter != mRegionEndpointEntryMap.end(); ++iter) { - for (auto epIter = ((iter->second).mEndpointInfoMap).begin(); - epIter != ((iter->second).mEndpointInfoMap).end(); - ++epIter) { - if ((epIter->second).mValid) { - return true; - } - } - } - } - return false; -} - -void SLSClientManager::ProbeNetworkThread() { - LOG_INFO(sLogger, ("sls endpoint probe", "started")); - // pair represents the weight of each endpoint - map>> unavaliableEndpoints; - set unavaliableRegions; - int32_t lastCheckAllTime = 0; - unique_lock lock(mProbeNetworkThreadRunningMux); - while (mIsProbeNetworkThreadRunning) { - unavaliableEndpoints.clear(); - unavaliableRegions.clear(); - { - lock_guard lock(mRegionEndpointEntryMapLock); - for (auto iter = mRegionEndpointEntryMap.begin(); iter != mRegionEndpointEntryMap.end(); ++iter) { - auto& endpoints = unavaliableEndpoints[iter->first]; - bool unavaliable = true; - for (auto epIter = ((iter->second).mEndpointInfoMap).begin(); - epIter != ((iter->second).mEndpointInfoMap).end(); - ++epIter) { - if (!(epIter->second).mValid) { - if (epIter->first == iter->second.mDefaultEndpoint) { - endpoints.emplace_back(0, epIter->first); - } else { - endpoints.emplace_back(10, epIter->first); - } - } else { - unavaliable = false; - } - } - sort(endpoints.begin(), endpoints.end()); - if (unavaliable) { - unavaliableRegions.insert(iter->first); - } - } - } - if (unavaliableEndpoints.empty()) { - if (mStopCV.wait_for(lock, chrono::seconds(INT32_FLAG(test_network_normal_interval)), [this]() { - return !mIsProbeNetworkThreadRunning; - })) { - break; - } - continue; - } - int32_t 
curTime = time(NULL); - // bool wakeUp = false; - for (const auto& value : unavaliableEndpoints) { - const string& region = value.first; - vector uids = GetRegionAliuids(region); - bool endpointChanged = false; - for (const auto& item : value.second) { - const string& endpoint = item.second; - const int32_t priority = item.first; - if (unavaliableRegions.find(region) == unavaliableRegions.end()) { - if (!endpointChanged && priority != 10) { - if (TestEndpoint(region, endpoint)) { - for (const auto& uid : uids) { - ResetClientEndpoint(uid, region, curTime); - } - endpointChanged = true; - } - } else { - if (curTime - lastCheckAllTime >= 1800) { - TestEndpoint(region, endpoint); - } - } - } else { - if (TestEndpoint(region, endpoint)) { - // wakeUp = true; - // Sender::GetInstance()->OnRegionRecover(region); - if (!endpointChanged) { - for (const auto& uid : uids) { - ResetClientEndpoint(uid, region, curTime); - } - endpointChanged = true; - } - } - } - } - } - // if (wakeUp && (!mIsSendingBuffer)) { - // mSenderQueue.Signal(); - // } - if (curTime - lastCheckAllTime >= 1800) { - lastCheckAllTime = curTime; - } - if (mStopCV.wait_for(lock, chrono::seconds(INT32_FLAG(test_unavailable_endpoint_interval)), [this]() { - return !mIsProbeNetworkThreadRunning; - })) { - break; - } - } -} - -bool SLSClientManager::TestEndpoint(const string& region, const string& endpoint) { - // TODO: this should be removed, since control-plane status is not the same as data-plane - if (!FlusherSLS::GetRegionStatus(region)) { - return false; - } - if (endpoint.empty()) { - return false; - } - mProbeNetworkClient->SetSlsHost(endpoint); - ResetClientPort(region, mProbeNetworkClient.get()); - - bool status = true; - uint64_t beginTime = GetCurrentTimeInMicroSeconds(); - try { - status = mProbeNetworkClient->TestNetwork(); - } catch (sdk::LOGException& ex) { - if (ConvertErrorCode(ex.GetErrorCode()) == SEND_NETWORK_ERROR) { - status = false; - } - } catch (...) 
{ - LOG_ERROR(sLogger, ("test network", "send fail")("exception", "unknown")); - status = false; - } - uint32_t latency = (GetCurrentTimeInMicroSeconds() - beginTime) / 1000; - LOG_DEBUG(sLogger, ("TestEndpoint, region", region)("endpoint", endpoint)("status", status)("latency", latency)); - UpdateEndpointStatus(region, endpoint, status, latency); - return status; -} - -void SLSClientManager::ForceUpdateRealIp(const string& region) { - lock_guard lock(mRegionRealIpLock); - auto iter = mRegionRealIpMap.find(region); - if (iter != mRegionRealIpMap.end()) { - iter->second->mForceFlushFlag = true; - } -} - -void SLSClientManager::UpdateSendClientRealIp(sdk::Client* client, const string& region) { - string realIp; - RealIpInfo* pInfo = NULL; - { - lock_guard lock(mRegionRealIpLock); - auto iter = mRegionRealIpMap.find(region); - if (iter != mRegionRealIpMap.end()) { - pInfo = iter->second; - } else { - pInfo = new RealIpInfo; - mRegionRealIpMap.insert(make_pair(region, pInfo)); - } - realIp = pInfo->mRealIp; - } - if (!realIp.empty()) { - client->SetSlsHost(realIp); - client->SetSlsRealIpUpdateTime(time(NULL)); - } else if (pInfo->mLastUpdateTime >= client->GetSlsRealIpUpdateTime()) { - const string& defaultEndpoint = GetAvailableEndpointWithTopPriority(region); - if (!defaultEndpoint.empty()) { - client->SetSlsHost(defaultEndpoint); - client->SetSlsRealIpUpdateTime(time(NULL)); - } - } -} - -void SLSClientManager::UpdateRealIpThread() { - LOG_INFO(sLogger, ("sls real ip update", "started")); - int32_t lastUpdateRealIpTime = 0; - vector regionEndpointArray; - vector regionArray; - unique_lock lock(mUpdateRealIpThreadRunningMux); - while (mIsUpdateRealIpThreadRunning) { - int32_t curTime = time(NULL); - bool updateFlag = curTime - lastUpdateRealIpTime > INT32_FLAG(send_switch_real_ip_interval); - { - // check force update - lock_guard lock(mRegionRealIpLock); - auto iter = mRegionRealIpMap.begin(); - for (; iter != mRegionRealIpMap.end(); ++iter) { - if 
(iter->second->mForceFlushFlag) { - iter->second->mForceFlushFlag = false; - updateFlag = true; - LOG_INFO(sLogger, ("force update real ip", iter->first)); - } - } - } - if (updateFlag) { - LOG_DEBUG(sLogger, ("start update real ip", "")); - regionEndpointArray.clear(); - regionArray.clear(); - { - lock_guard lock(mRegionEndpointEntryMapLock); - auto iter = mRegionEndpointEntryMap.begin(); - for (; iter != mRegionEndpointEntryMap.end(); ++iter) { - regionEndpointArray.push_back((iter->second).GetAvailableEndpointWithTopPriority()); - regionArray.push_back(iter->first); - } - } - for (size_t i = 0; i < regionEndpointArray.size(); ++i) { - // no available endpoint - if (regionEndpointArray[i].empty()) { - continue; - } - - EndpointStatus status = UpdateRealIp(regionArray[i], regionEndpointArray[i]); - if (status == EndpointStatus::STATUS_ERROR) { - UpdateEndpointStatus(regionArray[i], regionEndpointArray[i], false); - } - } - lastUpdateRealIpTime = time(NULL); - } - if (mStopCV.wait_for(lock, chrono::seconds(1), [this]() { return !mIsUpdateRealIpThreadRunning; })) { - break; - } - } -} - -SLSClientManager::EndpointStatus SLSClientManager::UpdateRealIp(const string& region, const string& endpoint) { - mUpdateRealIpClient->SetSlsHost(endpoint); - EndpointStatus status = EndpointStatus::STATUS_ERROR; - int64_t beginTime = GetCurrentTimeInMicroSeconds(); - try { - sdk::GetRealIpResponse rsp; - rsp = mUpdateRealIpClient->GetRealIp(); - - if (!rsp.realIp.empty()) { - SetRealIp(region, rsp.realIp); - status = EndpointStatus::STATUS_OK_WITH_IP; - } else { - status = EndpointStatus::STATUS_OK_WITH_ENDPOINT; - static int32_t sUpdateRealIpWarningCount = 0; - if (sUpdateRealIpWarningCount++ % 100 == 0) { - sUpdateRealIpWarningCount %= 100; - LOG_WARNING(sLogger, - ("get real ip request succeeded but server did not give real ip, region", - region)("endpoint", endpoint)); - } - - // we should set real ip to empty string if server did not give real ip - SetRealIp(region, ""); - } - 
} - // GetRealIp's implement should not throw LOGException, but we catch it to hold implement changing - catch (sdk::LOGException& ex) { - const string& errorCode = ex.GetErrorCode(); - LOG_DEBUG(sLogger, ("get real ip", "send fail")("errorCode", errorCode)("errorMessage", ex.GetMessage())); - SendResult sendRst = ConvertErrorCode(errorCode); - if (sendRst == SEND_NETWORK_ERROR) - status = EndpointStatus::STATUS_ERROR; - } catch (...) { - LOG_ERROR(sLogger, ("get real ip", "send fail")("exception", "unknown")); - } - int64_t endTime = GetCurrentTimeInMicroSeconds(); - int32_t latency = int32_t((endTime - beginTime) / 1000); // ms - LOG_DEBUG(sLogger, - ("Get real ip, region", region)("endpoint", endpoint)("status", int(status))("latency", latency)); - return status; -} - -void SLSClientManager::SetRealIp(const string& region, const string& ip) { - lock_guard lock(mRegionRealIpLock); - RealIpInfo* pInfo = NULL; - auto iter = mRegionRealIpMap.find(region); - if (iter != mRegionRealIpMap.end()) { - pInfo = iter->second; - } else { - pInfo = new RealIpInfo; - mRegionRealIpMap.insert(make_pair(region, pInfo)); - } - LOG_DEBUG(sLogger, ("set real ip, last", pInfo->mRealIp)("now", ip)("region", region)); - pInfo->SetRealIp(ip); -} - void SLSClientManager::GenerateUserAgent() { string os; #if defined(__linux__) @@ -714,7 +108,7 @@ string SLSClientManager::GetRunningEnvironment() { // containers in K8S will possess the above env if (AppConfig::GetInstance()->IsPurageContainerMode()) { env = "K8S-Daemonset"; - } else if (TryCurlEndpoint("http://100.100.100.200/latest/meta-data")) { + } else if (PingEndpoint("100.100.100.200", "/latest/meta-data")) { // containers in ACK can be connected to the above address, see // https://help.aliyun.com/document_detail/108460.html#section-akf-lwh-1gb. 
// Note: we can not distinguish ACK from K8S built on ECS @@ -724,7 +118,7 @@ string SLSClientManager::GetRunningEnvironment() { } } else if (AppConfig::GetInstance()->IsPurageContainerMode() || getenv("ALIYUN_LOGTAIL_CONFIG")) { env = "Docker"; - } else if (TryCurlEndpoint("http://100.100.100.200/latest/meta-data")) { + } else if (PingEndpoint("100.100.100.200", "/latest/meta-data")) { env = "ECS"; } else { env = "Others"; @@ -732,37 +126,207 @@ string SLSClientManager::GetRunningEnvironment() { return env; } -bool SLSClientManager::TryCurlEndpoint(const string& endpoint) { - CURL* curl; - for (size_t retryTimes = 1; retryTimes <= 5; retryTimes++) { - curl = curl_easy_init(); - if (curl) { - break; - } - this_thread::sleep_for(chrono::seconds(1)); +bool SLSClientManager::PingEndpoint(const string& host, const string& path) { + map header; + HttpResponse response; + return SendHttpRequest(make_unique(HTTP_GET, false, host, 80, path, "", header, "", 3, 1, true), + response); +} + +void PreparePostLogStoreLogsRequest(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + const string& host, + bool isHostIp, + const string& project, + const string& logstore, + const string& compressType, + RawDataType dataType, + const string& body, + size_t rawSize, + const string& shardHashKey, + optional seqId, + string& path, + string& query, + map& header) { + path = LOGSTORES; + path.append("/").append(logstore); + if (shardHashKey.empty()) { + path.append("/shards/lb"); + } else { + path.append("/shards/route"); } - if (curl) { - curl_easy_setopt(curl, CURLOPT_URL, endpoint.c_str()); - curl_easy_setopt(curl, CURLOPT_NOBODY, 1L); - curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); - curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(curl, CURLOPT_TIMEOUT, 3); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); + if (isHostIp) { + header[HOST] = project + "." 
+ host; + } else { + header[HOST] = host; + } + header[USER_AGENT] = SLSClientManager::GetInstance()->GetUserAgent(); + header[DATE] = GetDateString(); + header[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; + header[CONTENT_LENGTH] = to_string(body.size()); + header[CONTENT_MD5] = CalcMD5(body); + header[X_LOG_APIVERSION] = LOG_API_VERSION; + header[X_LOG_SIGNATUREMETHOD] = HMAC_SHA1; + if (!compressType.empty()) { + header[X_LOG_COMPRESSTYPE] = compressType; + } + if (dataType == RawDataType::EVENT_GROUP) { + header[X_LOG_BODYRAWSIZE] = to_string(rawSize); + } else { + header[X_LOG_BODYRAWSIZE] = to_string(body.size()); + header[X_LOG_MODE] = LOG_MODE_BATCH_GROUP; + } + if (type == SLSClientManager::AuthType::ANONYMOUS) { + header[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; + } - if (curl_easy_perform(curl) != CURLE_OK) { - curl_easy_cleanup(curl); - return false; + map parameterList; + if (!shardHashKey.empty()) { + parameterList["key"] = shardHashKey; + if (seqId.has_value()) { + parameterList["seqid"] = to_string(seqId.value()); } - curl_easy_cleanup(curl); - return true; } - - LOG_WARNING( - sLogger, - ("curl handler cannot be initialized during user environment identification", "user agent may be mislabeled")); - return false; + query = GetQueryString(parameterList); + + string signature = GetUrlSignature(HTTP_POST, path, header, parameterList, body, accessKeySecret); + header[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; +} + +void PreparePostMetricStoreLogsRequest(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + const string& host, + bool isHostIp, + const string& project, + const string& logstore, + const string& compressType, + const string& body, + size_t rawSize, + string& path, + map& header) { + path = METRICSTORES; + path.append("/").append(project).append("/").append(logstore).append("/api/v1/write"); + + if (isHostIp) { + header[HOST] = project + "." 
+ host; + } else { + header[HOST] = host; + } + header[USER_AGENT] = SLSClientManager::GetInstance()->GetUserAgent(); + header[DATE] = GetDateString(); + header[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; + header[CONTENT_LENGTH] = to_string(body.size()); + header[CONTENT_MD5] = CalcMD5(body); + header[X_LOG_APIVERSION] = LOG_API_VERSION; + header[X_LOG_SIGNATUREMETHOD] = HMAC_SHA1; + if (!compressType.empty()) { + header[X_LOG_COMPRESSTYPE] = compressType; + } + header[X_LOG_BODYRAWSIZE] = to_string(rawSize); + if (type == SLSClientManager::AuthType::ANONYMOUS) { + header[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; + } + + map parameterList; + string signature = GetUrlSignature(HTTP_POST, path, header, parameterList, body, accessKeySecret); + header[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; +} + +SLSResponse PostLogStoreLogs(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + const string& host, + bool httpsFlag, + const string& project, + const string& logstore, + const string& compressType, + RawDataType dataType, + const string& body, + size_t rawSize, + const string& shardHashKey) { + string path, query; + map header; + PreparePostLogStoreLogsRequest(accessKeyId, + accessKeySecret, + type, + host, + false, // sync request always uses vip + project, + logstore, + compressType, + dataType, + body, + rawSize, + shardHashKey, + nullopt, // sync request does not support exactly-once + path, + query, + header); + HttpResponse response; + SendHttpRequest( + make_unique(HTTP_POST, httpsFlag, host, httpsFlag ? 
443 : 80, path, query, header, body), + response); + return ParseHttpResponse(response); +} + +SLSResponse PostMetricStoreLogs(const string& accessKeyId, + const string& accessKeySecret, + SLSClientManager::AuthType type, + const string& host, + bool httpsFlag, + const string& project, + const string& logstore, + const string& compressType, + const string& body, + size_t rawSize) { + string path; + map header; + PreparePostMetricStoreLogsRequest(accessKeyId, + accessKeySecret, + type, + host, + false, // sync request always uses vip + project, + logstore, + compressType, + body, + rawSize, + path, + header); + HttpResponse response; + SendHttpRequest(make_unique(HTTP_POST, httpsFlag, host, httpsFlag ? 443 : 80, path, "", header, body), + response); + return ParseHttpResponse(response); +} + +SLSResponse PutWebTracking(const string& host, + bool httpsFlag, + const string& logstore, + const string& compressType, + const string& body, + size_t rawSize) { + string path = LOGSTORES; + path.append("/").append(logstore).append("/track"); + + map header; + header[HOST] = host; + header[USER_AGENT] = SLSClientManager::GetInstance()->GetUserAgent(); + header[DATE] = GetDateString(); + header[CONTENT_LENGTH] = to_string(body.size()); + header[X_LOG_APIVERSION] = LOG_API_VERSION; + // header[X_LOG_SIGNATUREMETHOD] = HMAC_SHA1; + if (!compressType.empty()) { + header[X_LOG_COMPRESSTYPE] = compressType; + } + header[X_LOG_BODYRAWSIZE] = to_string(rawSize); + + HttpResponse response; + SendHttpRequest(make_unique(HTTP_POST, httpsFlag, host, httpsFlag ? 
443 : 80, path, "", header, body), + response); + return ParseHttpResponse(response); } } // namespace logtail diff --git a/core/plugin/flusher/sls/SLSClientManager.h b/core/plugin/flusher/sls/SLSClientManager.h index c82a8a8cb1..a49f3123a9 100644 --- a/core/plugin/flusher/sls/SLSClientManager.h +++ b/core/plugin/flusher/sls/SLSClientManager.h @@ -16,24 +16,18 @@ #pragma once -#include #include -#include -#include -#include +#include #include #include -#include -#include -#include "sdk/Client.h" +#include "pipeline/queue/SenderQueueItem.h" +#include "plugin/flusher/sls/SLSResponse.h" namespace logtail { class SLSClientManager { public: - enum class EndpointSourceType { LOCAL, REMOTE }; - enum class EndpointSwitchPolicy { DESIGNATED_FIRST, DESIGNATED_LOCKED }; enum class AuthType { ANONYMOUS, AK }; virtual ~SLSClientManager() = default; @@ -42,131 +36,87 @@ class SLSClientManager { static SLSClientManager* GetInstance(); - void Init(); - void Stop(); + virtual void Init(); + virtual void Stop() {}; - EndpointSwitchPolicy GetServerSwitchPolicy() const { return mDataServerSwitchPolicy; } const std::string& GetUserAgent() const { return mUserAgent; } - void IncreaseAliuidReferenceCntForRegion(const std::string& region, const std::string& aliuid); - void DecreaseAliuidReferenceCntForRegion(const std::string& region, const std::string& aliuid); - - sdk::Client* GetClient(const std::string& region, const std::string& aliuid, bool createIfNotFound = true); - bool ResetClientEndpoint(const std::string& aliuid, const std::string& region, time_t curTime); - void CleanTimeoutClient(); virtual bool GetAccessKey(const std::string& aliuid, AuthType& type, std::string& accessKeyId, std::string& accessKeySecret); - virtual void UpdateAccessKeyStatus(const std::string& aliuid, bool success) {} - - void AddEndpointEntry(const std::string& region, - const std::string& endpoint, - bool isProxy, - const EndpointSourceType& endpointType); - void UpdateEndpointStatus(const std::string& 
region, - const std::string& endpoint, - bool status, - std::optional latency = std::optional()); - void ForceUpdateRealIp(const std::string& region); - void UpdateSendClientRealIp(sdk::Client* client, const std::string& region); - - std::string GetRegionFromEndpoint(const std::string& endpoint); // for backward compatibility - bool HasNetworkAvailable(); // TODO: remove this function + virtual bool UsingHttps(const std::string& region) const { return true; } protected: SLSClientManager() = default; virtual std::string GetRunningEnvironment(); - bool TryCurlEndpoint(const std::string& endpoint); + bool PingEndpoint(const std::string& host, const std::string& path); std::string mUserAgent; private: - enum class EndpointStatus { STATUS_OK_WITH_IP, STATUS_OK_WITH_ENDPOINT, STATUS_ERROR }; - - struct EndpointInfo { - bool mValid = true; - std::optional mLatencyMs; - bool mProxy = false; - - EndpointInfo(bool valid, bool proxy) : mValid(valid), mProxy(proxy) {} - - void UpdateInfo(bool valid, std::optional latency) { - mValid = valid; - mLatencyMs = latency; - } - }; - - struct RegionEndpointsInfo { - std::unordered_map mEndpointInfoMap; - std::string mDefaultEndpoint; - EndpointSourceType mDefaultEndpointType; - - bool AddDefaultEndpoint(const std::string& endpoint, const EndpointSourceType& endpointType, bool& isDefault); - bool AddEndpoint(const std::string& endpoint, bool status, bool proxy = false); - void UpdateEndpointInfo(const std::string& endpoint, - bool status, - std::optional latency, - bool createFlag = true); - void RemoveEndpoint(const std::string& endpoint); - std::string GetAvailableEndpointWithTopPriority() const; - }; - - struct RealIpInfo { - std::string mRealIp; - time_t mLastUpdateTime = 0; - bool mForceFlushFlag = false; - - void SetRealIp(const std::string& realIp) { - mRealIp = realIp; - mLastUpdateTime = time(NULL); - mForceFlushFlag = false; - } - }; - virtual void GenerateUserAgent(); - void InitEndpointSwitchPolicy(); - std::vector 
GetRegionAliuids(const std::string& region); - - void ResetClientPort(const std::string& region, sdk::Client* sendClient); - std::string GetAvailableEndpointWithTopPriority(const std::string& region) const; - - void ProbeNetworkThread(); - bool TestEndpoint(const std::string& region, const std::string& endpoint); - - void UpdateRealIpThread(); - EndpointStatus UpdateRealIp(const std::string& region, const std::string& endpoint); - void SetRealIp(const std::string& region, const std::string& ip); - - mutable std::mutex mRegionAliuidRefCntMapLock; - std::unordered_map> mRegionAliuidRefCntMap; - - mutable std::mutex mClientMapMux; - std::unordered_map, time_t>> mClientMap; - // int32_t mLastCheckSendClientTime; - - mutable std::mutex mRegionEndpointEntryMapLock; - std::unordered_map mRegionEndpointEntryMap; - EndpointSwitchPolicy mDataServerSwitchPolicy = EndpointSwitchPolicy::DESIGNATED_FIRST; - std::unique_ptr mProbeNetworkClient; - - std::future mProbeNetworkThreadRes; - mutable std::mutex mProbeNetworkThreadRunningMux; - bool mIsProbeNetworkThreadRunning = true; - - mutable std::mutex mRegionRealIpLock; - std::unordered_map mRegionRealIpMap; - std::unique_ptr mUpdateRealIpClient; - - std::future mUpdateRealIpThreadRes; - mutable std::mutex mUpdateRealIpThreadRunningMux; - bool mIsUpdateRealIpThreadRunning = true; - - mutable std::condition_variable mStopCV; #ifdef APSARA_UNIT_TEST_MAIN - friend class FlusherSLSUnittest; + friend class SLSClientManagerUnittest; #endif }; +void PreparePostLogStoreLogsRequest(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + const std::string& host, + bool isHostIp, + const std::string& project, + const std::string& logstore, + const std::string& compressType, + RawDataType dataType, + const std::string& body, + size_t rawSize, + const std::string& shardHashKey, + std::optional seqId, + std::string& path, + std::string& query, + std::map& header); +void 
PreparePostMetricStoreLogsRequest(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + const std::string& host, + bool isHostIp, + const std::string& project, + const std::string& logstore, + const std::string& compressType, + const std::string& body, + size_t rawSize, + std::string& path, + std::map& header); +SLSResponse PostLogStoreLogs(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + const std::string& host, + bool httpsFlag, + const std::string& project, + const std::string& logstore, + const std::string& compressType, + RawDataType dataType, + const std::string& body, + size_t rawSize, + const std::string& shardHashKey); +SLSResponse PostMetricStoreLogs(const std::string& accessKeyId, + const std::string& accessKeySecret, + SLSClientManager::AuthType type, + const std::string& host, + bool httpsFlag, + const std::string& project, + const std::string& logstore, + const std::string& compressType, + const std::string& body, + size_t rawSize); +SLSResponse PutWebTracking(const std::string& host, + bool httpsFlag, + const std::string& logstore, + const std::string& compressType, + const std::string& body, + size_t rawSize); + } // namespace logtail diff --git a/core/plugin/flusher/sls/SLSConstant.cpp b/core/plugin/flusher/sls/SLSConstant.cpp new file mode 100644 index 0000000000..d81dbf284e --- /dev/null +++ b/core/plugin/flusher/sls/SLSConstant.cpp @@ -0,0 +1,98 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "plugin/flusher/sls/SLSConstant.h" + +using namespace std; + +namespace logtail { + +const string LOGSTORES = "/logstores"; +const string METRICSTORES = "/prometheus"; +const string HEALTH = "/health"; + +const string LOGTAIL_USER_AGENT = "ali-log-logtail"; + +const string CONTENT_MD5 = "Content-MD5"; + +const string LOG_HEADER_PREFIX = "x-log-"; +const string LOG_OLD_HEADER_PREFIX = "x-sls-"; +const string ACS_HEADER_PREFIX = "x-acs-"; +const string X_LOG_KEYPROVIDER = "x-log-keyprovider"; +const string X_LOG_APIVERSION = "x-log-apiversion"; +const string X_LOG_COMPRESSTYPE = "x-log-compresstype"; +const string X_LOG_BODYRAWSIZE = "x-log-bodyrawsize"; +const string X_LOG_SIGNATUREMETHOD = "x-log-signaturemethod"; +const string X_LOG_MODE = "x-log-mode"; +const string X_LOG_HOSTIP = "x-log-hostip"; +const string X_LOG_REQUEST_ID = "x-log-requestid"; +const string HMAC_SHA1 = "hmac-sha1"; +const string LOG_HEADSIGNATURE_PREFIX = "LOG "; +const string LOG_API_VERSION = "0.6.0"; +const string LOG_MODE_BATCH_GROUP = "batch_group"; + +const string MD5_SHA1_SALT_KEYPROVIDER = "md5-sha1-salt"; + +const string LOGE_REQUEST_ERROR = "RequestError"; +const string LOGE_INVALID_HOST = "InvalidHost"; +const string LOGE_UNKNOWN_ERROR = "UnknownError"; +const string LOGE_NOT_IMPLEMENTED = "NotImplemented"; +const string LOGE_SERVER_BUSY = "ServerBusy"; +const string LOGE_INTERNAL_SERVER_ERROR = "InternalServerError"; +const string LOGE_RESPONSE_SIG_ERROR = "ResponseSignatureError"; +const string LOGE_PARAMETER_INVALID = "ParameterInvalid"; +const string LOGE_MISSING_PARAMETER = "MissingParameter"; +const string LOGE_INVALID_METHOD = "InvalidMethod"; +const string LOGE_BAD_RESPONSE = "BadResponse"; +const string LOGE_UNAUTHORIZED = "Unauthorized"; +const string LOGE_QUOTA_EXCEED = "ExceedQuota"; +const string LOGE_REQUEST_TIMEOUT = "RequestTimeout"; +const string 
LOGE_CLIENT_OPERATION_TIMEOUT = "ClientOpertaionTimeout"; +const string LOGE_CLIENT_NETWORK_ERROR = "ClientNetworkError"; +const string LOGE_USER_NOT_EXIST = "UserNotExist"; +const string LOGE_CATEGORY_NOT_EXIST = "CategoryNotExist"; +const string LOGE_TOPIC_NOT_EXIST = "TopicNotExist"; +const string LOGE_POST_BODY_INVALID = "PostBodyInvalid"; +const string LOGE_INVALID_CONTENTTYPE = "InvalidContentType"; +const string LOGE_INVALID_CONTENLENGTH = "InvalidContentLength"; +const string LOGE_INVALID_APIVERSION = "InvalidAPIVersion"; +const string LOGE_PROJECT_NOT_EXIST = "ProjectNotExist"; +const string LOGE_MACHINEGROUP_NOT_EXIST = "MachineGroupNotExist"; +const string LOGE_MACHINEGROUP_ALREADY_EXIST = "MachineGroupAlreadyExist"; +const string LOGE_CONFIG_NOT_EXIST = "ConfigNotExist"; +const string LOGE_CONFIG_ALREADY_EXIST = "ConfigAlreadyExist"; +const string LOGE_LOGSTORE_NOT_EXIST = "LogStoreNotExist"; +const string LOGE_INVALID_ACCESSKEYID = "InvalidAccessKeyId"; +const string LOGE_SIGNATURE_NOT_MATCH = "SignatureNotMatch"; +const string LOGE_PROJECT_FORBIDDEN = "ProjectForbidden"; +const string LOGE_WRITE_QUOTA_EXCEED = "WriteQuotaExceed"; +const string LOGE_READ_QUOTA_EXCEED = "ReadQuotaExceed"; +const string LOGE_REQUEST_TIME_EXPIRED = "RequestTimeExpired"; +const string LOGE_INVALID_REQUEST_TIME = "InvalidRequestTime"; +const string LOGE_POST_BODY_TOO_LARGE = "PostBodyTooLarge"; +const string LOGE_INVALID_TIME_RANGE = "InvalidTimeRange"; +const string LOGE_INVALID_REVERSE = "InvalidReverse"; +const string LOGE_LOGSTORE_WITHOUT_SHARD = "LogStoreWithoutShard"; +const string LOGE_SHARD_WRITE_QUOTA_EXCEED = "ShardWriteQuotaExceed"; +const string LOGE_SHARD_READ_QUOTA_EXCEED = "ShardReadQuotaExceed"; +const string LOGE_INVALID_SEQUENCE_ID = "InvalidSequenceId"; +const string LOGE_NOT_SUPPORTED_ACCEPT_CONTENT_TYPE = "InvalidAcceptContentType"; +const string LOGE_NOT_SUPPORTED_ACCEPT_ENCODING = "InvalidAcceptEncoding"; +const string LOGE_SHARD_NOT_EXIST = 
"ShardNotExist"; +const string LOGE_INVALID_CURSOR = "InvalidCursor"; + +const string LOG_ERROR_CODE = "errorCode"; +const string LOG_ERROR_MESSAGE = "errorMessage"; + +} // namespace logtail diff --git a/core/plugin/flusher/sls/SLSConstant.h b/core/plugin/flusher/sls/SLSConstant.h new file mode 100644 index 0000000000..5874d5f2ec --- /dev/null +++ b/core/plugin/flusher/sls/SLSConstant.h @@ -0,0 +1,101 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +namespace logtail { + +extern const std::string LOGSTORES; +extern const std::string METRICSTORES; +extern const std::string HEALTH; + +extern const std::string CONTENT_MD5; + +extern const std::string LOGTAIL_USER_AGENT; + +extern const std::string LOG_HEADER_PREFIX; +extern const std::string LOG_OLD_HEADER_PREFIX; +extern const std::string ACS_HEADER_PREFIX; +extern const std::string X_LOG_KEYPROVIDER; +extern const std::string X_LOG_APIVERSION; +extern const std::string X_LOG_COMPRESSTYPE; +extern const std::string X_LOG_BODYRAWSIZE; +extern const std::string X_LOG_SIGNATUREMETHOD; +extern const std::string X_LOG_MODE; +extern const std::string X_LOG_HOSTIP; +extern const std::string X_LOG_REQUEST_ID; + +extern const std::string LOG_HEADSIGNATURE_PREFIX; +extern const std::string LOG_API_VERSION; +extern const std::string LOG_MODE_BATCH_GROUP; +extern const std::string HMAC_SHA1; +extern const std::string MD5_SHA1_SALT_KEYPROVIDER; + +extern const std::string LOGE_REQUEST_ERROR; +extern const std::string LOGE_INVALID_HOST; +extern const std::string LOGE_UNKNOWN_ERROR; +extern const std::string LOGE_NOT_IMPLEMENTED; +extern const std::string LOGE_SERVER_BUSY; +extern const std::string LOGE_INTERNAL_SERVER_ERROR; +extern const std::string LOGE_RESPONSE_SIG_ERROR; +extern const std::string LOGE_PARAMETER_INVALID; +extern const std::string LOGE_MISSING_PARAMETER; +extern const std::string LOGE_INVALID_METHOD; +extern const std::string LOGE_INVALID_CONTENTTYPE; +extern const std::string LOGE_INVALID_CONTENTLENGTH; +extern const std::string LOGE_BAD_RESPONSE; +extern const std::string LOGE_UNAUTHORIZED; +extern const std::string LOGE_QUOTA_EXCEED; +extern const std::string LOGE_REQUEST_TIMEOUT; +extern const std::string LOGE_CLIENT_OPERATION_TIMEOUT; +extern const std::string LOGE_CLIENT_NETWORK_ERROR; +extern const std::string LOGE_USER_NOT_EXIST; +extern const std::string LOGE_CATEGORY_NOT_EXIST; +extern const std::string 
LOGE_TOPIC_NOT_EXIST; +extern const std::string LOGE_POST_BODY_INVALID; +extern const std::string LOGE_INVALID_HOST; +extern const std::string LOGE_INVALID_APIVERSION; +extern const std::string LOGE_PROJECT_NOT_EXIST; +extern const std::string LOGE_MACHINEGROUP_NOT_EXIST; +extern const std::string LOGE_MACHINEGROUP_ALREADY_EXIST; +extern const std::string LOGE_CONFIG_NOT_EXIST; +extern const std::string LOGE_CONFIG_ALREADY_EXIST; +extern const std::string LOGE_LOGSTORE_NOT_EXIST; +extern const std::string LOGE_INVALID_ACCESSKEYID; +extern const std::string LOGE_SIGNATURE_NOT_MATCH; +extern const std::string LOGE_PROJECT_FORBIDDEN; +extern const std::string LOGE_WRITE_QUOTA_EXCEED; +extern const std::string LOGE_READ_QUOTA_EXCEED; +extern const std::string LOGE_REQUEST_TIME_EXPIRED; +extern const std::string LOGE_INVALID_REQUEST_TIME; +extern const std::string LOGE_POST_BODY_TOO_LARGE; +extern const std::string LOGE_INVALID_TIME_RANGE; +extern const std::string LOGE_INVALID_REVERSE; +extern const std::string LOGE_LOGSTORE_WITHOUT_SHARD; +extern const std::string LOGE_INVALID_SEQUENCE_ID; +extern const std::string LOGE_NOT_SUPPORTED_ACCEPT_CONTENT_TYPE; +extern const std::string LOGE_NOT_SUPPORTED_ACCEPT_ENCODING; +extern const std::string LOGE_SHARD_NOT_EXIST; +extern const std::string LOGE_INVALID_CURSOR; +extern const std::string LOGE_SHARD_WRITE_QUOTA_EXCEED; +extern const std::string LOGE_SHARD_READ_QUOTA_EXCEED; + +extern const std::string LOG_ERROR_CODE; +extern const std::string LOG_ERROR_MESSAGE; + +} // namespace logtail diff --git a/core/plugin/flusher/sls/SLSResponse.cpp b/core/plugin/flusher/sls/SLSResponse.cpp index d9fa405479..e13847618a 100644 --- a/core/plugin/flusher/sls/SLSResponse.cpp +++ b/core/plugin/flusher/sls/SLSResponse.cpp @@ -14,31 +14,80 @@ #include "plugin/flusher/sls/SLSResponse.h" +#include + +#include "app_config/AppConfig.h" #include "common/ErrorUtil.h" #include "common/StringTools.h" #include "common/TimeUtil.h" #include 
"logger/Logger.h" -#include "sdk/Common.h" -#include "sdk/Exception.h" -#include "sdk/Result.h" +#include "plugin/flusher/sls/SLSConstant.h" +#include "Exception.h" using namespace std; +using namespace logtail::sdk; namespace logtail { +void ExtractJsonResult(const string& response, rapidjson::Document& document) { + document.Parse(response.c_str()); + if (document.HasParseError()) { + throw JsonException("ParseException", "Fail to parse from json string"); + } +} + +void JsonMemberCheck(const rapidjson::Value& value, const char* name) { + if (!value.IsObject()) { + throw JsonException("InvalidObjectException", "response is not valid JSON object"); + } + if (!value.HasMember(name)) { + throw JsonException("NoMemberException", string("Member ") + name + " does not exist"); + } +} + +void ExtractJsonResult(const rapidjson::Value& value, const char* name, string& dst) { + JsonMemberCheck(value, name); + if (value[name].IsString()) { + dst = value[name].GetString(); + } else { + throw JsonException("ValueTypeException", string("Member ") + name + " is not string type"); + } +} + +void ErrorCheck(const string& response, const string& requestId, const int32_t httpCode) { + rapidjson::Document document; + try { + ExtractJsonResult(response, document); + + string errorCode; + ExtractJsonResult(document, LOG_ERROR_CODE.c_str(), errorCode); + + string errorMessage; + ExtractJsonResult(document, LOG_ERROR_MESSAGE.c_str(), errorMessage); + + throw LOGException(errorCode, errorMessage, requestId, httpCode); + } catch (JsonException& e) { + if (httpCode >= 500) { + throw LOGException(LOGE_INTERNAL_SERVER_ERROR, response, requestId, httpCode); + } else { + throw LOGException(LOGE_BAD_RESPONSE, string("Unextractable error:") + response, requestId, httpCode); + } + } +} + bool SLSResponse::Parse(const HttpResponse& response) { - const auto iter = response.GetHeader().find(sdk::X_LOG_REQUEST_ID); + const auto iter = response.GetHeader().find(X_LOG_REQUEST_ID); if (iter != 
response.GetHeader().end()) { mRequestId = iter->second; } mStatusCode = response.GetStatusCode(); if (mStatusCode == 0) { - mErrorCode = sdk::LOGE_REQUEST_TIMEOUT; + mErrorCode = LOGE_REQUEST_TIMEOUT; mErrorMsg = "Request timeout"; } else if (mStatusCode != 200) { try { - sdk::ErrorCheck(*response.GetBody(), mRequestId, response.GetStatusCode()); + ErrorCheck(*response.GetBody(), mRequestId, response.GetStatusCode()); } catch (sdk::LOGException& e) { mErrorCode = e.GetErrorCode(); mErrorMsg = e.GetMessage_(); @@ -47,8 +96,30 @@ bool SLSResponse::Parse(const HttpResponse& response) { return true; } +SLSResponse ParseHttpResponse(const HttpResponse& response) { + SLSResponse slsResponse; + if (AppConfig::GetInstance()->IsResponseVerificationEnabled() && !IsSLSResponse(response)) { + slsResponse.mStatusCode = 0; + slsResponse.mErrorCode = LOGE_REQUEST_ERROR; + slsResponse.mErrorMsg = "invalid response body"; + } else { + slsResponse.Parse(response); + + if (AppConfig::GetInstance()->EnableLogTimeAutoAdjust()) { + static uint32_t sCount = 0; + if (sCount++ % 10000 == 0 || slsResponse.mErrorCode == LOGE_REQUEST_TIME_EXPIRED) { + time_t serverTime = GetServerTime(response); + if (serverTime > 0) { + UpdateTimeDelta(serverTime); + } + } + } + } + return slsResponse; +} + bool IsSLSResponse(const HttpResponse& response) { - const auto iter = response.GetHeader().find(sdk::X_LOG_REQUEST_ID); + const auto iter = response.GetHeader().find(X_LOG_REQUEST_ID); if (iter == response.GetHeader().end()) { return false; } diff --git a/core/plugin/flusher/sls/SLSResponse.h b/core/plugin/flusher/sls/SLSResponse.h index 1388c7ef56..887c6f07f1 100644 --- a/core/plugin/flusher/sls/SLSResponse.h +++ b/core/plugin/flusher/sls/SLSResponse.h @@ -17,6 +17,7 @@ #pragma once #include +#include #include #include "common/http/HttpResponse.h" @@ -32,6 +33,7 @@ struct SLSResponse { bool Parse(const HttpResponse& response); }; +SLSResponse ParseHttpResponse(const HttpResponse& response); bool 
IsSLSResponse(const HttpResponse& response); time_t GetServerTime(const HttpResponse& response); diff --git a/core/plugin/flusher/sls/SLSUtil.cpp b/core/plugin/flusher/sls/SLSUtil.cpp new file mode 100644 index 0000000000..21e30d4ce6 --- /dev/null +++ b/core/plugin/flusher/sls/SLSUtil.cpp @@ -0,0 +1,307 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "plugin/flusher/sls/SLSUtil.h" + +#include "app_config/AppConfig.h" +#include "common/EncodingUtil.h" +#include "common/HashUtil.h" +#include "common/TimeUtil.h" +#include "common/http/Constant.h" +#include "plugin/flusher/sls/SLSConstant.h" + +using namespace std; + +namespace logtail { + +static string DATE_FORMAT_RFC822 = "%a, %d %b %Y %H:%M:%S GMT"; + +#define BIT_COUNT_WORDS 2 +#define BIT_COUNT_BYTES (BIT_COUNT_WORDS * sizeof(uint32_t)) + +/* + * define the rotate left (circular left shift) operation + */ +#define rotl(v, b) (((v) << (b)) | ((v) >> (32 - (b)))) + +/* + * Define the basic SHA-1 functions F1 ~ F4. Note that the exclusive-OR + * operation (^) in F1 and F3 may be replaced by a bitwise OR operation + * (|), which produce identical results. 
+ * + * F1 is used in ROUND 0~19, F2 is used in ROUND 20~39 + * F3 is used in ROUND 40~59, F4 is used in ROUND 60~79 + */ +#define F1(B, C, D) (((B) & (C)) ^ (~(B) & (D))) +#define F2(B, C, D) ((B) ^ (C) ^ (D)) +#define F3(B, C, D) (((B) & (C)) ^ ((B) & (D)) ^ ((C) & (D))) +#define F4(B, C, D) ((B) ^ (C) ^ (D)) + +/* + * Use different K in different ROUND + */ +#define K00_19 0x5A827999 +#define K20_39 0x6ED9EBA1 +#define K40_59 0x8F1BBCDC +#define K60_79 0xCA62C1D6 + +/* + * Another implementation of the ROUND transformation: + * (here the T is a temp variable) + * For t=0 to 79: + * { + * T=rotl(A,5)+Func(B,C,D)+K+W[t]+E; + * E=D; D=C; C=rotl(B,30); B=A; A=T; + * } + */ +#define ROUND(t, A, B, C, D, E, Func, K) \ + E += rotl(A, 5) + Func(B, C, D) + W[t] + K; \ + B = rotl(B, 30); + +#define ROUND5(t, Func, K) \ + ROUND(t, A, B, C, D, E, Func, K); \ + ROUND(t + 1, E, A, B, C, D, Func, K); \ + ROUND(t + 2, D, E, A, B, C, Func, K); \ + ROUND(t + 3, C, D, E, A, B, Func, K); \ + ROUND(t + 4, B, C, D, E, A, Func, K) + +#define ROUND20(t, Func, K) \ + ROUND5(t, Func, K); \ + ROUND5(t + 5, Func, K); \ + ROUND5(t + 10, Func, K); \ + ROUND5(t + 15, Func, K) + +/* + * Define constant of the initial vector + */ +const uint32_t SHA1::IV[SHA1_DIGEST_WORDS] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0}; + +/* + * the message must be the big-endian32 (or left-most word) + * before calling the transform() function. + */ +const static uint32_t iii = 1; +const static bool littleEndian = *(uint8_t*)&iii != 0; + +inline uint32_t littleEndianToBig(uint32_t d) { + uint8_t* data = (uint8_t*)&d; + return data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; +} + +inline void make_big_endian32(uint32_t* data, unsigned n) { + if (!littleEndian) { + return; + } + for (; n > 0; ++data, --n) { + *data = littleEndianToBig(*data); + } +} + +inline size_t min(size_t a, size_t b) { + return a < b ? 
a : b; +} + +void SHA1::transform() { + uint32_t W[80]; + memcpy(W, M, SHA1_INPUT_BYTES); + memset((uint8_t*)W + SHA1_INPUT_BYTES, 0, sizeof(W) - SHA1_INPUT_BYTES); + for (unsigned t = 16; t < 80; t++) { + W[t] = rotl(W[t - 16] ^ W[t - 14] ^ W[t - 8] ^ W[t - 3], 1); + } + + uint32_t A = H[0]; + uint32_t B = H[1]; + uint32_t C = H[2]; + uint32_t D = H[3]; + uint32_t E = H[4]; + + ROUND20(0, F1, K00_19); + ROUND20(20, F2, K20_39); + ROUND20(40, F3, K40_59); + ROUND20(60, F4, K60_79); + + H[0] += A; + H[1] += B; + H[2] += C; + H[3] += D; + H[4] += E; +} + +void SHA1::add(const uint8_t* data, size_t data_len) { + unsigned mlen = (unsigned)((bits >> 3) % SHA1_INPUT_BYTES); + bits += (uint64_t)data_len << 3; + unsigned use = (unsigned)min((size_t)(SHA1_INPUT_BYTES - mlen), data_len); + memcpy(M + mlen, data, use); + mlen += use; + + while (mlen == SHA1_INPUT_BYTES) { + data_len -= use; + data += use; + make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS); + transform(); + use = (unsigned)min((size_t)SHA1_INPUT_BYTES, data_len); + memcpy(M, data, use); + mlen = use; + } +} + +uint8_t* SHA1::result() { + unsigned mlen = (unsigned)((bits >> 3) % SHA1_INPUT_BYTES), padding = SHA1_INPUT_BYTES - mlen; + M[mlen++] = 0x80; + if (padding > BIT_COUNT_BYTES) { + memset(M + mlen, 0x00, padding - BIT_COUNT_BYTES); + make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS - BIT_COUNT_WORDS); + } else { + memset(M + mlen, 0x00, SHA1_INPUT_BYTES - mlen); + make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS); + transform(); + memset(M, 0x00, SHA1_INPUT_BYTES - BIT_COUNT_BYTES); + } + + uint64_t temp = littleEndian ? 
bits << 32 | bits >> 32 : bits; + memcpy(M + SHA1_INPUT_BYTES - BIT_COUNT_BYTES, &temp, BIT_COUNT_BYTES); + transform(); + make_big_endian32(H, SHA1_DIGEST_WORDS); + return (uint8_t*)H; +} + +template +inline void axor(T* p1, const T* p2, size_t len) { + for (; len != 0; --len) + *p1++ ^= *p2++; +} + +HMAC::HMAC(const uint8_t* key, size_t lkey) { + init(key, lkey); +} + +void HMAC::init(const uint8_t* key, size_t lkey) { + in.init(); + out.init(); + + uint8_t ipad[SHA1_INPUT_BYTES]; + uint8_t opad[SHA1_INPUT_BYTES]; + memset(ipad, 0x36, sizeof(ipad)); + memset(opad, 0x5c, sizeof(opad)); + + if (lkey <= SHA1_INPUT_BYTES) { + axor(ipad, key, lkey); + axor(opad, key, lkey); + } else { + SHA1 tmp; + tmp.add(key, lkey); + const uint8_t* key2 = tmp.result(); + axor(ipad, key2, SHA1_DIGEST_BYTES); + axor(opad, key2, SHA1_DIGEST_BYTES); + } + + in.add((uint8_t*)ipad, sizeof(ipad)); + out.add((uint8_t*)opad, sizeof(opad)); +} + +string GetDateString() { + time_t now_time; + time(&now_time); + if (AppConfig::GetInstance()->EnableLogTimeAutoAdjust()) { + now_time += GetTimeDelta(); + } + char buffer[128] = {'\0'}; + tm timeInfo; +#if defined(__linux__) + gmtime_r(&now_time, &timeInfo); +#elif defined(_MSC_VER) + gmtime_s(&timeInfo, &now_time); +#endif + strftime(buffer, 128, DATE_FORMAT_RFC822.c_str(), &timeInfo); + return string(buffer); +} + +static bool StartWith(const std::string& input, const std::string& pattern) { + if (input.length() < pattern.length()) { + return false; + } + + size_t i = 0; + while (i < pattern.length() && input[i] == pattern[i]) { + i++; + } + + return i == pattern.length(); +} + +static std::string CalcSHA1(const std::string& message, const std::string& key) { + HMAC hmac(reinterpret_cast(key.data()), key.size()); + hmac.add(reinterpret_cast(message.data()), message.size()); + return string(reinterpret_cast(hmac.result()), SHA1_DIGEST_BYTES); +} + +string GetUrlSignature(const string& httpMethod, + const string& operationType, + map& httpHeader, + 
const map& parameterList, + const string& content, + const string& signKey) { + string contentMd5; + string signature; + string osstream; + if (!content.empty()) { + contentMd5 = CalcMD5(content); + } + string contentType; + map::iterator iter = httpHeader.find(CONTENT_TYPE); + if (iter != httpHeader.end()) { + contentType = iter->second; + } + map endingMap; + osstream.append(httpMethod); + osstream.append("\n"); + osstream.append(contentMd5); + osstream.append("\n"); + osstream.append(contentType); + osstream.append("\n"); + osstream.append(httpHeader[DATE]); + osstream.append("\n"); + for (map::const_iterator iter = httpHeader.begin(); iter != httpHeader.end(); ++iter) { + if (StartWith(iter->first, LOG_OLD_HEADER_PREFIX)) { + string key = iter->first; + endingMap.insert(make_pair(key.replace(0, LOG_OLD_HEADER_PREFIX.size(), LOG_HEADER_PREFIX), iter->second)); + } else if (StartWith(iter->first, LOG_HEADER_PREFIX) || StartWith(iter->first, ACS_HEADER_PREFIX)) { + endingMap.insert(make_pair(iter->first, iter->second)); + } + } + for (map::const_iterator it = endingMap.begin(); it != endingMap.end(); ++it) { + osstream.append(it->first); + osstream.append(":"); + osstream.append(it->second); + osstream.append("\n"); + } + osstream.append(operationType); + if (parameterList.size() > 0) { + osstream.append("?"); + for (map::const_iterator iter = parameterList.begin(); iter != parameterList.end(); ++iter) { + if (iter != parameterList.begin()) { + osstream.append("&"); + } + osstream.append(iter->first); + osstream.append("="); + osstream.append(iter->second); + } + } + + signature = Base64Enconde(CalcSHA1(osstream, signKey)); + + return signature; +} + +} // namespace logtail diff --git a/core/plugin/flusher/sls/SLSUtil.h b/core/plugin/flusher/sls/SLSUtil.h new file mode 100644 index 0000000000..a30288c90a --- /dev/null +++ b/core/plugin/flusher/sls/SLSUtil.h @@ -0,0 +1,82 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include + +namespace logtail { + +#define SHA1_INPUT_WORDS 16 +#define SHA1_DIGEST_WORDS 5 +#define SHA1_INPUT_BYTES (SHA1_INPUT_WORDS * sizeof(uint32_t)) +#define SHA1_DIGEST_BYTES (SHA1_DIGEST_WORDS * sizeof(uint32_t)) + +class SHA1 { +public: + SHA1() : bits(0) { memcpy(H, IV, sizeof(H)); } + SHA1(const SHA1& s) { + bits = s.bits; + memcpy(H, s.H, sizeof(H)); + memcpy(M, s.M, sizeof(M)); + } + void init() { + bits = 0; + memcpy(H, IV, sizeof(H)); + } + void add(const uint8_t* data, size_t len); + uint8_t* result(); + +private: + uint64_t bits; + uint32_t H[SHA1_DIGEST_WORDS]; + uint8_t M[SHA1_INPUT_BYTES]; + + static const uint32_t IV[SHA1_DIGEST_WORDS]; + void transform(); +}; + +class HMAC { +public: + HMAC(const uint8_t* key, size_t lkey); + HMAC(const HMAC& hm) : in(hm.in), out(hm.out) {} + + void init(const uint8_t* key, size_t lkey); + + void add(const uint8_t* data, size_t len) { in.add(data, len); } + + uint8_t* result() { + out.add(in.result(), SHA1_DIGEST_BYTES); + return out.result(); + } + +private: + SHA1 in, out; +}; + +std::string GetDateString(); + +std::string GetUrlSignature(const std::string& httpMethod, + const std::string& operationType, + std::map& httpHeader, + const std::map& parameterList, + const std::string& content, + const std::string& signKey); + +} // namespace logtail diff --git a/core/plugin/flusher/sls/SendResult.cpp b/core/plugin/flusher/sls/SendResult.cpp index 
955bff6a52..1f168ab261 100644 --- a/core/plugin/flusher/sls/SendResult.cpp +++ b/core/plugin/flusher/sls/SendResult.cpp @@ -14,23 +14,23 @@ #include "plugin/flusher/sls/SendResult.h" -#include "sdk/Common.h" +#include "plugin/flusher/sls/SLSConstant.h" namespace logtail { SendResult ConvertErrorCode(const std::string& errorCode) { - if (errorCode == sdk::LOGE_REQUEST_ERROR || errorCode == sdk::LOGE_CLIENT_OPERATION_TIMEOUT - || errorCode == sdk::LOGE_REQUEST_TIMEOUT) { + if (errorCode == LOGE_REQUEST_ERROR || errorCode == LOGE_CLIENT_OPERATION_TIMEOUT + || errorCode == LOGE_REQUEST_TIMEOUT) { return SEND_NETWORK_ERROR; - } else if (errorCode == sdk::LOGE_SERVER_BUSY || errorCode == sdk::LOGE_INTERNAL_SERVER_ERROR) { + } else if (errorCode == LOGE_SERVER_BUSY || errorCode == LOGE_INTERNAL_SERVER_ERROR) { return SEND_SERVER_ERROR; - } else if (errorCode == sdk::LOGE_WRITE_QUOTA_EXCEED || errorCode == sdk::LOGE_SHARD_WRITE_QUOTA_EXCEED) { + } else if (errorCode == LOGE_WRITE_QUOTA_EXCEED || errorCode == LOGE_SHARD_WRITE_QUOTA_EXCEED) { return SEND_QUOTA_EXCEED; - } else if (errorCode == sdk::LOGE_UNAUTHORIZED) { + } else if (errorCode == LOGE_UNAUTHORIZED) { return SEND_UNAUTHORIZED; - } else if (errorCode == sdk::LOGE_INVALID_SEQUENCE_ID) { + } else if (errorCode == LOGE_INVALID_SEQUENCE_ID) { return SEND_INVALID_SEQUENCE_ID; - } else if (errorCode == sdk::LOGE_PARAMETER_INVALID) { + } else if (errorCode == LOGE_PARAMETER_INVALID) { return SEND_PARAMETER_INVALID; } else { return SEND_DISCARD_ERROR; diff --git a/core/plugin/processor/ProcessorDesensitizeNative.cpp b/core/plugin/processor/ProcessorDesensitizeNative.cpp index 5cef5e64c9..47c94bfcf8 100644 --- a/core/plugin/processor/ProcessorDesensitizeNative.cpp +++ b/core/plugin/processor/ProcessorDesensitizeNative.cpp @@ -20,7 +20,7 @@ #include "models/LogEvent.h" #include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" -#include "sdk/Common.h" +#include 
"common/HashUtil.h" namespace logtail { @@ -230,7 +230,7 @@ void ProcessorDesensitizeNative::CastOneSensitiveWord(std::string* value) { // add : xxxx, psw destStr.append(pVal->substr(beginPos, beginOffset - beginPos)); // md5: 123abc - destStr.append(sdk::CalcMD5(pVal->substr(beginOffset, endOffset - beginOffset))); + destStr.append(CalcMD5(pVal->substr(beginOffset, endOffset - beginOffset))); beginPos = endOffset; // refine for : xxxx. psw=123abc if (endOffset >= maxSize) { diff --git a/core/prometheus/PrometheusInputRunner.cpp b/core/prometheus/PrometheusInputRunner.cpp index 052959bb15..cdb4ba46bf 100644 --- a/core/prometheus/PrometheusInputRunner.cpp +++ b/core/prometheus/PrometheusInputRunner.cpp @@ -26,14 +26,14 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/http/AsynCurlRunner.h" +#include "common/http/Constant.h" +#include "common/http/Curl.h" #include "common/timer/Timer.h" #include "logger/Logger.h" #include "monitor/metric_constants/MetricConstants.h" #include "plugin/flusher/sls/FlusherSLS.h" #include "prometheus/Constants.h" #include "prometheus/Utils.h" -#include "sdk/Common.h" -#include "sdk/Exception.h" using namespace std; @@ -49,7 +49,6 @@ PrometheusInputRunner::PrometheusInputRunner() mPodName(STRING_FLAG(_pod_name_)), mEventPool(true), mUnRegisterMs(0) { - mClient = std::make_unique(); mTimer = std::make_shared(); // self monitor @@ -151,17 +150,18 @@ void PrometheusInputRunner::Init() { int retry = 0; while (mIsThreadRunning.load()) { ++retry; - sdk::HttpMessage httpResponse = SendRegisterMessage(prometheus::REGISTER_COLLECTOR_PATH); - if (httpResponse.statusCode != 200) { + auto httpResponse = SendRegisterMessage(prometheus::REGISTER_COLLECTOR_PATH); + if (httpResponse.GetStatusCode() != 200) { mPromRegisterRetryTotal->Add(1); if (retry % 10 == 0) { - LOG_INFO(sLogger, ("register failed, retried", retry)("statusCode", httpResponse.statusCode)); + LOG_INFO(sLogger, + ("register failed, retried", 
retry)("statusCode", httpResponse.GetStatusCode())); } } else { // register success // response will be { "unRegisterMs": 30000 } - if (!httpResponse.content.empty()) { - string responseStr = httpResponse.content; + if (!httpResponse.GetBody()->empty()) { + string responseStr = *httpResponse.GetBody(); string errMsg; Json::Value responseJson; if (!ParseJsonTable(responseStr, responseJson, errMsg)) { @@ -222,9 +222,9 @@ void PrometheusInputRunner::Stop() { auto res = std::async(launch::async, [this]() { std::lock_guard lock(mRegisterMutex); for (int retry = 0; retry < 3; ++retry) { - sdk::HttpMessage httpResponse = SendRegisterMessage(prometheus::UNREGISTER_COLLECTOR_PATH); - if (httpResponse.statusCode != 200) { - LOG_ERROR(sLogger, ("unregister failed, statusCode", httpResponse.statusCode)); + auto httpResponse = SendRegisterMessage(prometheus::UNREGISTER_COLLECTOR_PATH); + if (httpResponse.GetStatusCode() != 200) { + LOG_ERROR(sLogger, ("unregister failed, statusCode", httpResponse.GetStatusCode())); } else { LOG_INFO(sLogger, ("Unregister Success", mPodName)); mPromRegisterState->Set(0); @@ -242,29 +242,18 @@ bool PrometheusInputRunner::HasRegisteredPlugins() const { return !mTargetSubscriberSchedulerMap.empty(); } -sdk::HttpMessage PrometheusInputRunner::SendRegisterMessage(const string& url) const { - map httpHeader; - httpHeader[sdk::X_LOG_REQUEST_ID] = prometheus::PROMETHEUS_PREFIX + mPodName; - sdk::HttpMessage httpResponse; - httpResponse.header[sdk::X_LOG_REQUEST_ID] = prometheus::PROMETHEUS_PREFIX + mPodName; +HttpResponse PrometheusInputRunner::SendRegisterMessage(const string& url) const { + HttpResponse httpResponse; #ifdef APSARA_UNIT_TEST_MAIN - httpResponse.statusCode = 200; + httpResponse.SetStatusCode(200); return httpResponse; #endif - try { - mClient->Send(sdk::HTTP_GET, - mServiceHost, - mServicePort, - url, - "pod_name=" + mPodName, - httpHeader, - "", - 10, - httpResponse, - "", - false); - } catch (const sdk::LOGException& e) { - 
LOG_ERROR(sLogger, ("curl error", e.what())("url", url)("pod_name", mPodName)); + map httpHeader; + if (!SendHttpRequest( + make_unique( + HTTP_GET, false, mServiceHost, mServicePort, url, "pod_name=" + mPodName, httpHeader, "", 10), + httpResponse)) { + LOG_ERROR(sLogger, ("curl error", "")("url", url)("pod_name", mPodName)); } return httpResponse; } diff --git a/core/prometheus/PrometheusInputRunner.h b/core/prometheus/PrometheusInputRunner.h index 996caf163e..b8e7719171 100644 --- a/core/prometheus/PrometheusInputRunner.h +++ b/core/prometheus/PrometheusInputRunner.h @@ -21,12 +21,11 @@ #include #include "common/Lock.h" +#include "common/http/HttpResponse.h" #include "common/timer/Timer.h" #include "monitor/metric_models/MetricTypes.h" #include "prometheus/schedulers/TargetSubscriberScheduler.h" #include "runner/InputRunner.h" -#include "sdk/Common.h" -#include "sdk/CurlImp.h" namespace logtail { @@ -56,7 +55,7 @@ class PrometheusInputRunner : public InputRunner { private: PrometheusInputRunner(); - sdk::HttpMessage SendRegisterMessage(const std::string& url) const; + HttpResponse SendRegisterMessage(const std::string& url) const; void CancelAllTargetSubscriber(); void SubscribeOnce(); @@ -74,7 +73,6 @@ class PrometheusInputRunner : public InputRunner { int32_t mServicePort; std::string mPodName; - std::unique_ptr mClient; std::shared_ptr mTimer; EventPool mEventPool; diff --git a/core/prometheus/schedulers/ScrapeConfig.cpp b/core/prometheus/schedulers/ScrapeConfig.cpp index 3946bf54a2..3bc05e9f65 100644 --- a/core/prometheus/schedulers/ScrapeConfig.cpp +++ b/core/prometheus/schedulers/ScrapeConfig.cpp @@ -10,7 +10,7 @@ #include "logger/Logger.h" #include "prometheus/Constants.h" #include "prometheus/Utils.h" -#include "sdk/Common.h" +#include "common/EncodingUtil.h" using namespace std; @@ -223,7 +223,7 @@ bool ScrapeConfig::InitBasicAuth(const Json::Value& basicAuth) { } auto token = username + ":" + password; - auto token64 = sdk::Base64Enconde(token); + auto 
token64 = Base64Enconde(token); mRequestHeaders[prometheus::A_UTHORIZATION] = prometheus::BASIC_PREFIX + token64; return true; } diff --git a/core/prometheus/schedulers/ScrapeScheduler.cpp b/core/prometheus/schedulers/ScrapeScheduler.cpp index 8596d6f36b..c37ebaf23f 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.cpp +++ b/core/prometheus/schedulers/ScrapeScheduler.cpp @@ -23,6 +23,7 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" +#include "common/http/Constant.h" #include "common/timer/HttpRequestTimerEvent.h" #include "logger/Logger.h" #include "pipeline/queue/ProcessQueueManager.h" @@ -32,7 +33,6 @@ #include "prometheus/async/PromFuture.h" #include "prometheus/async/PromHttpRequest.h" #include "prometheus/component/StreamScraper.h" -#include "sdk/Common.h" using namespace std; @@ -168,7 +168,7 @@ std::unique_ptr ScrapeScheduler::BuildScrapeTimerEvent(std::chrono:: } mPromStreamScraper.SetScrapeTime(mLatestScrapeTime); auto request = std::make_unique( - sdk::HTTP_GET, + HTTP_GET, mScrapeConfigPtr->mScheme == prometheus::HTTPS, mHost, mPort, diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp index ca8a3ad97c..a5540c8958 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp @@ -20,10 +20,10 @@ #include #include -#include "Common.h" -#include "TimeUtil.h" #include "common/JsonUtil.h" #include "common/StringTools.h" +#include "common/TimeUtil.h" +#include "common/http/Constant.h" #include "common/timer/HttpRequestTimerEvent.h" #include "common/timer/Timer.h" #include "logger/Logger.h" @@ -295,7 +295,7 @@ TargetSubscriberScheduler::BuildSubscriberTimerEvent(std::chrono::steady_clock:: if (!mETag.empty()) { httpHeader[prometheus::IF_NONE_MATCH] = mETag; } - auto request = std::make_unique(sdk::HTTP_GET, + auto request = std::make_unique(HTTP_GET, false, mServiceHost, mServicePort, diff 
--git a/core/protobuf/sls/logtail_buffer_meta.proto b/core/protobuf/sls/logtail_buffer_meta.proto index dc2639e997..131e4099d7 100644 --- a/core/protobuf/sls/logtail_buffer_meta.proto +++ b/core/protobuf/sls/logtail_buffer_meta.proto @@ -17,10 +17,17 @@ package sls_logs; import "sls_logs.proto"; +enum EndpointMode +{ + DEFAULT = 0; + ACCELERATE = 1; + CUSTOM = 2; +} + message LogtailBufferMeta { required string project = 1; - required string endpoint = 2; + required string region = 2; required string aliuid = 3; optional string logstore = 4; optional int32 datatype = 5; @@ -28,4 +35,6 @@ message LogtailBufferMeta optional string shardhashkey = 7; optional SlsCompressType compresstype = 8; optional SlsTelemetryType telemetrytype = 9; + optional EndpointMode endpointmode = 10; + optional string endpoint = 11; } diff --git a/core/runner/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp index 4a0ea254dc..1e985cecb2 100644 --- a/core/runner/FlusherRunner.cpp +++ b/core/runner/FlusherRunner.cpp @@ -26,12 +26,9 @@ #include "pipeline/queue/SenderQueueItem.h" #include "pipeline/queue/SenderQueueManager.h" #include "plugin/flusher/sls/DiskBufferWriter.h" -// TODO: temporarily used here -#include "plugin/flusher/sls/PackIdManager.h" -#include "plugin/flusher/sls/SLSClientManager.h" +#include "runner/sink/http/HttpSink.h" -DEFINE_FLAG_INT32(flusher_runner_exit_timeout_secs, "", 60); -DEFINE_FLAG_INT32(check_send_client_timeout_interval, "", 600); +DEFINE_FLAG_INT32(flusher_runner_exit_timeout_sec, "", 60); DECLARE_FLAG_INT32(discard_send_fail_interval); @@ -99,7 +96,7 @@ void FlusherRunner::Stop() { if (!mThreadRes.valid()) { return; } - future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(flusher_runner_exit_timeout_secs))); + future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(flusher_runner_exit_timeout_sec))); if (s == future_status::ready) { LOG_INFO(sLogger, ("flusher runner", "stopped successfully")); } else { @@ -121,7 +118,8 @@ void 
FlusherRunner::PushToHttpSink(SenderQueueItem* item, bool withLimit) { unique_ptr req; bool keepItem = false; - if (!static_cast(item->mFlusher)->BuildRequest(item, req, &keepItem)) { + string errMsg; + if (!static_cast(item->mFlusher)->BuildRequest(item, req, &keepItem, &errMsg)) { if (keepItem && chrono::duration_cast(chrono::system_clock::now() - item->mFirstEnqueTime).count() < INT32_FLAG(discard_send_fail_interval)) { @@ -129,10 +127,12 @@ void FlusherRunner::PushToHttpSink(SenderQueueItem* item, bool withLimit) { LOG_DEBUG(sLogger, ("failed to build request", "retry later")("item address", item)( "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(item->mQueueKey))); + SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); } else { LOG_WARNING(sLogger, ("failed to build request", "discard item")("item address", item)( "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(item->mQueueKey))); + SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); SenderQueueManager::GetInstance()->RemoveItem(item->mQueueKey, item); } return; @@ -189,12 +189,6 @@ void FlusherRunner::Run() { mTotalDelayMs->Add(chrono::system_clock::now() - curTime); } - // TODO: move the following logic to scheduler - if ((time(NULL) - mLastCheckSendClientTime) > INT32_FLAG(check_send_client_timeout_interval)) { - SLSClientManager::GetInstance()->CleanTimeoutClient(); - PackIdManager::GetInstance()->CleanTimeoutEntry(); - mLastCheckSendClientTime = time(NULL); - } if (mIsFlush && SenderQueueManager::GetInstance()->IsAllQueueEmpty()) { break; } diff --git a/core/runner/FlusherRunner.h b/core/runner/FlusherRunner.h index 3390a021b6..d433d6138b 100644 --- a/core/runner/FlusherRunner.h +++ b/core/runner/FlusherRunner.h @@ -24,7 +24,6 @@ #include "pipeline/plugin/interface/Flusher.h" #include "pipeline/queue/SenderQueueItem.h" #include "runner/sink/SinkType.h" -#include "runner/sink/http/HttpSink.h" namespace 
logtail { diff --git a/core/runner/ProcessorRunner.cpp b/core/runner/ProcessorRunner.cpp index ab88c517c6..aae72adad0 100644 --- a/core/runner/ProcessorRunner.cpp +++ b/core/runner/ProcessorRunner.cpp @@ -26,7 +26,7 @@ #include "queue/QueueKeyManager.h" DEFINE_FLAG_INT32(default_flush_merged_buffer_interval, "default flush merged buffer, seconds", 1); -DEFINE_FLAG_INT32(processor_runner_exit_timeout_secs, "", 60); +DEFINE_FLAG_INT32(processor_runner_exit_timeout_sec, "", 60); DECLARE_FLAG_INT32(max_send_log_group_size); @@ -59,7 +59,7 @@ void ProcessorRunner::Stop() { continue; } future_status s - = mThreadRes[threadNo].wait_for(chrono::seconds(INT32_FLAG(processor_runner_exit_timeout_secs))); + = mThreadRes[threadNo].wait_for(chrono::seconds(INT32_FLAG(processor_runner_exit_timeout_sec))); if (s == future_status::ready) { LOG_INFO(sLogger, ("processor runner", "stopped successfully")("threadNo", threadNo)); } else { diff --git a/core/runner/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp index 0213edd166..49724c42c8 100644 --- a/core/runner/sink/http/HttpSink.cpp +++ b/core/runner/sink/http/HttpSink.cpp @@ -28,7 +28,7 @@ #include "unittest/pipeline/HttpSinkMock.h" #endif -DEFINE_FLAG_INT32(http_sink_exit_timeout_secs, "", 5); +DEFINE_FLAG_INT32(http_sink_exit_timeout_sec, "", 5); using namespace std; @@ -77,7 +77,7 @@ void HttpSink::Stop() { if (!mThreadRes.valid()) { return; } - future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(http_sink_exit_timeout_secs))); + future_status s = mThreadRes.wait_for(chrono::seconds(INT32_FLAG(http_sink_exit_timeout_sec))); if (s == future_status::ready) { LOG_INFO(sLogger, ("http sink", "stopped successfully")); } else { @@ -134,6 +134,7 @@ bool HttpSink::AddRequestToClient(unique_ptr&& request) { AppConfig::GetInstance()->GetBindInterface()); if (curl == nullptr) { request->mItem->mStatus = SendingStatus::IDLE; + request->mResponse.SetNetworkStatus(NetworkCode::Other, "failed to init curl handler"); 
FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); mOutFailedItemsTotal->Add(1); LOG_ERROR(sLogger, @@ -146,11 +147,11 @@ bool HttpSink::AddRequestToClient(unique_ptr&& request) { request->mPrivateData = headers; curl_easy_setopt(curl, CURLOPT_PRIVATE, request.get()); - request->mLastSendTime = chrono::system_clock::now(); auto res = curl_multi_add_handle(mClient, curl); if (res != CURLM_OK) { request->mItem->mStatus = SendingStatus::IDLE; + request->mResponse.SetNetworkStatus(NetworkCode::Other, "failed to add the easy curl handle to multi_handle"); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); curl_easy_cleanup(curl); mOutFailedItemsTotal->Add(1); @@ -252,18 +253,21 @@ void HttpSink::HandleCompletedRequests(int& runningHandlers) { curl_easy_getinfo(handler, CURLINFO_PRIVATE, &request); auto pipelinePlaceHolder = request->mItem->mPipeline; // keep pipeline alive auto responseTime = chrono::system_clock::now() - request->mLastSendTime; - auto responseTimeMs = chrono::duration_cast(responseTime).count(); + auto responseTimeMs = chrono::duration_cast(responseTime); switch (msg->data.result) { case CURLE_OK: { long statusCode = 0; curl_easy_getinfo(handler, CURLINFO_RESPONSE_CODE, &statusCode); + request->mResponse.SetNetworkStatus(NetworkCode::Ok, ""); request->mResponse.SetStatusCode(statusCode); - LOG_DEBUG( - sLogger, - ("send http request succeeded, item address", request->mItem)( - "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(request->mItem->mQueueKey))( - "response time", ToString(responseTimeMs) + "ms")("try cnt", ToString(request->mTryCnt))( - "sending cnt", ToString(FlusherRunner::GetInstance()->GetSendingBufferCount()))); + request->mResponse.SetResponseTime(responseTimeMs); + LOG_DEBUG(sLogger, + ("send http request succeeded, item address", + request->mItem)("config-flusher-dst", + QueueKeyManager::GetInstance()->GetName(request->mItem->mQueueKey))( + "response time", ToString(responseTimeMs.count()) + "ms")("try cnt", + 
ToString(request->mTryCnt))( + "sending cnt", ToString(FlusherRunner::GetInstance()->GetSendingBufferCount()))); static_cast(request->mItem->mFlusher)->OnSendDone(request->mResponse, request->mItem); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); mOutSuccessfulItemsTotal->Add(1); @@ -274,12 +278,11 @@ void HttpSink::HandleCompletedRequests(int& runningHandlers) { default: // considered as network error if (request->mTryCnt <= request->mMaxTryCnt) { - LOG_WARNING( - sLogger, - ("failed to send http request", "retry immediately")("item address", request->mItem)( - "config-flusher-dst", - QueueKeyManager::GetInstance()->GetName(request->mItem->mFlusher->GetQueueKey()))( - "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(msg->data.result))); + LOG_DEBUG(sLogger, + ("failed to send http request", "retry immediately")("item address", request->mItem)( + "config-flusher-dst", + QueueKeyManager::GetInstance()->GetName(request->mItem->mFlusher->GetQueueKey()))( + "try cnt", request->mTryCnt)("errMsg", curl_easy_strerror(msg->data.result))); // free first,becase mPrivateData will be reset in AddRequestToClient if (request->mPrivateData) { curl_slist_free_all((curl_slist*)request->mPrivateData); @@ -290,12 +293,14 @@ void HttpSink::HandleCompletedRequests(int& runningHandlers) { ++runningHandlers; requestReused = true; } else { + auto errMsg = curl_easy_strerror(msg->data.result); + request->mResponse.SetNetworkStatus(GetNetworkStatus(msg->data.result), errMsg); LOG_DEBUG(sLogger, ("failed to send http request", "abort")("item address", request->mItem)( "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(request->mItem->mQueueKey))( - "response time", ToString(responseTimeMs) + "ms")("try cnt", - ToString(request->mTryCnt))( + "response time", ToString(responseTimeMs.count()) + "ms")( + "try cnt", ToString(request->mTryCnt))("errMsg", errMsg)( "sending cnt", ToString(FlusherRunner::GetInstance()->GetSendingBufferCount()))); 
static_cast(request->mItem->mFlusher) ->OnSendDone(request->mResponse, request->mItem); diff --git a/core/runner/sink/http/HttpSinkRequest.h b/core/runner/sink/http/HttpSinkRequest.h index f8220f7722..d9ed07ce41 100644 --- a/core/runner/sink/http/HttpSinkRequest.h +++ b/core/runner/sink/http/HttpSinkRequest.h @@ -33,7 +33,7 @@ struct HttpSinkRequest : public AsynHttpRequest { const std::map& header, const std::string& body, SenderQueueItem* item, - uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_secs)), + uint32_t timeout = static_cast(INT32_FLAG(default_http_request_timeout_sec)), uint32_t maxTryCnt = static_cast(INT32_FLAG(default_http_request_max_try_cnt)) ) : AsynHttpRequest(method, httpsFlag, host, port, url, query, header, body, HttpResponse(), timeout, maxTryCnt), mItem(item) {} diff --git a/core/sdk/Client.cpp b/core/sdk/Client.cpp deleted file mode 100644 index 053e6da40a..0000000000 --- a/core/sdk/Client.cpp +++ /dev/null @@ -1,483 +0,0 @@ -// Copyright 2022 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "Client.h" - -#include "Common.h" -#include "CurlImp.h" -#include "Exception.h" -#include "Result.h" -#include "app_config/AppConfig.h" -#include "common/Flags.h" -#include "logger/Logger.h" -#include "monitor/Monitor.h" -#include "plugin/flusher/sls/SLSClientManager.h" -#ifdef __ENTERPRISE__ -#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" -#endif - -namespace logtail { -namespace sdk { - - using namespace std; - - Client::Client(const string& aliuid, const string& slsHost, int32_t timeout) - : mTimeout(timeout), mHostFieldSuffix(""), mIsHostRawIp(false), mPort(80), mUsingHTTPS(false), mAliuid(aliuid) { - mClient = new CurlClient(); - mSlsHostUpdateTime = 0; - mSlsRealIpUpdateTime = 0; - SetSlsHost(slsHost); - if (mTimeout <= 0) { - mTimeout = LOG_REQUEST_TIMEOUT; - } - } - - Client::~Client() throw() { - if (mClient != NULL) { - delete mClient; - } - } - - void Client::SetPort(int32_t port) { - mPort = port; - mUsingHTTPS = (443 == mPort); - } - - string Client::GetSlsHost() { - mSpinLock.lock(); - string slsHost = mSlsHost; - mSpinLock.unlock(); - return slsHost; - } - - string Client::GetRawSlsHost() { - mSpinLock.lock(); - string rawSlsHost = mRawSlsHost; - mSpinLock.unlock(); - return rawSlsHost; - } - - string Client::GetHostFieldSuffix() { - mSpinLock.lock(); - string hostFieldSuffix = mHostFieldSuffix; - mSpinLock.unlock(); - return hostFieldSuffix; - } - - bool Client::GetRawSlsHostFlag() { - return mIsHostRawIp; - } - - void Client::SetSlsHost(const string& slsHost) { - mSpinLock.lock(); - if (slsHost == mRawSlsHost) { - mSpinLock.unlock(); - return; - } - mRawSlsHost = slsHost; - size_t bpos = slsHost.find("://"); - if (bpos == string::npos) - bpos = 0; - else - bpos += 3; - string tmpstr = slsHost.substr(bpos); - size_t epos = tmpstr.find_first_of("/"); - if (epos == string::npos) - epos = tmpstr.length(); - string host = tmpstr.substr(0, epos); - - mSlsHost = host; - - mHostFieldSuffix = "." 
+ host; - size_t i = 0; - for (; i < host.length(); ++i) { - if ((host[i] >= 'a' && host[i] <= 'z') || (host[i] >= 'A' && host[i] <= 'Z')) - break; - } - if (i == host.length()) - mIsHostRawIp = true; - else - mIsHostRawIp = false; - mSpinLock.unlock(); - } - - - string Client::GetHost(const string& project) { - if (mIsHostRawIp || project.empty()) { - return GetSlsHost(); - } else { - return project + GetHostFieldSuffix(); - } - } - - GetRealIpResponse Client::GetRealIp() { - static string project = "logtail-real-ip-project"; - static string logstore = "logtail-real-ip-logstore"; - GetRealIpResponse rsp; - try { - PingSLSServer(project, logstore, &rsp.realIp); - } catch (const LOGException&) { - } - return rsp; - } - - bool Client::TestNetwork() { - static string project = "logtail-test-network-project"; - static string logstore = "logtail-test-network-logstore"; - PingSLSServer(project, logstore); - return true; - } - - PostLogStoreLogsResponse Client::PostLogStoreLogs(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize, - const std::string& hashKey, - bool isTimeSeries) { - map httpHeader; - httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; - httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(rawSize); - httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - if (isTimeSeries) { - return SynPostMetricStoreLogs(project, logstore, compressedLogGroup, httpHeader); - } else { - return SynPostLogStoreLogs(project, logstore, compressedLogGroup, httpHeader, hashKey); - } - } - - PostLogStoreLogsResponse Client::PostLogStoreLogPackageList(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& packageListData, - const std::string& hashKey) { - map httpHeader; - httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; - httpHeader[X_LOG_MODE] = LOG_MODE_BATCH_GROUP; - 
httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(packageListData.size()); - httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - return SynPostLogStoreLogs(project, logstore, packageListData, httpHeader, hashKey); - } - - unique_ptr Client::CreatePostLogStoreLogsRequest(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize, - SenderQueueItem* item, - const std::string& hashKey, - int64_t hashKeySeqID, - bool isTimeSeries) { - map httpHeader; - httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; - httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(rawSize); - httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - if (isTimeSeries) { - return CreateAsynPostMetricStoreLogsRequest(project, logstore, compressedLogGroup, httpHeader, item); - } else { - return CreateAsynPostLogStoreLogsRequest( - project, logstore, compressedLogGroup, httpHeader, hashKey, hashKeySeqID, item); - } - } - - - unique_ptr Client::CreatePostLogStoreLogPackageListRequest(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& packageListData, - SenderQueueItem* item, - const std::string& hashKey) { - map httpHeader; - httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; - httpHeader[X_LOG_MODE] = LOG_MODE_BATCH_GROUP; - httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(packageListData.size()); - httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - return CreateAsynPostLogStoreLogsRequest( - project, logstore, packageListData, httpHeader, hashKey, kInvalidHashKeySeqID, item); - } - - void Client::SendRequest(const std::string& project, - const std::string& httpMethod, - const std::string& url, - const std::string& body, - const std::map& parameterList, - std::map& header, - HttpMessage& httpMessage, - std::string* realIpPtr) { - SLSClientManager::AuthType type; - string 
accessKeyId, accessKeySecret; - if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { -#ifdef __ENTERPRISE__ - static auto* manager = static_cast(SLSClientManager::GetInstance()); - if (!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { - throw LOGException(LOGE_UNAUTHORIZED, ""); - } -#endif - } - if (type == SLSClientManager::AuthType::ANONYMOUS) { - header[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; - } - - string host = GetHost(project); - SetCommonHeader(header, (int32_t)(body.length()), project); - string signature = GetUrlSignature(httpMethod, url, header, parameterList, body, accessKeySecret); - header[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; - - string queryString; - GetQueryString(parameterList, queryString); - - int32_t port = mPort; - if (mPort == 80 && mUsingHTTPS) { - port = 443; - } - mClient->Send(httpMethod, - host, - port, - url, - queryString, - header, - body, - mTimeout, - httpMessage, - AppConfig::GetInstance()->GetBindInterface(), - mUsingHTTPS); - - if (httpMessage.statusCode != 200) { - if (realIpPtr != NULL) { - *realIpPtr = httpMessage.header[X_LOG_HOSTIP]; - } - ErrorCheck(httpMessage.content, httpMessage.header[X_LOG_REQUEST_ID], httpMessage.statusCode); - } - } - - std::unique_ptr - Client::CreateAsynPostMetricStoreLogsRequest(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - SenderQueueItem* item) { - SLSClientManager::AuthType type; - string accessKeyId, accessKeySecret; - if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { -#ifdef __ENTERPRISE__ - static auto* manager = static_cast(SLSClientManager::GetInstance()); - if (!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { - return nullptr; - } -#endif - } - if (type == 
SLSClientManager::AuthType::ANONYMOUS) { - httpHeader[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; - } - - string operation = METRICSTORES; - operation.append("/").append(project).append("/").append(logstore).append("/api/v1/write"); - httpHeader[CONTENT_MD5] = CalcMD5(body); - map parameterList; - string host = GetSlsHost(); - SetCommonHeader(httpHeader, (int32_t)(body.length()), ""); - string signature = GetUrlSignature(HTTP_POST, operation, httpHeader, parameterList, body, accessKeySecret); - httpHeader[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; - return make_unique(HTTP_POST, mUsingHTTPS, host, mPort, operation, "", httpHeader, body, item, INT32_FLAG(default_http_request_timeout_secs), LOG_REQUEST_TRY_TIMES); - } - - unique_ptr - Client::CreateAsynPostLogStoreLogsRequest(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - const std::string& hashKey, - int64_t hashKeySeqID, - SenderQueueItem* item) { - SLSClientManager::AuthType type; - string accessKeyId, accessKeySecret; - if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { -#ifdef __ENTERPRISE__ - static auto* manager = static_cast(SLSClientManager::GetInstance()); - if (!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { - return nullptr; - } -#endif - } - if (type == SLSClientManager::AuthType::ANONYMOUS) { - httpHeader[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; - } - - string operation = LOGSTORES; - operation.append("/").append(logstore); - if (hashKey.empty()) - operation.append("/shards/lb"); - else - operation.append("/shards/route"); - - httpHeader[CONTENT_MD5] = CalcMD5(body); - - map parameterList; - if (!hashKey.empty()) { - parameterList["key"] = hashKey; - if (hashKeySeqID != kInvalidHashKeySeqID) { - parameterList["seqid"] = std::to_string(hashKeySeqID); - } - } - - string host = GetHost(project); - 
SetCommonHeader(httpHeader, (int32_t)(body.length()), project); - string signature = GetUrlSignature(HTTP_POST, operation, httpHeader, parameterList, body, accessKeySecret); - httpHeader[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + accessKeyId + ':' + signature; - - string queryString; - GetQueryString(parameterList, queryString); - - return make_unique( - HTTP_POST, mUsingHTTPS, host, mPort, operation, queryString, httpHeader, body, item, INT32_FLAG(default_http_request_timeout_secs), LOG_REQUEST_TRY_TIMES); - } - - PostLogStoreLogsResponse - Client::PingSLSServer(const std::string& project, const std::string& logstore, std::string* realIpPtr) { - sls_logs::LogGroup logGroup; - logGroup.set_source(LoongCollectorMonitor::mIpAddr); - auto serializeData = logGroup.SerializeAsString(); - - std::map httpHeader; - httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; - httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(serializeData.size()); - return SynPostLogStoreLogs(project, logstore, serializeData, httpHeader, "", realIpPtr); - } - - PostLogStoreLogsResponse Client::SynPostLogStoreLogs(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - const std::string& hashKey, - std::string* realIpPtr) { - string operation = LOGSTORES; - operation.append("/").append(logstore); - if (hashKey.empty()) - operation.append("/shards/lb"); - else - operation.append("/shards/route"); - - httpHeader[CONTENT_MD5] = CalcMD5(body); - - map parameterList; - if (!hashKey.empty()) - parameterList["key"] = hashKey; - - HttpMessage httpResponse; - SendRequest(project, HTTP_POST, operation, body, parameterList, httpHeader, httpResponse, realIpPtr); - - PostLogStoreLogsResponse ret; - ret.bodyBytes = (int32_t)body.size(); - ret.statusCode = httpResponse.statusCode; - ret.requestId = httpResponse.header[X_LOG_REQUEST_ID]; - return ret; - } - - PostLogStoreLogsResponse Client::SynPostMetricStoreLogs(const std::string& project, - const std::string& 
logstore, - const std::string& body, - std::map& httpHeader, - std::string* realIpPtr) { - string operation = METRICSTORES; - operation.append("/").append(project).append("/").append(logstore).append("/api/v1/write"); - httpHeader[CONTENT_MD5] = CalcMD5(body); - map parameterList; - HttpMessage httpResponse; - SendRequest(project, HTTP_POST, operation, body, parameterList, httpHeader, httpResponse, realIpPtr); - PostLogStoreLogsResponse ret; - ret.bodyBytes = (int32_t)body.size(); - ret.statusCode = httpResponse.statusCode; - ret.requestId = httpResponse.header[X_LOG_REQUEST_ID]; - return ret; - } - - PostLogStoreLogsResponse Client::PostLogUsingWebTracking(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLog, - uint32_t rawSize) { - map httpHeader; - httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(rawSize); - SetCommonHeader(httpHeader, (int32_t)(compressedLog.length()), project); - - string operation = LOGSTORES; - operation.append("/").append(logstore).append("/track"); - - string host = GetHost(project); - int32_t port = mPort; - if (mPort == 80 && mUsingHTTPS) { - port = 443; - } - - HttpMessage httpResponse; - mClient->Send(HTTP_POST, - host, - port, - operation, - "", - httpHeader, - compressedLog, - mTimeout, - httpResponse, - AppConfig::GetInstance()->GetBindInterface(), - mUsingHTTPS); - - PostLogStoreLogsResponse ret; - ret.bodyBytes = (int32_t)compressedLog.length(); - ret.statusCode = httpResponse.statusCode; - ret.requestId = httpResponse.header[X_LOG_REQUEST_ID]; - return ret; - } - - void Client::SetCommonHeader(map& httpHeader, int32_t contentLength, const string& project) { - if (!project.empty()) { - httpHeader[HOST] = project + GetHostFieldSuffix(); - } else { - httpHeader[HOST] = GetSlsHost(); - } - - httpHeader[USER_AGENT] = SLSClientManager::GetInstance()->GetUserAgent(); - 
httpHeader[X_LOG_APIVERSION] = LOG_API_VERSION; - httpHeader[X_LOG_SIGNATUREMETHOD] = HMAC_SHA1; - httpHeader[DATE] = GetDateString(); - httpHeader[CONTENT_LENGTH] = std::to_string(contentLength); - } - - std::string Client::GetCompressTypeString(sls_logs::SlsCompressType compressType) { - switch (compressType) { - case sls_logs::SLS_CMP_NONE: - return ""; - case sls_logs::SLS_CMP_LZ4: - return LOG_LZ4; - case sls_logs::SLS_CMP_ZSTD: - return LOG_ZSTD; - default: - return LOG_LZ4; - } - } - - sls_logs::SlsCompressType Client::GetCompressType(std::string compressTypeString, - sls_logs::SlsCompressType defaultType) { - if (compressTypeString == "none") { - return sls_logs::SLS_CMP_NONE; - } else if (compressTypeString == LOG_LZ4) { - return sls_logs::SLS_CMP_LZ4; - } else if (compressTypeString == LOG_ZSTD) { - return sls_logs::SLS_CMP_ZSTD; - } - return defaultType; - } -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/Client.h b/core/sdk/Client.h deleted file mode 100644 index 073f39b657..0000000000 --- a/core/sdk/Client.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -#include "Common.h" -#include "CurlImp.h" -#include "protobuf/sls/sls_logs.pb.h" -#include "runner/sink/http/HttpSinkRequest.h" - -namespace logtail { -namespace sdk { - - class Client { - public: - /** Constructor needs at least three parameters. 
- * @param LOGHost LOG service address, for example:http://cn-hangzhou.log.aliyuncs.com. - * @param timeout Timeout time of one operation. - */ - Client(const std::string& aliuid, - const std::string& slsHost, - int32_t timeout = LOG_REQUEST_TIMEOUT); - ~Client() throw(); - - void SetPort(int32_t port); - - GetRealIpResponse GetRealIp(); - bool TestNetwork(); - - std::string GetHost(const std::string& project); - - void SetSlsHost(const std::string& slsHost); - std::string GetSlsHost(); - std::string GetRawSlsHost(); - std::string GetHostFieldSuffix(); - bool GetRawSlsHostFlag(); - - void SetSlsHostUpdateTime(int32_t uptime) { mSlsHostUpdateTime = uptime; } - int32_t GetSlsHostUpdateTime() { return mSlsHostUpdateTime; } - - void SetSlsRealIpUpdateTime(int32_t uptime) { mSlsRealIpUpdateTime = uptime; } - int32_t GetSlsRealIpUpdateTime() { return mSlsRealIpUpdateTime; } - bool IsUsingHTTPS() { return mUsingHTTPS; } - - /////////////////////////////////////Internal Interface For Logtail//////////////////////////////////////// - /** Sync Put data to LOG service. Unsuccessful opertaion will cause an LOGException. - * @param project The project name - * @param logstore The logstore name - * @param compressedLogGroup serialized data of logGroup, LZ4 or ZSTD comressed - * @param rawSize before compress - * @param compressType compression type - * @return request_id. 
- */ - PostLogStoreLogsResponse PostLogStoreLogs(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize, - const std::string& hashKey = "", - bool isTimeSeries = false); - - PostLogStoreLogsResponse PostMetricStoreLogs(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize) { - return PostLogStoreLogs(project, logstore, compressType, compressedLogGroup, rawSize, "", true); - } - - - /** Sync Put data to LOG service. Unsuccessful opertaion will cause an LOGException. - * @param project The project name - * @param logstore The logstore name - * @param packageListData data of logPackageList, consist of several LogGroup - * @return request_id. - */ - PostLogStoreLogsResponse PostLogStoreLogPackageList(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& packageListData, - const std::string& hashKey = ""); - /** Async Put data to LOG service. Unsuccessful opertaion will cause an LOGException. - * @param project The project name - * @param logstore The logstore name - * @param compressedLogGroup data of logGroup, LZ4 comressed - * @param rawSize before compress - * @param compressType compression type - * @return request_id. - */ - std::unique_ptr CreatePostLogStoreLogsRequest(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize, - SenderQueueItem* item, - const std::string& hashKey = "", - int64_t hashKeySeqID = kInvalidHashKeySeqID, - bool isTimeSeries = false); - /** Async Put metrics data to SLS metricstore. Unsuccessful opertaion will cause an LOGException. 
- * @param project The project name - * @param logstore The logstore name - * @param compressedLogGroup data of logGroup, LZ4 comressed - * @param rawSize before compress - * @param compressType compression type - * @return request_id. - */ - std::unique_ptr CreatePostMetricStoreLogsRequest(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize, - SenderQueueItem* item) { - return CreatePostLogStoreLogsRequest( - project, logstore, compressType, compressedLogGroup, rawSize, item, "", kInvalidHashKeySeqID, true); - } - - - /** Async Put data to LOG service. Unsuccessful opertaion will cause an LOGException. - * @param project The project name - * @param logstore The logstore name - * @param packageListData data of logPackageList, consist of several LogGroup - * @return request_id. - */ - std::unique_ptr CreatePostLogStoreLogPackageListRequest(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& packageListData, - SenderQueueItem* item, - const std::string& hashKey = ""); - - PostLogStoreLogsResponse PostLogUsingWebTracking(const std::string& project, - const std::string& logstore, - sls_logs::SlsCompressType compressType, - const std::string& compressedLogGroup, - uint32_t rawSize); - ///////////////////////////////////////////////////////////////////////////////////////////////// - - static std::string GetCompressTypeString(sls_logs::SlsCompressType compressType); - static sls_logs::SlsCompressType GetCompressType(std::string compressTypeString, - sls_logs::SlsCompressType defaultType = sls_logs::SLS_CMP_LZ4); - - protected: - void SendRequest(const std::string& project, - const std::string& httpMethod, - const std::string& url, - const std::string& body, - const std::map& parameterList, - std::map& header, - HttpMessage& httpMessage, - std::string* realIpPtr = NULL); - - std::unique_ptr - 
CreateAsynPostLogStoreLogsRequest(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - const std::string& hashKey, - int64_t hashKeySeqID, - SenderQueueItem* item); - - std::unique_ptr - CreateAsynPostMetricStoreLogsRequest(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - SenderQueueItem* item); - - // PingSLSServer sends a trivial data packet to SLS for some inner purposes. - PostLogStoreLogsResponse - PingSLSServer(const std::string& project, const std::string& logstore, std::string* realIpPtr = NULL); - - PostLogStoreLogsResponse SynPostLogStoreLogs(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - const std::string& hashKey, - std::string* realIpPtr = NULL); - - PostLogStoreLogsResponse SynPostMetricStoreLogs(const std::string& project, - const std::string& logstore, - const std::string& body, - std::map& httpHeader, - std::string* realIpPtr = NULL); - - void SetCommonHeader(std::map& httpHeader, - int32_t contentLength, - const std::string& project = ""); - - protected: - int32_t mSlsHostUpdateTime; - int32_t mSlsRealIpUpdateTime; - std::string mRawSlsHost; - std::string mSlsHost; - int32_t mTimeout; - std::string mHostFieldSuffix; - bool mIsHostRawIp; - int32_t mPort; - bool mUsingHTTPS; - std::string mAliuid; - - SpinLock mSpinLock; - - CurlClient* mClient; - }; - -} // namespace sdk - -} // namespace logtail diff --git a/core/sdk/Common.cpp b/core/sdk/Common.cpp deleted file mode 100644 index 8f3a1a7890..0000000000 --- a/core/sdk/Common.cpp +++ /dev/null @@ -1,883 +0,0 @@ -// Copyright 2022 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "Common.h" -#include "app_config/AppConfig.h" -#include "common/TimeUtil.h" -#include "common/StringTools.h" -#include "common/ErrorUtil.h" -#include "logger/Logger.h" - -using namespace std; -using namespace logtail::sdk; - -namespace logtail { -namespace sdk { - const char* const LOG_HEADSIGNATURE_PREFIX = "LOG "; - const char* const LOGE_REQUEST_ERROR = "RequestError"; - const char* const LOGE_INVALID_HOST = "InvalidHost"; - const char* const LOGE_UNKNOWN_ERROR = "UnknownError"; - const char* const LOGE_NOT_IMPLEMENTED = "NotImplemented"; - const char* const LOGE_SERVER_BUSY = "ServerBusy"; - const char* const LOGE_INTERNAL_SERVER_ERROR = "InternalServerError"; - const char* const LOGE_RESPONSE_SIG_ERROR = "ResponseSignatureError"; - const char* const LOGE_PARAMETER_INVALID = "ParameterInvalid"; - const char* const LOGE_MISSING_PARAMETER = "MissingParameter"; - const char* const LOGE_INVALID_METHOD = "InvalidMethod"; - const char* const LOGE_BAD_RESPONSE = "BadResponse"; - const char* const LOGE_UNAUTHORIZED = "Unauthorized"; - const char* const LOGE_QUOTA_EXCEED = "ExceedQuota"; - const char* const LOGE_REQUEST_TIMEOUT = "RequestTimeout"; - const char* const LOGE_CLIENT_OPERATION_TIMEOUT = "ClientOpertaionTimeout"; - const char* const LOGE_CLIENT_NETWORK_ERROR = "ClientNetworkError"; - const char* const LOGE_USER_NOT_EXIST = "UserNotExist"; - const char* const LOGE_CATEGORY_NOT_EXIST = "CategoryNotExist"; - const char* const LOGE_TOPIC_NOT_EXIST = "TopicNotExist"; - const char* const LOGE_POST_BODY_INVALID = "PostBodyInvalid"; - 
const char* const LOGE_INVALID_CONTENTTYPE = "InvalidContentType"; - const char* const LOGE_INVALID_CONTENLENGTH = "InvalidContentLength"; - const char* const LOGE_INVALID_APIVERSION = "InvalidAPIVersion"; - const char* const LOGE_PROJECT_NOT_EXIST = "ProjectNotExist"; - const char* const LOGE_MACHINEGROUP_NOT_EXIST = "MachineGroupNotExist"; - const char* const LOGE_MACHINEGROUP_ALREADY_EXIST = "MachineGroupAlreadyExist"; - const char* const LOGE_CONFIG_NOT_EXIST = "ConfigNotExist"; - const char* const LOGE_CONFIG_ALREADY_EXIST = "ConfigAlreadyExist"; - const char* const LOGE_LOGSTORE_NOT_EXIST = "LogStoreNotExist"; - const char* const LOGE_INVALID_ACCESSKEYID = "InvalidAccessKeyId"; - const char* const LOGE_SIGNATURE_NOT_MATCH = "SignatureNotMatch"; - const char* const LOGE_PROJECT_FORBIDDEN = "ProjectForbidden"; - const char* const LOGE_WRITE_QUOTA_EXCEED = "WriteQuotaExceed"; - const char* const LOGE_READ_QUOTA_EXCEED = "ReadQuotaExceed"; - const char* const LOGE_REQUEST_TIME_EXPIRED = "RequestTimeExpired"; - const char* const LOGE_INVALID_REQUEST_TIME = "InvalidRequestTime"; - const char* const LOGE_POST_BODY_TOO_LARGE = "PostBodyTooLarge"; - const char* const LOGE_INVALID_TIME_RANGE = "InvalidTimeRange"; - const char* const LOGE_INVALID_REVERSE = "InvalidReverse"; - const char* const LOGE_LOGSTORE_WITHOUT_SHARD = "LogStoreWithoutShard"; - const char* const LOGE_SHARD_WRITE_QUOTA_EXCEED = "ShardWriteQuotaExceed"; - const char* const LOGE_SHARD_READ_QUOTA_EXCEED = "ShardReadQuotaExceed"; - const char* const LOGE_INVALID_SEQUENCE_ID = "InvalidSequenceId"; - - const char* const LOGSTORES = "/logstores"; - const char* const METRICSTORES = "/prometheus"; - const char* const SHARDS = "/shards"; - const char* const INDEX = "/index"; - const char* const CONFIGS = "/configs"; - const char* const MACHINES = "/machines"; - const char* const MACHINEGROUPS = "/machinegroups"; - const char* const ACLS = "/acls"; - const char* const CONFIGSERVERAGENT = "/Agent"; - - const 
char* const HTTP_GET = "GET"; - const char* const HTTP_POST = "POST"; - const char* const HTTP_PUT = "PUT"; - const char* const HTTP_DELETE = "DELETE"; - - const char* const HOST = "Host"; - const char* const DATE = "Date"; - const char* const USER_AGENT = "User-Agent"; - const char* const LOG_OLD_HEADER_PREFIX = "x-sls-"; - const char* const LOG_HEADER_PREFIX = "x-log-"; - const char* const ACS_HEADER_PREFIX = "x-acs-"; - const char* const X_LOG_KEYPROVIDER = "x-log-keyprovider"; - const char* const X_LOG_APIVERSION = "x-log-apiversion"; - const char* const X_LOG_COMPRESSTYPE = "x-log-compresstype"; - const char* const X_LOG_BODYRAWSIZE = "x-log-bodyrawsize"; - const char* const X_LOG_SIGNATUREMETHOD = "x-log-signaturemethod"; - const char* const X_ACS_SECURITY_TOKEN = "x-acs-security-token"; - const char* const X_LOG_CURSOR = "x-log-cursor"; - const char* const X_LOG_REQUEST_ID = "x-log-requestid"; - const char* const X_LOG_MODE = "x-log-mode"; - - const char* const X_LOG_PROGRESS = "x-log-progress"; - const char* const X_LOG_COUNT = "x-log-count"; - const char* const X_LOG_HOSTIP = "x-log-hostip"; - - const char* const HTTP_ACCEPT = "accept"; - const char* const DEFLATE = "deflate"; - const char* const HMAC_SHA1 = "hmac-sha1"; - const char* const CONTENT_TYPE = "Content-Type"; - const char* const CONTENT_LENGTH = "Content-Length"; - const char* const CONTENT_MD5 = "Content-MD5"; - const char* const AUTHORIZATION = "Authorization"; - const char* const SIGNATURE = "Signature"; - const char* const ACCEPT_ENCODING = "Accept-Encoding"; - const char* const ENCONDING_GZIP = "gzip"; - const char* const TYPE_LOG_PROTOBUF = "application/x-protobuf"; - const char* const TYPE_LOG_JSON = "application/json"; - const char* const LOG_MODE_BATCH_GROUP = "batch_group"; - const char* const LOGITEM_TIME_STAMP_LABEL = "__time__"; - const char* const LOGITEM_SOURCE_LABEL = "__source__"; - const char* const LOG_API_VERSION = "0.6.0"; - const char* const LOGTAIL_USER_AGENT = 
"ali-log-logtail"; - const char* const MD5_SHA1_SALT_KEYPROVIDER = "md5-sha1-salt"; - const char* const LOG_TYPE_CURSOR = "cursor"; - const char* const LOG_TYPE = "type"; - const char* const LOGE_NOT_SUPPORTED_ACCEPT_CONTENT_TYPE = "InvalidAcceptContentType"; - const char* const LOGE_NOT_SUPPORTED_ACCEPT_ENCODING = "InvalidAcceptEncoding"; - const char* const LOGE_SHARD_NOT_EXIST = "ShardNotExist"; - const char* const LOGE_INVALID_CURSOR = "InvalidCursor"; - const char* const LOG_LZ4 = "lz4"; - const char* const LOG_DEFLATE = "deflate"; - const char* const LOG_ZSTD = "zstd"; - const char* const LOG_ERROR_CODE = "errorCode"; - const char* const LOG_ERROR_MESSAGE = "errorMessage"; - - const char* const LOG_SHARD_STATUS_READWRITE = "readwrite"; - const char* const LOG_SHARD_STATUS_READONLY = "readonly"; - - bool caseInsensitiveComp(const char lhs, const char rhs) { - return tolower(lhs) < tolower(rhs); - } - - bool compareHeader(const std::string& lhs, const std::string& rhs) { - return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end(), caseInsensitiveComp); - } - - bool HttpMessage::IsLogServiceResponse() const { - if (!AppConfig::GetInstance()->IsResponseVerificationEnabled()) { - return true; - } - - const auto iter = header.find(X_LOG_REQUEST_ID); - if (iter == header.end()) { - return false; - } - return !iter->second.empty(); - } - - static unsigned char ToHex(unsigned char x) { - return x > 9 ? 
x + 55 : x + 48; - } - - static unsigned char FromHex(unsigned char x) { - unsigned char y = '\0'; - if (x >= 'A' && x <= 'Z') - y = x - 'A' + 10; - else if (x >= 'a' && x <= 'z') - y = x - 'a' + 10; - else if (x >= '0' && x <= '9') - y = x - '0'; - else - assert(0); - return y; - } - - - static std::string HexToString(const uint8_t md5[16]) { - static const char* table = "0123456789ABCDEF"; - std::string ss(32, 'a'); - for (int i = 0; i < 16; ++i) { - ss[i * 2] = table[md5[i] >> 4]; - ss[i * 2 + 1] = table[md5[i] & 0x0F]; - } - return ss; - } - - std::string CalcMD5(const std::string& message) { - uint8_t md5[MD5_BYTES]; - DoMd5((const uint8_t*)message.data(), message.length(), md5); - return HexToString(md5); - } - - std::string CalcSHA1(const std::string& message, const std::string& key) { - HMAC hmac(reinterpret_cast(key.data()), key.size()); - hmac.add(reinterpret_cast(message.data()), message.size()); - return string(reinterpret_cast(hmac.result()), SHA1_DIGEST_BYTES); - } - - - void Base64Encoding(std::istream& is, std::ostream& os, char makeupChar, const char* alphabet) { - int out[4]; - int remain = 0; - while (!is.eof()) { - int byte1 = is.get(); - if (byte1 < 0) { - break; - } - int byte2 = is.get(); - int byte3; - if (byte2 < 0) { - byte2 = 0; - byte3 = 0; - remain = 1; - } else { - byte3 = is.get(); - if (byte3 < 0) { - byte3 = 0; - remain = 2; - } - } - out[0] = static_cast(byte1) >> 2; - out[1] = ((byte1 & 0x03) << 4) + (static_cast(byte2) >> 4); - out[2] = ((byte2 & 0x0F) << 2) + (static_cast(byte3) >> 6); - out[3] = byte3 & 0x3F; - - if (remain == 1) { - os.put(out[0] = alphabet[out[0]]); - os.put(out[1] = alphabet[out[1]]); - os.put(makeupChar); - os.put(makeupChar); - } else if (remain == 2) { - os.put(out[0] = alphabet[out[0]]); - os.put(out[1] = alphabet[out[1]]); - os.put(out[2] = alphabet[out[2]]); - os.put(makeupChar); - } else { - os.put(out[0] = alphabet[out[0]]); - os.put(out[1] = alphabet[out[1]]); - os.put(out[2] = alphabet[out[2]]); - 
os.put(out[3] = alphabet[out[3]]); - } - } - } - - - std::string Base64Enconde(const std::string& message) { - std::istringstream iss(message); - std::ostringstream oss; - Base64Encoding(iss, oss); - return oss.str(); - } - - - std::string UrlEncode(const std::string& str) { - std::string strTemp; - size_t length = str.length(); - for (size_t i = 0; i < length; i++) { - if (isalnum((unsigned char)str[i]) || (str[i] == '-') || (str[i] == '_') || (str[i] == '.') - || (str[i] == '~')) - strTemp += str[i]; - else if (str[i] == ' ') - strTemp += "+"; - else { - strTemp += '%'; - strTemp += ToHex((unsigned char)str[i] >> 4); - strTemp += ToHex((unsigned char)str[i] % 16); - } - } - return strTemp; - } - - - std::string UrlDecode(const std::string& str) { - std::string strTemp = ""; - size_t length = str.length(); - for (size_t i = 0; i < length; i++) { - if (str[i] == '+') - strTemp += ' '; - else if (str[i] == '%') { - assert(i + 2 < length); - unsigned char high = FromHex((unsigned char)str[++i]); - unsigned char low = FromHex((unsigned char)str[++i]); - strTemp += high * 16 + low; - } else - strTemp += str[i]; - } - return strTemp; - } - - std::string GetDateString(const std::string& dateFormat) { - time_t now_time; - time(&now_time); - if (AppConfig::GetInstance()->EnableLogTimeAutoAdjust()) { - now_time += GetTimeDelta(); - } - char buffer[128] = {'\0'}; - tm timeInfo; -#if defined(__linux__) - gmtime_r(&now_time, &timeInfo); -#elif defined(_MSC_VER) - gmtime_s(&timeInfo, &now_time); -#endif - strftime(buffer, 128, dateFormat.c_str(), &timeInfo); - return string(buffer); - } - - std::string GetDateString() { - return GetDateString(DATE_FORMAT_RFC822); - } - - time_t DecodeDateString(const std::string dateString, const std::string& dateFormat) { - return 0; - // struct tm t; - // memset(&t, 0, sizeof(t)); - // t.tm_sec = -1; - // strptime(dateString.c_str(), dateFormat.c_str(),&t); - // if(t.tm_sec == -1) - // { - // throw LOGException(LOGE_PARAMETER_INVALID, 
string("Invalid date string:") + dateString + ",format:" + - // dateFormat); - // } - // struct timezone tz; - // struct timeval tv; - // gettimeofday(&tv, &tz); - // return mktime(&t)-tz.tz_minuteswest*60; - } - - bool StartWith(const std::string& input, const std::string& pattern) { - if (input.length() < pattern.length()) { - return false; - } - - size_t i = 0; - while (i < pattern.length() && input[i] == pattern[i]) { - i++; - } - - return i == pattern.length(); - } - - void GetQueryString(const map& parameterList, string& queryString) { - queryString.clear(); - for (map::const_iterator iter = parameterList.begin(); iter != parameterList.end(); ++iter) { - if (iter != parameterList.begin()) { - queryString.append("&"); - } - queryString.append(iter->first); - queryString.append("="); - queryString.append(UrlEncode(iter->second)); - } - } - - string GetUrlSignature(const string& httpMethod, - const string& operationType, - map& httpHeader, - const map& parameterList, - const string& content, - const string& signKey) { - string contentMd5; - string signature; - string osstream; - if (!content.empty()) { - contentMd5 = CalcMD5(content); - } - string contentType; - map::iterator iter = httpHeader.find(CONTENT_TYPE); - if (iter != httpHeader.end()) { - contentType = iter->second; - } - std::map endingMap; - osstream.append(httpMethod); - osstream.append("\n"); - osstream.append(contentMd5); - osstream.append("\n"); - osstream.append(contentType); - osstream.append("\n"); - osstream.append(httpHeader[DATE]); - osstream.append("\n"); - for (map::const_iterator iter = httpHeader.begin(); iter != httpHeader.end(); ++iter) { - if (StartWith(iter->first, LOG_OLD_HEADER_PREFIX)) { - std::string key = iter->first; - endingMap.insert(std::make_pair(key.replace(0, std::strlen(LOG_OLD_HEADER_PREFIX), LOG_HEADER_PREFIX), - iter->second)); - } else if (StartWith(iter->first, LOG_HEADER_PREFIX) || StartWith(iter->first, ACS_HEADER_PREFIX)) { - 
endingMap.insert(std::make_pair(iter->first, iter->second)); - } - } - for (map::const_iterator it = endingMap.begin(); it != endingMap.end(); ++it) { - osstream.append(it->first); - osstream.append(":"); - osstream.append(it->second); - osstream.append("\n"); - } - osstream.append(operationType); - if (parameterList.size() > 0) { - osstream.append("?"); - for (map::const_iterator iter = parameterList.begin(); iter != parameterList.end(); - ++iter) { - if (iter != parameterList.begin()) { - osstream.append("&"); - } - osstream.append(iter->first); - osstream.append("="); - osstream.append(iter->second); - } - } - - signature = Base64Enconde(CalcSHA1(osstream, signKey)); - - return signature; - } - - -/////////////////////////////////////////////// MACRO ////////////////////////////////////////////////// -#define SHIFT_LEFT(a, b) ((a) << (b) | (a) >> (32 - b)) - -/** - * each operation - */ -#define F(b, c, d) (((b) & (c)) | ((~(b)) & (d))) -#define G(b, c, d) (((d) & (b)) | ((~(d)) & (c))) -#define H(b, c, d) ((b) ^ (c) ^ (d)) -#define I(b, c, d) ((c) ^ ((b) | (~(d)))) - -/** - * each round - */ -#define FF(a, b, c, d, word, shift, k) \ - { \ - (a) += F((b), (c), (d)) + (word) + (k); \ - (a) = SHIFT_LEFT((a), (shift)); \ - (a) += (b); \ - } -#define GG(a, b, c, d, word, shift, k) \ - { \ - (a) += G((b), (c), (d)) + (word) + (k); \ - (a) = SHIFT_LEFT((a), (shift)); \ - (a) += (b); \ - } -#define HH(a, b, c, d, word, shift, k) \ - { \ - (a) += H((b), (c), (d)) + (word) + (k); \ - (a) = SHIFT_LEFT((a), (shift)); \ - (a) += (b); \ - } -#define II(a, b, c, d, word, shift, k) \ - { \ - (a) += I((b), (c), (d)) + (word) + (k); \ - (a) = SHIFT_LEFT((a), (shift)); \ - (a) += (b); \ - } - ////////////////////////////////////////////////////////// GLOBAL VARIABLE - //////////////////////////////////////////////////////////// - const uint8_t gPadding[64] = {0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; - - - //////////////////////////////////////////////////////// LOCAL DECLEARATION - ///////////////////////////////////////////////////////////// - struct Md5Block { - uint32_t word[16]; - }; - /** - * copy a pool into a block, using little endian - */ - void CopyBytesToBlock(const uint8_t* poolIn, struct Md5Block& block) { - uint32_t j = 0; - for (uint32_t i = 0; i < 16; ++i, j += 4) { - block.word[i] = ((uint32_t)poolIn[j]) | (((uint32_t)poolIn[j + 1]) << 8) | (((uint32_t)poolIn[j + 2]) << 16) - | (((uint32_t)poolIn[j + 3]) << 24); - } - } - - /** - * calculate md5 hash value from a block - */ - void CalMd5(struct Md5Block block, uint32_t h[4]) { - uint32_t a = h[0]; - uint32_t b = h[1]; - uint32_t c = h[2]; - uint32_t d = h[3]; - - // Round 1 - FF(a, b, c, d, block.word[0], 7, 0xd76aa478); - FF(d, a, b, c, block.word[1], 12, 0xe8c7b756); - FF(c, d, a, b, block.word[2], 17, 0x242070db); - FF(b, c, d, a, block.word[3], 22, 0xc1bdceee); - FF(a, b, c, d, block.word[4], 7, 0xf57c0faf); - FF(d, a, b, c, block.word[5], 12, 0x4787c62a); - FF(c, d, a, b, block.word[6], 17, 0xa8304613); - FF(b, c, d, a, block.word[7], 22, 0xfd469501); - FF(a, b, c, d, block.word[8], 7, 0x698098d8); - FF(d, a, b, c, block.word[9], 12, 0x8b44f7af); - FF(c, d, a, b, block.word[10], 17, 0xffff5bb1); - FF(b, c, d, a, block.word[11], 22, 0x895cd7be); - FF(a, b, c, d, block.word[12], 7, 0x6b901122); - FF(d, a, b, c, block.word[13], 12, 0xfd987193); - FF(c, d, a, b, block.word[14], 17, 0xa679438e); - FF(b, c, d, a, block.word[15], 22, 0x49b40821); - - // Round 2 - GG(a, b, c, d, block.word[1], 5, 0xf61e2562); - GG(d, a, b, c, block.word[6], 9, 0xc040b340); - GG(c, d, a, b, block.word[11], 14, 0x265e5a51); - GG(b, c, d, a, block.word[0], 20, 0xe9b6c7aa); - GG(a, b, c, d, block.word[5], 5, 
0xd62f105d); - GG(d, a, b, c, block.word[10], 9, 0x2441453); - GG(c, d, a, b, block.word[15], 14, 0xd8a1e681); - GG(b, c, d, a, block.word[4], 20, 0xe7d3fbc8); - GG(a, b, c, d, block.word[9], 5, 0x21e1cde6); - GG(d, a, b, c, block.word[14], 9, 0xc33707d6); - GG(c, d, a, b, block.word[3], 14, 0xf4d50d87); - GG(b, c, d, a, block.word[8], 20, 0x455a14ed); - GG(a, b, c, d, block.word[13], 5, 0xa9e3e905); - GG(d, a, b, c, block.word[2], 9, 0xfcefa3f8); - GG(c, d, a, b, block.word[7], 14, 0x676f02d9); - GG(b, c, d, a, block.word[12], 20, 0x8d2a4c8a); - - // Round 3 - HH(a, b, c, d, block.word[5], 4, 0xfffa3942); - HH(d, a, b, c, block.word[8], 11, 0x8771f681); - HH(c, d, a, b, block.word[11], 16, 0x6d9d6122); - HH(b, c, d, a, block.word[14], 23, 0xfde5380c); - HH(a, b, c, d, block.word[1], 4, 0xa4beea44); - HH(d, a, b, c, block.word[4], 11, 0x4bdecfa9); - HH(c, d, a, b, block.word[7], 16, 0xf6bb4b60); - HH(b, c, d, a, block.word[10], 23, 0xbebfbc70); - HH(a, b, c, d, block.word[13], 4, 0x289b7ec6); - HH(d, a, b, c, block.word[0], 11, 0xeaa127fa); - HH(c, d, a, b, block.word[3], 16, 0xd4ef3085); - HH(b, c, d, a, block.word[6], 23, 0x4881d05); - HH(a, b, c, d, block.word[9], 4, 0xd9d4d039); - HH(d, a, b, c, block.word[12], 11, 0xe6db99e5); - HH(c, d, a, b, block.word[15], 16, 0x1fa27cf8); - HH(b, c, d, a, block.word[2], 23, 0xc4ac5665); - - // Round 4 - II(a, b, c, d, block.word[0], 6, 0xf4292244); - II(d, a, b, c, block.word[7], 10, 0x432aff97); - II(c, d, a, b, block.word[14], 15, 0xab9423a7); - II(b, c, d, a, block.word[5], 21, 0xfc93a039); - II(a, b, c, d, block.word[12], 6, 0x655b59c3); - II(d, a, b, c, block.word[3], 10, 0x8f0ccc92); - II(c, d, a, b, block.word[10], 15, 0xffeff47d); - II(b, c, d, a, block.word[1], 21, 0x85845dd1); - II(a, b, c, d, block.word[8], 6, 0x6fa87e4f); - II(d, a, b, c, block.word[15], 10, 0xfe2ce6e0); - II(c, d, a, b, block.word[6], 15, 0xa3014314); - II(b, c, d, a, block.word[13], 21, 0x4e0811a1); - II(a, b, c, d, block.word[4], 6, 
0xf7537e82); - II(d, a, b, c, block.word[11], 10, 0xbd3af235); - II(c, d, a, b, block.word[2], 15, 0x2ad7d2bb); - II(b, c, d, a, block.word[9], 21, 0xeb86d391); - - // Add to hash value - h[0] += a; - h[1] += b; - h[2] += c; - h[3] += d; - } - - - void DoMd5Little(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t hash[16]) { - struct Md5Block block; - - /// initialize hash value - uint32_t h[4]; - h[0] = 0x67452301; - h[1] = 0xEFCDAB89; - h[2] = 0x98BADCFE; - h[3] = 0x10325476; - - /// padding and divide input data into blocks - uint64_t fullLen = (inputBytesNum >> 6) << 6; /// complete blocked length - uint64_t partLen = inputBytesNum & 0x3F; /// length remained - - uint32_t i; - for (i = 0; i < fullLen; i += 64) { - /// copy input data into block - memcpy(block.word, &(poolIn[i]), 64); - - /// calculate Md5 - CalMd5(block, h); - } - - - if (partLen > 55) /// append two more blocks - { - /// copy input data into block and pad - memcpy(block.word, &(poolIn[i]), partLen); - memcpy(((uint8_t*)&(block.word[partLen >> 2])) + (partLen & 0x3), gPadding, (64 - partLen)); - - /// calculate Md5 - CalMd5(block, h); - - /// set rest byte to 0x0 - memset(block.word, 0x0, 64); - } else /// append one more block - { - /// copy input data into block and pad - memcpy(block.word, &(poolIn[i]), partLen); - memcpy(((uint8_t*)&(block.word[partLen >> 2])) + (partLen & 0x3), gPadding, (64 - partLen)); - } - - /// append length (bits) - uint64_t bitsNum = inputBytesNum * 8; - memcpy(&(block.word[14]), &bitsNum, 8); - - /// calculate Md5 - CalMd5(block, h); - - /// clear sensitive information - memset(block.word, 0, 64); - - /// fill hash value - memcpy(&(hash[0]), &(h[0]), 16); - } /// DoMd5Little - - void DoMd5Big(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t hash[16]) { - struct Md5Block block; - uint8_t tempBlock[64]; - - /// initialize hash value - uint32_t h[4]; - h[0] = 0x67452301; - h[1] = 0xEFCDAB89; - h[2] = 0x98BADCFE; - h[3] = 0x10325476; - - /// 
padding and divide input data into blocks - uint64_t fullLen = (inputBytesNum >> 6) << 6; - uint64_t partLen = inputBytesNum & 0x3F; - - uint32_t i; - for (i = 0; i < fullLen; i += 64) { - /// copy input data into block, in little endian - CopyBytesToBlock(&(poolIn[i]), block); - - /// calculate Md5 - CalMd5(block, h); - } - - /// append two more blocks - if (partLen > 55) { - /// put input data into a temporary block - memcpy(tempBlock, &(poolIn[i]), partLen); - memcpy(&(tempBlock[partLen]), gPadding, (64 - partLen)); - - /// copy temporary data into block, in little endian - CopyBytesToBlock(tempBlock, block); - - /// calculate Md5 - CalMd5(block, h); - - memset(tempBlock, 0x0, 64); - } - /// append one more block - else { - memcpy(tempBlock, &(poolIn[i]), partLen); - memcpy(&(tempBlock[partLen]), gPadding, (64 - partLen)); - } - /// append length (bits) - uint64_t bitsNum = inputBytesNum * 8; - memcpy(&(tempBlock[56]), &bitsNum, 8); - - /// copy temporary data into block, in little endian - CopyBytesToBlock(tempBlock, block); - - /// calculate Md5 - CalMd5(block, h); - - /// clear sensitive information - memset(block.word, 0, 64); - memset(tempBlock, 0, 64); - - /// fill hash value - memcpy(&(hash[0]), &(h[0]), 16); - } /// DoMd5Big - - void DoMd5(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t md5[16]) { - /// detect big or little endian - union { - uint32_t a; - uint8_t b; - } symbol; - - symbol.a = 1; - - /// for little endian - if (symbol.b == 1) { - DoMd5Little(poolIn, inputBytesNum, md5); - } - /// for big endian - else { - DoMd5Big(poolIn, inputBytesNum, md5); - } - } /// DoMd5 - -/* - * define the rotate left (circular left shift) operation - */ -#define rotl(v, b) (((v) << (b)) | ((v) >> (32 - (b)))) - -/* - * Define the basic SHA-1 functions F1 ~ F4. Note that the exclusive-OR - * operation (^) in F1 and F3 may be replaced by a bitwise OR operation - * (|), which produce identical results. 
- * - * F1 is used in ROUND 0~19, F2 is used in ROUND 20~39 - * F3 is used in ROUND 40~59, F4 is used in ROUND 60~79 - */ -#define F1(B, C, D) (((B) & (C)) ^ (~(B) & (D))) -#define F2(B, C, D) ((B) ^ (C) ^ (D)) -#define F3(B, C, D) (((B) & (C)) ^ ((B) & (D)) ^ ((C) & (D))) -#define F4(B, C, D) ((B) ^ (C) ^ (D)) - -/* - * Use different K in different ROUND - */ -#define K00_19 0x5A827999 -#define K20_39 0x6ED9EBA1 -#define K40_59 0x8F1BBCDC -#define K60_79 0xCA62C1D6 - -/* - * Another implementation of the ROUND transformation: - * (here the T is a temp variable) - * For t=0 to 79: - * { - * T=rotl(A,5)+Func(B,C,D)+K+W[t]+E; - * E=D; D=C; C=rotl(B,30); B=A; A=T; - * } - */ -#define ROUND(t, A, B, C, D, E, Func, K) \ - E += rotl(A, 5) + Func(B, C, D) + W[t] + K; \ - B = rotl(B, 30); - -#define ROUND5(t, Func, K) \ - ROUND(t, A, B, C, D, E, Func, K); \ - ROUND(t + 1, E, A, B, C, D, Func, K); \ - ROUND(t + 2, D, E, A, B, C, Func, K); \ - ROUND(t + 3, C, D, E, A, B, Func, K); \ - ROUND(t + 4, B, C, D, E, A, Func, K) - -#define ROUND20(t, Func, K) \ - ROUND5(t, Func, K); \ - ROUND5(t + 5, Func, K); \ - ROUND5(t + 10, Func, K); \ - ROUND5(t + 15, Func, K) - - /* - * Define constant of the initial vector - */ - const uint32_t SHA1::IV[SHA1_DIGEST_WORDS] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0}; - - /* - * the message must be the big-endian32 (or left-most word) - * before calling the transform() function. - */ - const static uint32_t iii = 1; - const static bool littleEndian = *(uint8_t*)&iii != 0; - - inline uint32_t littleEndianToBig(uint32_t d) { - uint8_t* data = (uint8_t*)&d; - return data[0] << 24 | data[1] << 16 | data[2] << 8 | data[3]; - } - - inline void make_big_endian32(uint32_t* data, unsigned n) { - if (!littleEndian) { - return; - } - for (; n > 0; ++data, --n) { - *data = littleEndianToBig(*data); - } - } - - inline size_t min(size_t a, size_t b) { - return a < b ? 
a : b; - } - - void SHA1::transform() { - uint32_t W[80]; - memcpy(W, M, SHA1_INPUT_BYTES); - memset((uint8_t*)W + SHA1_INPUT_BYTES, 0, sizeof(W) - SHA1_INPUT_BYTES); - for (unsigned t = 16; t < 80; t++) { - W[t] = rotl(W[t - 16] ^ W[t - 14] ^ W[t - 8] ^ W[t - 3], 1); - } - - uint32_t A = H[0]; - uint32_t B = H[1]; - uint32_t C = H[2]; - uint32_t D = H[3]; - uint32_t E = H[4]; - - ROUND20(0, F1, K00_19); - ROUND20(20, F2, K20_39); - ROUND20(40, F3, K40_59); - ROUND20(60, F4, K60_79); - - H[0] += A; - H[1] += B; - H[2] += C; - H[3] += D; - H[4] += E; - } - - void SHA1::add(const uint8_t* data, size_t data_len) { - unsigned mlen = (unsigned)((bits >> 3) % SHA1_INPUT_BYTES); - bits += (uint64_t)data_len << 3; - unsigned use = (unsigned)min((size_t)(SHA1_INPUT_BYTES - mlen), data_len); - memcpy(M + mlen, data, use); - mlen += use; - - while (mlen == SHA1_INPUT_BYTES) { - data_len -= use; - data += use; - make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS); - transform(); - use = (unsigned)min((size_t)SHA1_INPUT_BYTES, data_len); - memcpy(M, data, use); - mlen = use; - } - } - - uint8_t* SHA1::result() { - unsigned mlen = (unsigned)((bits >> 3) % SHA1_INPUT_BYTES), padding = SHA1_INPUT_BYTES - mlen; - M[mlen++] = 0x80; - if (padding > BIT_COUNT_BYTES) { - memset(M + mlen, 0x00, padding - BIT_COUNT_BYTES); - make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS - BIT_COUNT_WORDS); - } else { - memset(M + mlen, 0x00, SHA1_INPUT_BYTES - mlen); - make_big_endian32((uint32_t*)M, SHA1_INPUT_WORDS); - transform(); - memset(M, 0x00, SHA1_INPUT_BYTES - BIT_COUNT_BYTES); - } - - uint64_t temp = littleEndian ? 
bits << 32 | bits >> 32 : bits; - memcpy(M + SHA1_INPUT_BYTES - BIT_COUNT_BYTES, &temp, BIT_COUNT_BYTES); - transform(); - make_big_endian32(H, SHA1_DIGEST_WORDS); - return (uint8_t*)H; - } - - template - inline void axor(T* p1, const T* p2, size_t len) { - for (; len != 0; --len) - *p1++ ^= *p2++; - } - - HMAC::HMAC(const uint8_t* key, size_t lkey) { - init(key, lkey); - } - - void HMAC::init(const uint8_t* key, size_t lkey) { - in.init(); - out.init(); - - uint8_t ipad[SHA1_INPUT_BYTES]; - uint8_t opad[SHA1_INPUT_BYTES]; - memset(ipad, 0x36, sizeof(ipad)); - memset(opad, 0x5c, sizeof(opad)); - - if (lkey <= SHA1_INPUT_BYTES) { - axor(ipad, key, lkey); - axor(opad, key, lkey); - } else { - SHA1 tmp; - tmp.add(key, lkey); - const uint8_t* key2 = tmp.result(); - axor(ipad, key2, SHA1_DIGEST_BYTES); - axor(opad, key2, SHA1_DIGEST_BYTES); - } - - in.add((uint8_t*)ipad, sizeof(ipad)); - out.add((uint8_t*)opad, sizeof(opad)); - } - -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/Common.h b/core/sdk/Common.h deleted file mode 100644 index e37d2efa61..0000000000 --- a/core/sdk/Common.h +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace logtail { -namespace sdk { - - const int64_t kInvalidHashKeySeqID = 0; - const int64_t kFirstHashKeySeqID = 1; - - const uint32_t LOG_REQUEST_TIMEOUT = 20; - const uint32_t LOG_REQUEST_TRY_TIMES = 1; - - const uint32_t MD5_BYTES = 16; -#define DATE_FORMAT_RFC822 "%a, %d %b %Y %H:%M:%S GMT" ///< RFC822 date formate, GMT time. - - extern const char* const LOG_HEADSIGNATURE_PREFIX; ///< ""; - - extern const char* const LOGE_REQUEST_ERROR; //= "RequestError"; - extern const char* const LOGE_INVALID_HOST; //= "InvalidHost" - extern const char* const LOGE_UNKNOWN_ERROR; //= "UnknownError"; - extern const char* const LOGE_NOT_IMPLEMENTED; //= "NotImplemented"; - extern const char* const LOGE_SERVER_BUSY; //= "ServerBusy"; - extern const char* const LOGE_INTERNAL_SERVER_ERROR; //= "InternalServerError"; - extern const char* const LOGE_RESPONSE_SIG_ERROR; //= "ResponseSignatureError"; - extern const char* const LOGE_PARAMETER_INVALID; //= "ParameterInvalid"; - extern const char* const LOGE_MISSING_PARAMETER; //= "MissingParameter"; - extern const char* const LOGE_INVALID_METHOD; //= "InvalidMethod"; - extern const char* const LOGE_INVALID_CONTENTTYPE; //= "InvalidContentType"; - extern const char* const LOGE_INVALID_CONTENTLENGTH; //= "InvalidContentLength"; - extern const char* const LOGE_BAD_RESPONSE; //= "BadResponse"; - extern const char* const LOGE_UNAUTHORIZED; //= "Unauthorized"; - extern const char* const LOGE_QUOTA_EXCEED; //= "ExceedQuota"; - extern const char* const LOGE_REQUEST_TIMEOUT; //= "RequestTimeout"; - extern const char* const LOGE_CLIENT_OPERATION_TIMEOUT; //= "ClientOpertaionTimeout"; - extern const char* const LOGE_CLIENT_NETWORK_ERROR; //= "ClientNetworkError"; - extern const char* const LOGE_USER_NOT_EXIST; //= "UserNotExist"; - extern const char* const LOGE_CATEGORY_NOT_EXIST; //= 
"CategoryNotExist"; - extern const char* const LOGE_TOPIC_NOT_EXIST; //= "TopicNotExist"; - extern const char* const LOGE_POST_BODY_INVALID; //= "PostBodyInvalid"; - extern const char* const LOGE_INVALID_HOST; //= "InvalidHost"; - extern const char* const LOGE_INVALID_APIVERSION; //="InvalidAPIVersion"; - extern const char* const LOGE_PROJECT_NOT_EXIST; //="ProjectNotExist"; - extern const char* const LOGE_MACHINEGROUP_NOT_EXIST; //="MachineGroupNotExist"; - extern const char* const LOGE_MACHINEGROUP_ALREADY_EXIST; //="MachineGroupAlreadyExist"; - extern const char* const LOGE_CONFIG_NOT_EXIST; //="ConfigNotExist"; - extern const char* const LOGE_CONFIG_ALREADY_EXIST; //="ConfigAlreadyExist"; - extern const char* const LOGE_LOGSTORE_NOT_EXIST; //="LogStoreNotExist"; - extern const char* const LOGE_INVALID_ACCESSKEYID; //="InvalidAccessKeyId"; - extern const char* const LOGE_SIGNATURE_NOT_MATCH; //="SignatureNotMatch"; - extern const char* const LOGE_PROJECT_FORBIDDEN; //="ProjectForbidden"; - extern const char* const LOGE_WRITE_QUOTA_EXCEED; //="WriteQuotaExceed"; - extern const char* const LOGE_READ_QUOTA_EXCEED; //="ReadQuotaExceed"; - extern const char* const LOGE_REQUEST_TIME_EXPIRED; //="RequestTimeExpired"; - extern const char* const LOGE_INVALID_REQUEST_TIME; //="InvalidRequestTime"; - extern const char* const LOGE_POST_BODY_TOO_LARGE; //="PostBodyTooLarge"; - extern const char* const LOGE_INVALID_TIME_RANGE; //="InvalidTimeRange"; - extern const char* const LOGE_INVALID_REVERSE; //="InvalidReverse"; - extern const char* const LOGE_LOGSTORE_WITHOUT_SHARD; //="LogStoreWithoutShard"; - extern const char* const LOGE_INVALID_SEQUENCE_ID; //="InvalidSequenceId"; - - extern const char* const LOGSTORES; //= "/logstores" - extern const char* const METRICSTORES; //= "/prometheus" - extern const char* const SHARDS; //= "/shards" - extern const char* const INDEX; //= "/index" - extern const char* const CONFIGS; //= "/configs" - extern const char* const MACHINES; //= 
"/machines" - extern const char* const MACHINEGROUPS; //= "/machinegroups" - extern const char* const ACLS; //= "/acls" - extern const char* const CONFIGSERVERAGENT; //= "/Agent" - - extern const char* const HTTP_GET; //= "GET"; - extern const char* const HTTP_POST; //= "POST"; - extern const char* const HTTP_PUT; //= "PUT"; - extern const char* const HTTP_DELETE; //= "DELETE"; - - extern const char* const HOST; //= "Host"; - extern const char* const DATE; //= "Date"; - extern const char* const USER_AGENT; //= "User-Agent"; - extern const char* const LOG_HEADER_PREFIX; //= "x-log-"; - extern const char* const LOG_OLD_HEADER_PREFIX; //= "x-sls-"; - extern const char* const X_LOG_KEYPROVIDER; // = "x-log-keyprovider"; - extern const char* const X_LOG_APIVERSION; // = "x-log-apiversion"; - extern const char* const X_LOG_COMPRESSTYPE; // = "x-log-compresstype"; - extern const char* const X_LOG_BODYRAWSIZE; // = "x-log-bodyrawsize"; - extern const char* const X_LOG_SIGNATUREMETHOD; // = "x-log-signaturemethod"; - extern const char* const X_ACS_SECURITY_TOKEN; // = "x-acs-security-token"; - extern const char* const X_LOG_CURSOR; // = "cursor"; - extern const char* const X_LOG_REQUEST_ID; // = "x-log-requestid"; - extern const char* const X_LOG_MODE; // = "x-log-mode"; - - extern const char* const X_LOG_PROGRESS; // = "x-log-progress"; - extern const char* const X_LOG_COUNT; // = "x-log-count"; - extern const char* const X_LOG_HOSTIP; // = "x-log-hostip" - - extern const char* const HTTP_ACCEPT; // = "accept"; - extern const char* const DEFLATE; //= "deflate"; - extern const char* const HMAC_SHA1; //= "hmac-sha1"; - extern const char* const CONTENT_LENGTH; //= "Content-Length"; - extern const char* const CONTENT_TYPE; //= "Content-Type"; - extern const char* const CONTENT_MD5; //= "Content-MD5"; - extern const char* const AUTHORIZATION; //= "Authorization"; - extern const char* const SIGNATURE; //= "Signature"; - extern const char* const ACCEPT_ENCODING; // = 
"Accept-Encoding"; - extern const char* const ENCONDING_GZIP; // = "gzip"; - extern const char* const TYPE_LOG_PROTOBUF; //="application/x-protobuf"; - extern const char* const TYPE_LOG_JSON; //="application/json"; - extern const char* const LOG_MODE_BATCH_GROUP; //="batch_group"; - extern const char* const LOGITEM_TIME_STAMP_LABEL; //="__time__"; - extern const char* const LOGITEM_SOURCE_LABEL; //="__source__"; - extern const char* const LOG_API_VERSION; // = "0.6.0"; - extern const char* const LOGTAIL_USER_AGENT; // = "ali-sls-logtail"; - extern const char* const MD5_SHA1_SALT_KEYPROVIDER; // = "md5-shal-salt"; - extern const char* const LOG_TYPE_CURSOR; // = "cursor"; - extern const char* const LOG_TYPE; // = "type"; - extern const char* const LOGE_NOT_SUPPORTED_ACCEPT_CONTENT_TYPE; - extern const char* const LOGE_NOT_SUPPORTED_ACCEPT_ENCODING; - extern const char* const LOGE_SHARD_NOT_EXIST; - extern const char* const LOGE_INVALID_CURSOR; - extern const char* const LOGE_SHARD_WRITE_QUOTA_EXCEED; - extern const char* const LOGE_SHARD_READ_QUOTA_EXCEED; - extern const char* const LOG_LZ4; //= "lz4"; - extern const char* const LOG_DEFLATE; //= "deflate"; - extern const char* const LOG_ZSTD; //= "zstd"; - - extern const char* const LOG_ERROR_CODE; //= "errorCode"; - extern const char* const LOG_ERROR_MESSAGE; //= "errorMessage"; - - extern const char* const LOG_SHARD_STATUS_READWRITE; // "readwrite"; - extern const char* const LOG_SHARD_STATUS_READONLY; // "readonly"; - - bool caseInsensitiveComp(const char lhs, const char rhs); - - bool compareHeader(const std::string& lhs, const std::string& rhs); - - /** - * HTTP message structure includes three parts: http status code, http header, and http content. - */ - struct HttpMessage { - int32_t statusCode = 0; ///< Http status code - std::map - header; ///< Only contains the header lines which have key:value pair - std::string content; ///< Http content - /** Constructor with no parameter. - * @param void None. 
- * @return The objcect pointer. - */ - HttpMessage() : header(compareHeader) {} - /** Constructor with header and content. - * @param para_header A map structure which contains the key:value pair of http header lines. - Those header lines which do not contains key:value pair are not included. - * @param para_content A string which contains the content of http request. - * @return The objcect pointer. - */ - HttpMessage(const std::map& para_header, const std::string& para_content) - : header(para_header.begin(), para_header.end(), compareHeader), content(para_content) {} - /** Constructor with status code, header and content. - * @param para_statusCode Http status code. - * @param para_header A map structure which contains the key:value pair of http header lines. - Those header lines which do not contains key:value pair are not included. - * @param para_content A string which contains the http content of http content. - * @return The objcect pointer. - */ - HttpMessage(const int32_t para_statusCode, - const std::map& para_header, - const std::string& para_content) - : statusCode(para_statusCode), - header(para_header.begin(), para_header.end(), compareHeader), - content(para_content) {} - - bool IsLogServiceResponse() const; - }; - - /* - * Responses - */ - struct Response { - int32_t statusCode = 0; ///< Http status code - std::string requestId; - - virtual ~Response() {} - }; - - struct PostLogStoreLogsResponse : public Response { - int32_t bodyBytes; - }; - - struct GetRealIpResponse : public Response { - std::string realIp; - }; - -#define SHA1_INPUT_WORDS 16 -#define SHA1_DIGEST_WORDS 5 -#define SHA1_INPUT_BYTES (SHA1_INPUT_WORDS * sizeof(uint32_t)) -#define SHA1_DIGEST_BYTES (SHA1_DIGEST_WORDS * sizeof(uint32_t)) - -#define BIT_COUNT_WORDS 2 -#define BIT_COUNT_BYTES (BIT_COUNT_WORDS * sizeof(uint32_t)) - - /** Calculate Md5 for a byte stream, - * result is stored in md5[16] - * - * @param poolIn Input data - * @param inputBytesNum Length of input data - * 
@param md5[16] A 128-bit pool for storing md5 - */ - void DoMd5(const uint8_t* poolIn, const uint64_t inputBytesNum, uint8_t md5[16]); - void Base64Encoding(std::istream&, - std::ostream&, - char makeupChar = '=', - const char* alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"); - - std::string CalcMD5(const std::string& message); - std::string CalcSHA1(const std::string& message, const std::string& key); - std::string Base64Enconde(const std::string& message); - - std::string UrlEncode(const std::string& str); - std::string UrlDecode(const std::string& str); - - std::string GetDateString(const std::string& dateFormat); - std::string GetDateString(); - time_t DecodeDateString(const std::string dateString, const std::string& dateFormat = DATE_FORMAT_RFC822); - - bool StartWith(const std::string& input, const std::string& pattern); - - void GetQueryString(const std::map& parameterList, std::string& queryString); - - std::string GetUrlSignature(const std::string& httpMethod, - const std::string& operationType, - std::map& httpHeader, - const std::map& parameterList, - const std::string& content, - const std::string& signKey); - - class SHA1 { - public: - SHA1() : bits(0) { memcpy(H, IV, sizeof(H)); } - SHA1(const SHA1& s) { - bits = s.bits; - memcpy(H, s.H, sizeof(H)); - memcpy(M, s.M, sizeof(M)); - } - void init() { - bits = 0; - memcpy(H, IV, sizeof(H)); - } - void add(const uint8_t* data, size_t len); - uint8_t* result(); - - private: - uint64_t bits; - uint32_t H[SHA1_DIGEST_WORDS]; - uint8_t M[SHA1_INPUT_BYTES]; - - static const uint32_t IV[SHA1_DIGEST_WORDS]; - void transform(); - }; - - class HMAC { - public: - HMAC(const uint8_t* key, size_t lkey); - HMAC(const HMAC& hm) : in(hm.in), out(hm.out) {} - - void init(const uint8_t* key, size_t lkey); - - void add(const uint8_t* data, size_t len) { in.add(data, len); } - - uint8_t* result() { - out.add(in.result(), SHA1_DIGEST_BYTES); - return out.result(); - } - - private: - SHA1 in, 
out; - }; - - class SpinLock { - std::atomic_flag locked = ATOMIC_FLAG_INIT; - - SpinLock(const SpinLock&) = delete; - SpinLock& operator=(const SpinLock&) = delete; - - public: - SpinLock() {} - - void lock() { - while (locked.test_and_set(std::memory_order_acquire)) { - ; - } - } - - void unlock() { locked.clear(std::memory_order_release); } - }; - - using ScopedSpinLock = std::lock_guard; - -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/CurlImp.cpp b/core/sdk/CurlImp.cpp deleted file mode 100644 index 9cea4bb9d6..0000000000 --- a/core/sdk/CurlImp.cpp +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2022 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "CurlImp.h" - -#include - -#include "DNSCache.h" -#include "Exception.h" -#include "app_config/AppConfig.h" -#include "common/http/Curl.h" - -using namespace std; - -namespace logtail { -namespace sdk { - - static CURLcode globalInitCode = curl_global_init(CURL_GLOBAL_ALL); - - // callback function to store the response - static size_t data_write_callback(char* buffer, size_t size, size_t nmemb, string* write_buf) { - unsigned long sizes = size * nmemb; - - if (buffer == NULL) { - return 0; - } - - write_buf->append(buffer, sizes); - return sizes; - } - - static size_t header_write_callback(char* buffer, - size_t size, - size_t nmemb, - map* write_buf) { - unsigned long sizes = size * nmemb; - - if (buffer == NULL) { - return 0; - } - unsigned long colonIndex; - for (colonIndex = 1; colonIndex < sizes - 2; colonIndex++) { - if (buffer[colonIndex] == ':') - break; - } - if (colonIndex < sizes - 2) { - unsigned long leftSpaceNum, rightSpaceNum; - for (leftSpaceNum = 0; leftSpaceNum < colonIndex - 1; leftSpaceNum++) { - if (buffer[colonIndex - leftSpaceNum - 1] != ' ') - break; - } - for (rightSpaceNum = 0; rightSpaceNum < sizes - colonIndex - 1 - 2; rightSpaceNum++) { - if (buffer[colonIndex + rightSpaceNum + 1] != ' ') - break; - } - (*write_buf)[string(buffer, 0, colonIndex - leftSpaceNum)] - = string(buffer, colonIndex + 1 + rightSpaceNum, sizes - colonIndex - 1 - 2 - rightSpaceNum); - } - return sizes; - } - - CURL* PackCurlRequest(const std::string& httpMethod, - const std::string& host, - const int32_t port, - const std::string& url, - const std::string& queryString, - const std::map& header, - const std::string& body, - const int32_t timeout, - HttpMessage& httpMessage, - const std::string& intf, - const bool httpsFlag, - curl_slist*& headers) { - static DnsCache* dnsCache = DnsCache::GetInstance(); - - CURL* curl = curl_easy_init(); - if (curl == NULL) - return NULL; - - string totalUrl = httpsFlag ? 
"https://" : "http://"; - std::string hostIP; - if (AppConfig::GetInstance()->IsHostIPReplacePolicyEnabled() && dnsCache->GetIPFromDnsCache(host, hostIP)) { - totalUrl.append(hostIP); - } else { - totalUrl.append(host); - } - totalUrl.append(url); - if (!queryString.empty()) { - totalUrl.append("?").append(queryString); - } - curl_easy_setopt(curl, CURLOPT_URL, totalUrl.c_str()); - for (const auto& iter : header) { - headers = curl_slist_append(headers, (iter.first + ":" + iter.second).c_str()); - } - if (headers != NULL) { - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - } - curl_easy_setopt(curl, CURLOPT_PORT, port); - curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, httpMethod.c_str()); - if (!body.empty()) { - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, (void*)body.c_str()); - curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, body.size()); - } - - curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1); - curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); - curl_easy_setopt(curl, CURLOPT_NETRC, CURL_NETRC_IGNORED); - - if (httpsFlag) { - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L); - curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L); - } - curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); - if (!intf.empty()) { - curl_easy_setopt(curl, CURLOPT_INTERFACE, intf.c_str()); - } - // curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &(httpMessage.content)); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, data_write_callback); - curl_easy_setopt(curl, CURLOPT_HEADERDATA, &(httpMessage.header)); - curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, header_write_callback); - return curl; - } - - void CurlClient::Send(const std::string& httpMethod, - const std::string& host, - const int32_t port, - const std::string& url, - const std::string& queryString, - const std::map& header, - const std::string& body, - const int32_t timeout, - HttpMessage& httpMessage, - const std::string& intf, - const 
bool httpsFlag) { - curl_slist* headers = NULL; - CURL* curl = PackCurlRequest( - httpMethod, host, port, url, queryString, header, body, timeout, httpMessage, intf, httpsFlag, headers); - if (curl == NULL) { - throw LOGException(LOGE_UNKNOWN_ERROR, "Init curl instance error."); - } - - CURLcode res = curl_easy_perform(curl); - if (headers != NULL) { - curl_slist_free_all(headers); - } - - switch (res) { - case CURLE_OK: - break; - case CURLE_OPERATION_TIMEDOUT: - curl_easy_cleanup(curl); - throw LOGException(LOGE_CLIENT_OPERATION_TIMEOUT, "Request operation timeout."); - break; - case CURLE_COULDNT_CONNECT: - curl_easy_cleanup(curl); - throw LOGException(LOGE_REQUEST_TIMEOUT, "Can not connect to server."); - break; - default: - curl_easy_cleanup(curl); - throw LOGException(LOGE_REQUEST_ERROR, - string("Request operation failed, curl error code : ") + curl_easy_strerror(res)); - break; - } - - long http_code = 0; - if ((res = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code)) != CURLE_OK) { - curl_easy_cleanup(curl); - throw LOGException(LOGE_UNKNOWN_ERROR, - string("Get curl response code error, curl error code : ") + curl_easy_strerror(res)); - } - httpMessage.statusCode = (int32_t)http_code; - curl_easy_cleanup(curl); - if (!httpMessage.IsLogServiceResponse()) { - throw LOGException(LOGE_REQUEST_ERROR, "Get invalid response"); - } - } - -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/CurlImp.h b/core/sdk/CurlImp.h deleted file mode 100644 index cf020e6669..0000000000 --- a/core/sdk/CurlImp.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -#include "Common.h" - -namespace logtail { -namespace sdk { - - class CurlClient { - public: - void Send(const std::string& httpMethod, - const std::string& host, - const int32_t port, - const std::string& url, - const std::string& queryString, - const std::map& header, - const std::string& body, - const int32_t timeout, - HttpMessage& httpMessage, - const std::string& intf, - const bool httpsFlag); - }; - -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/Result.cpp b/core/sdk/Result.cpp deleted file mode 100644 index 4caf9fc4c5..0000000000 --- a/core/sdk/Result.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "Result.h" -#include "Exception.h" - -namespace logtail { - -namespace sdk { - - using namespace std; - - /************************ common method ***********************/ - /************************ json method *************************/ - void ExtractJsonResult(const string& response, rapidjson::Document& document) { - document.Parse(response.c_str()); - if (document.HasParseError()) { - throw JsonException("ParseException", "Fail to parse from json string"); - } - } - - void JsonMemberCheck(const rapidjson::Value& value, const char* name) { - if (!value.IsObject()) { - throw JsonException("InvalidObjectException", "response is not valid JSON object"); - } - if (!value.HasMember(name)) { - throw JsonException("NoMemberException", string("Member ") + name + " does not exist"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, int32_t& number) { - JsonMemberCheck(value, name); - if (value[name].IsInt()) { - number = value[name].GetInt(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not int type"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, uint32_t& number) { - JsonMemberCheck(value, name); - if (value[name].IsUint()) { - number = value[name].GetUint(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not uint type"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, int64_t& number) { - JsonMemberCheck(value, name); - if (value[name].IsInt64()) { - number = value[name].GetInt64(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not int type"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, uint64_t& number) { - JsonMemberCheck(value, name); - if (value[name].IsUint64()) { - number = value[name].GetUint64(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is 
not uint type"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, bool& boolean) { - JsonMemberCheck(value, name); - if (value[name].IsBool()) { - boolean = value[name].GetBool(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not boolean type"); - } - } - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, string& dst) { - JsonMemberCheck(value, name); - if (value[name].IsString()) { - dst = value[name].GetString(); - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not string type"); - } - } - - const rapidjson::Value& GetJsonValue(const rapidjson::Value& value, const char* name) { - JsonMemberCheck(value, name); - if (value[name].IsObject() || value[name].IsArray()) { - return value[name]; - } else { - throw JsonException("ValueTypeException", string("Member ") + name + " is not json value type"); - } - } - - - void ErrorCheck(const string& response, const string& requestId, const int32_t httpCode) { - rapidjson::Document document; - try { - ExtractJsonResult(response, document); - - string errorCode; - ExtractJsonResult(document, LOG_ERROR_CODE, errorCode); - - string errorMessage; - ExtractJsonResult(document, LOG_ERROR_MESSAGE, errorMessage); - - throw LOGException(errorCode, errorMessage, requestId, httpCode); - } catch (JsonException& e) { - if (httpCode >= 500) { - throw LOGException(LOGE_INTERNAL_SERVER_ERROR, response, requestId, httpCode); - } else { - throw LOGException(LOGE_BAD_RESPONSE, string("Unextractable error:") + response, requestId, httpCode); - } - } - } - -} // namespace sdk -} // namespace logtail diff --git a/core/sdk/Result.h b/core/sdk/Result.h deleted file mode 100644 index d99b2fc135..0000000000 --- a/core/sdk/Result.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include "Common.h" -#include "rapidjson/document.h" - -namespace logtail { -namespace sdk { - - void ExtractJsonResult(const std::string& response, rapidjson::Document& document); - - void JsonMemberCheck(const rapidjson::Value& value, const char* name); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, int32_t& number); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, uint32_t& number); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, int64_t& number); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, uint64_t& number); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, bool& boolean); - - void ExtractJsonResult(const rapidjson::Value& value, const char* name, std::string& dst); - - const rapidjson::Value& GetJsonValue(const rapidjson::Value& value, const char* name); - - void ErrorCheck(const std::string& response, const std::string& requestId, const int32_t httpCode); - -} // namespace sdk -} // namespace logtail diff --git a/core/unittest/CMakeLists.txt b/core/unittest/CMakeLists.txt index 0c7f5b504a..6e51752723 100644 --- a/core/unittest/CMakeLists.txt +++ b/core/unittest/CMakeLists.txt @@ -51,7 +51,6 @@ macro(add_core_subdir) add_subdirectory(provider) add_subdirectory(queue) add_subdirectory(reader) - add_subdirectory(sdk) add_subdirectory(sender) add_subdirectory(serializer) add_subdirectory(prometheus) diff --git 
a/core/unittest/common/CMakeLists.txt b/core/unittest/common/CMakeLists.txt index 6ddd095c73..80de5c490b 100644 --- a/core/unittest/common/CMakeLists.txt +++ b/core/unittest/common/CMakeLists.txt @@ -54,9 +54,6 @@ target_link_libraries(timer_unittest ${UT_BASE_TARGET}) add_executable(curl_unittest http/CurlUnittest.cpp) target_link_libraries(curl_unittest ${UT_BASE_TARGET}) -add_executable(http_response_unittest http/HttpResponseUnittest.cpp) -target_link_libraries(http_response_unittest ${UT_BASE_TARGET}) - include(GoogleTest) gtest_discover_tests(common_simple_utils_unittest) gtest_discover_tests(common_logfileoperator_unittest) @@ -69,5 +66,3 @@ gtest_discover_tests(safe_queue_unittest) gtest_discover_tests(http_request_timer_event_unittest) gtest_discover_tests(timer_unittest) gtest_discover_tests(curl_unittest) -gtest_discover_tests(http_response_unittest) - diff --git a/core/unittest/common/http/HttpResponseUnittest.cpp b/core/unittest/common/http/HttpResponseUnittest.cpp deleted file mode 100644 index 2df46b7d86..0000000000 --- a/core/unittest/common/http/HttpResponseUnittest.cpp +++ /dev/null @@ -1,27 +0,0 @@ - -#include "common/http/HttpResponse.h" -#include "unittest/Unittest.h" - -using namespace std; - -namespace logtail { -class HttpResponseUnittest : public ::testing::Test { -public: - void TestNetworkStatus(); -}; - -void HttpResponseUnittest::TestNetworkStatus() { - HttpResponse resp; - resp.SetNetworkStatus(CURLE_OK); - APSARA_TEST_EQUAL(resp.GetNetworkStatus().mCode, NetworkCode::Ok); - - resp.SetNetworkStatus(CURLE_RECV_ERROR); - APSARA_TEST_EQUAL(resp.GetNetworkStatus().mCode, NetworkCode::RecvDataFailed); - - resp.SetNetworkStatus(CURLE_FAILED_INIT); - APSARA_TEST_EQUAL(resp.GetNetworkStatus().mCode, NetworkCode::Other); -} - -UNIT_TEST_CASE(HttpResponseUnittest, TestNetworkStatus); -} // namespace logtail -UNIT_TEST_MAIN diff --git a/core/unittest/flusher/CMakeLists.txt b/core/unittest/flusher/CMakeLists.txt index 76ac1afd6d..177a86fcc1 100644 
--- a/core/unittest/flusher/CMakeLists.txt +++ b/core/unittest/flusher/CMakeLists.txt @@ -16,14 +16,21 @@ cmake_minimum_required(VERSION 3.22) project(flusher_unittest) add_executable(flusher_sls_unittest FlusherSLSUnittest.cpp) +if (ENABLE_ENTERPRISE) + target_sources(flusher_sls_unittest PRIVATE SLSNetworkRequestMock.cpp) +endif () target_link_libraries(flusher_sls_unittest ${UT_BASE_TARGET}) add_executable(pack_id_manager_unittest PackIdManagerUnittest.cpp) target_link_libraries(pack_id_manager_unittest ${UT_BASE_TARGET}) +add_executable(sls_client_manager_unittest SLSClientManagerUnittest.cpp) +target_link_libraries(sls_client_manager_unittest ${UT_BASE_TARGET}) + if (ENABLE_ENTERPRISE) - add_executable(enterprise_sls_client_manager_unittest EnterpriseSLSClientManagerUnittest.cpp) + add_executable(enterprise_sls_client_manager_unittest EnterpriseSLSClientManagerUnittest.cpp SLSNetworkRequestMock.cpp) target_link_libraries(enterprise_sls_client_manager_unittest ${UT_BASE_TARGET}) + add_executable(enterprise_flusher_sls_monitor_unittest EnterpriseFlusherSLSMonitorUnittest.cpp) target_link_libraries(enterprise_flusher_sls_monitor_unittest ${UT_BASE_TARGET}) endif () @@ -31,6 +38,7 @@ endif () include(GoogleTest) gtest_discover_tests(flusher_sls_unittest) gtest_discover_tests(pack_id_manager_unittest) +gtest_discover_tests(sls_client_manager_unittest) if (ENABLE_ENTERPRISE) gtest_discover_tests(enterprise_sls_client_manager_unittest) gtest_discover_tests(enterprise_flusher_sls_monitor_unittest) diff --git a/core/unittest/flusher/FlusherSLSUnittest.cpp b/core/unittest/flusher/FlusherSLSUnittest.cpp index 73dc1e97e3..3639b7b44c 100644 --- a/core/unittest/flusher/FlusherSLSUnittest.cpp +++ b/core/unittest/flusher/FlusherSLSUnittest.cpp @@ -17,8 +17,10 @@ #include #include +#include "app_config/AppConfig.h" #include "common/JsonUtil.h" #include "common/LogtailCommonFlags.h" +#include "common/http/Constant.h" #ifdef __ENTERPRISE__ #include 
"config/provider/EnterpriseConfigProvider.h" #endif @@ -30,16 +32,26 @@ #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SLSSenderQueueItem.h" #include "pipeline/queue/SenderQueueManager.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif #include "plugin/flusher/sls/FlusherSLS.h" #include "plugin/flusher/sls/PackIdManager.h" #include "plugin/flusher/sls/SLSClientManager.h" +#include "plugin/flusher/sls/SLSConstant.h" #include "unittest/Unittest.h" +#ifdef __ENTERPRISE__ +#include "unittest/flusher/SLSNetworkRequestMock.h" +#endif DECLARE_FLAG_INT32(batch_send_interval); DECLARE_FLAG_INT32(merge_log_count_limit); DECLARE_FLAG_INT32(batch_send_metric_size); DECLARE_FLAG_INT32(max_send_log_group_size); DECLARE_FLAG_DOUBLE(sls_serialize_size_expansion_ratio); +DECLARE_FLAG_BOOL(send_prefer_real_ip); +DECLARE_FLAG_STRING(default_access_key_id); +DECLARE_FLAG_STRING(default_access_key); using namespace std; @@ -50,6 +62,7 @@ class FlusherSLSUnittest : public testing::Test { void OnSuccessfulInit(); void OnFailedInit(); void OnPipelineUpdate(); + void TestBuildRequest(); void TestSend(); void TestFlush(); void TestFlushAll(); @@ -57,6 +70,14 @@ class FlusherSLSUnittest : public testing::Test { void OnGoPipelineSend(); protected: + static void SetUpTestCase() { +#ifdef __ENTERPRISE__ + EnterpriseSLSClientManager::GetInstance()->mDoProbeNetwork = ProbeNetworkMock::DoProbeNetwork; + EnterpriseSLSClientManager::GetInstance()->mGetEndpointRealIp = GetRealIpMock::GetEndpointRealIp; + EnterpriseSLSClientManager::GetInstance()->mGetAccessKeyFromSLS = GetAccessKeyMock::DoGetAccessKey; +#endif + } + void SetUp() override { ctx.SetConfigName("test_config"); ctx.SetPipeline(pipeline); @@ -67,6 +88,9 @@ class FlusherSLSUnittest : public testing::Test { QueueKeyManager::GetInstance()->Clear(); SenderQueueManager::GetInstance()->Clear(); ExactlyOnceQueueManager::GetInstance()->Clear(); +#ifdef __ENTERPRISE__ + 
EnterpriseSLSClientManager::GetInstance()->Clear(); +#endif } private: @@ -89,7 +113,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); #ifndef __ENTERPRISE__ - configJson["Endpoint"] = "cn-hangzhou.log.aliyuncs.com"; + configJson["Endpoint"] = "test_region.log.aliyuncs.com"; #endif flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); @@ -100,17 +124,16 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_EQUAL("test_logstore", flusher->mLogstore); APSARA_TEST_EQUAL(STRING_FLAG(default_region_name), flusher->mRegion); #ifndef __ENTERPRISE__ - APSARA_TEST_EQUAL("cn-hangzhou.log.aliyuncs.com", flusher->mEndpoint); -#else - APSARA_TEST_EQUAL("", flusher->mEndpoint); + APSARA_TEST_EQUAL("test_region.log.aliyuncs.com", flusher->mEndpoint); #endif APSARA_TEST_EQUAL("", flusher->mAliuid); APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, flusher->mTelemetryType); APSARA_TEST_TRUE(flusher->mShardHashKeys.empty()); APSARA_TEST_EQUAL(static_cast(INT32_FLAG(merge_log_count_limit)), flusher->mBatcher.GetEventFlushStrategy().GetMinCnt()); - APSARA_TEST_EQUAL(static_cast(INT32_FLAG(max_send_log_group_size) / DOUBLE_FLAG(sls_serialize_size_expansion_ratio)), - flusher->mBatcher.GetEventFlushStrategy().GetMaxSizeBytes()); + APSARA_TEST_EQUAL( + static_cast(INT32_FLAG(max_send_log_group_size) / DOUBLE_FLAG(sls_serialize_size_expansion_ratio)), + flusher->mBatcher.GetEventFlushStrategy().GetMaxSizeBytes()); APSARA_TEST_EQUAL(static_cast(INT32_FLAG(batch_send_metric_size)), flusher->mBatcher.GetEventFlushStrategy().GetMinSizeBytes()); uint32_t timeout = static_cast(INT32_FLAG(batch_send_interval)) / 2; @@ -136,8 +159,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": 
"test_region.log.aliyuncs.com", "Aliuid": "123456789", "TelemetryType": "metrics", "ShardHashKeys": [ @@ -146,19 +169,24 @@ void FlusherSLSUnittest::OnSuccessfulInit() { } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); +#ifndef __ENTERPRISE__ + configJson["EndpointMode"] = "default"; +#endif flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL("cn-hangzhou", flusher->mRegion); + APSARA_TEST_EQUAL("test_region", flusher->mRegion); #ifdef __ENTERPRISE__ APSARA_TEST_EQUAL("123456789", flusher->mAliuid); + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mEndpointMode); #else - APSARA_TEST_EQUAL("cn-hangzhou.log.aliyuncs.com", flusher->mEndpoint); APSARA_TEST_EQUAL("", flusher->mAliuid); #endif + APSARA_TEST_EQUAL("test_region.log.aliyuncs.com", flusher->mEndpoint); APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_METRICS, flusher->mTelemetryType); APSARA_TEST_EQUAL(1U, flusher->mShardHashKeys.size()); + APSARA_TEST_EQUAL("__source__", flusher->mShardHashKeys[0]); SenderQueueManager::GetInstance()->Clear(); // invalid optional param @@ -175,9 +203,10 @@ void FlusherSLSUnittest::OnSuccessfulInit() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); #ifdef __ENTERPRISE__ + configJson["EndpointMode"] = true; configJson["Endpoint"] = true; #else - configJson["Endpoint"] = "cn-hangzhou.log.aliyuncs.com"; + configJson["Endpoint"] = "test_region.log.aliyuncs.com"; #endif flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); @@ -185,6 +214,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(STRING_FLAG(default_region_name), flusher->mRegion); #ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mEndpointMode); APSARA_TEST_EQUAL("", flusher->mEndpoint); #endif 
APSARA_TEST_EQUAL("", flusher->mAliuid); @@ -200,8 +230,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -214,14 +244,94 @@ void FlusherSLSUnittest::OnSuccessfulInit() { SenderQueueManager::GetInstance()->Clear(); #endif +#ifdef __ENTERPRISE__ + // EndpointMode && Endpoint + EnterpriseSLSClientManager::GetInstance()->Clear(); + // Endpoint ignored in acclerate mode + configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "test_region", + "EndpointMode": "accelerate", + "Endpoint": " test_region.log.aliyuncs.com " + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + flusher.reset(new FlusherSLS()); + flusher->SetContext(ctx); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); + APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(EndpointMode::ACCELERATE, flusher->mEndpointMode); + APSARA_TEST_EQUAL(EnterpriseSLSClientManager::GetInstance()->mRegionCandidateEndpointsMap.end(), + EnterpriseSLSClientManager::GetInstance()->mRegionCandidateEndpointsMap.find("test_region")); + APSARA_TEST_EQUAL(flusher->mProject, flusher->mCandidateHostsInfo->GetProject()); + APSARA_TEST_EQUAL(flusher->mRegion, flusher->mCandidateHostsInfo->GetRegion()); + APSARA_TEST_EQUAL(EndpointMode::ACCELERATE, flusher->mCandidateHostsInfo->GetMode()); + SenderQueueManager::GetInstance()->Clear(); + + // Endpoint should be added to region remote endpoints if not existed + configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "test_region", + "EndpointMode": "unknown", + "Endpoint": " test_region.log.aliyuncs.com " + 
} + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + flusher.reset(new FlusherSLS()); + flusher->SetContext(ctx); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); + APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mEndpointMode); + auto& endpoints + = EnterpriseSLSClientManager::GetInstance()->mRegionCandidateEndpointsMap["test_region"].mRemoteEndpoints; + APSARA_TEST_EQUAL(1U, endpoints.size()); + APSARA_TEST_EQUAL("test_region.log.aliyuncs.com", endpoints[0]); + APSARA_TEST_EQUAL(flusher->mProject, flusher->mCandidateHostsInfo->GetProject()); + APSARA_TEST_EQUAL(flusher->mRegion, flusher->mCandidateHostsInfo->GetRegion()); + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mCandidateHostsInfo->GetMode()); + SenderQueueManager::GetInstance()->Clear(); + + // Endpoint should be ignored when region remote endpoints existed + configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "test_region", + "EndpointMode": "default", + "Endpoint": " test_region-intranet.log.aliyuncs.com " + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + flusher.reset(new FlusherSLS()); + flusher->SetContext(ctx); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); + APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mEndpointMode); + endpoints = EnterpriseSLSClientManager::GetInstance()->mRegionCandidateEndpointsMap["test_region"].mRemoteEndpoints; + APSARA_TEST_EQUAL(1U, endpoints.size()); + APSARA_TEST_EQUAL("test_region.log.aliyuncs.com", endpoints[0]); + APSARA_TEST_EQUAL(flusher->mProject, flusher->mCandidateHostsInfo->GetProject()); + APSARA_TEST_EQUAL(flusher->mRegion, flusher->mCandidateHostsInfo->GetRegion()); + APSARA_TEST_EQUAL(EndpointMode::DEFAULT, flusher->mCandidateHostsInfo->GetMode()); + 
SenderQueueManager::GetInstance()->Clear(); +#endif + +#ifndef __ENTERPRISE__ // Endpoint configStr = R"( { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": " cn-hangzhou.log.aliyuncs.com " + "Region": "test_region", + "Endpoint": " test_region.log.aliyuncs.com " } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -229,12 +339,9 @@ void FlusherSLSUnittest::OnSuccessfulInit() { flusher->SetContext(ctx); flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL("cn-hangzhou.log.aliyuncs.com", flusher->mEndpoint); - auto iter = SLSClientManager::GetInstance()->mRegionEndpointEntryMap.find("cn-hangzhou"); - APSARA_TEST_NOT_EQUAL(SLSClientManager::GetInstance()->mRegionEndpointEntryMap.end(), iter); - APSARA_TEST_NOT_EQUAL(iter->second.mEndpointInfoMap.end(), - iter->second.mEndpointInfoMap.find("http://cn-hangzhou.log.aliyuncs.com")); + APSARA_TEST_EQUAL("test_region.log.aliyuncs.com", flusher->mEndpoint); SenderQueueManager::GetInstance()->Clear(); +#endif // TelemetryType configStr = R"( @@ -242,9 +349,9 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", - "TelemetryType": "logs" + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", + "TelemetryType": "metrics" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -252,7 +359,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { flusher->SetContext(ctx); flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_METRICS, 
flusher->mTelemetryType); SenderQueueManager::GetInstance()->Clear(); configStr = R"( @@ -260,8 +367,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "TelemetryType": "unknown" } )"; @@ -279,8 +386,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "ShardHashKeys": [ "__source__" ] @@ -302,8 +409,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "ShardHashKeys": [ "__source__" ] @@ -322,8 +429,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -343,8 +450,8 @@ void FlusherSLSUnittest::OnSuccessfulInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "EnableShardHash": false } )"; @@ -381,7 +488,7 @@ void FlusherSLSUnittest::OnFailedInit() { { "Type": "flusher_sls", "Logstore": "test_logstore", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, 
configJson, errorMsg)); @@ -395,7 +502,7 @@ void FlusherSLSUnittest::OnFailedInit() { "Type": "flusher_sls", "Project": true, "Logstore": "test_logstore", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -409,7 +516,7 @@ void FlusherSLSUnittest::OnFailedInit() { { "Type": "flusher_sls", "Project": "test_project", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -423,7 +530,7 @@ void FlusherSLSUnittest::OnFailedInit() { "Type": "flusher_sls", "Project": "test_project", "Logstore": true, - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); @@ -478,16 +585,14 @@ void FlusherSLSUnittest::OnPipelineUpdate() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com" + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com" } )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); APSARA_TEST_TRUE(flusher1.Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(flusher1.Start()); APSARA_TEST_EQUAL(1U, FlusherSLS::sProjectRefCntMap.size()); - APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); - APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); { PipelineContext ctx2; @@ -500,8 +605,8 @@ void FlusherSLSUnittest::OnPipelineUpdate() { "Type": "flusher_sls", "Project": "test_project_2", "Logstore": "test_logstore_2", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -510,14 +615,10 @@ void FlusherSLSUnittest::OnPipelineUpdate() { 
APSARA_TEST_TRUE(flusher1.Stop(false)); APSARA_TEST_TRUE(FlusherSLS::sProjectRefCntMap.empty()); - APSARA_TEST_FALSE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); - APSARA_TEST_TRUE(SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").empty()); APSARA_TEST_TRUE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); APSARA_TEST_TRUE(flusher2.Start()); APSARA_TEST_EQUAL(1U, FlusherSLS::sProjectRefCntMap.size()); - APSARA_TEST_TRUE(FlusherSLS::IsRegionContainingConfig("cn-hangzhou")); - APSARA_TEST_EQUAL(1U, SLSClientManager::GetInstance()->GetRegionAliuids("cn-hangzhou").size()); APSARA_TEST_TRUE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher1.GetQueueKey())); APSARA_TEST_FALSE(SenderQueueManager::GetInstance()->IsQueueMarkedDeleted(flusher2.GetQueueKey())); flusher2.Stop(true); @@ -534,8 +635,8 @@ void FlusherSLSUnittest::OnPipelineUpdate() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -552,6 +653,486 @@ void FlusherSLSUnittest::OnPipelineUpdate() { } } +void FlusherSLSUnittest::TestBuildRequest() { +#ifdef __ENTERPRISE__ + EnterpriseSLSClientManager::GetInstance()->UpdateLocalRegionEndpointsAndHttpsInfo("test_region", + {kAccelerationDataEndpoint}); + EnterpriseSLSClientManager::GetInstance()->UpdateRemoteRegionEndpoints( + "test_region", {"test_region-intranet.log.aliyuncs.com", "test_region.log.aliyuncs.com"}); + EnterpriseSLSClientManager::GetInstance()->UpdateRemoteRegionEndpoints( + "test_region-b", {"test_region-b-intranet.log.aliyuncs.com", "test_region-b.log.aliyuncs.com"}); +#endif + Json::Value configJson, optionalGoPipeline; + string errorMsg; + string configStr = R"( + { + "Type": "flusher_sls", + "Project": "test_project", + "Logstore": "test_logstore", + "Region": "test_region-b", 
+ "Aliuid": "1234567890", + "Endpoint": "test_endpoint" + } + )"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); + FlusherSLS flusher; + flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); + APSARA_TEST_TRUE(flusher.Init(configJson, optionalGoPipeline)); + + string body = "hello, world!"; + string bodyLenStr = to_string(body.size()); + uint32_t rawSize = 100; + string rawSizeStr = "100"; + + SLSSenderQueueItem item("hello, world!", rawSize, &flusher, flusher.GetQueueKey(), flusher.mLogstore); + unique_ptr req; + bool keepItem = false; + string errMsg; +#ifdef __ENTERPRISE__ + { + // empty ak, first try + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + // empty ak, second try + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(nullptr, req); + APSARA_TEST_TRUE(keepItem); + } + EnterpriseSLSClientManager::GetInstance()->SetAccessKey( + "1234567890", SLSClientManager::AuthType::ANONYMOUS, "test_ak", "test_sk"); + { + // no available host, uninitialized + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(nullptr, req); + APSARA_TEST_TRUE(keepItem); + APSARA_TEST_EQUAL(static_cast(AppConfig::GetInstance()->GetSendRequestConcurrency()), + FlusherSLS::GetRegionConcurrencyLimiter(flusher.mRegion)->GetCurrentLimit()); + } + { + // no available host, initialized + flusher.mCandidateHostsInfo->SetInitialized(); + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(nullptr, req); + APSARA_TEST_TRUE(keepItem); + APSARA_TEST_EQUAL(static_cast(AppConfig::GetInstance()->GetSendRequestConcurrency()), + FlusherSLS::GetRegionConcurrencyLimiter(flusher.mRegion)->GetCurrentLimit()); + } + EnterpriseSLSClientManager::GetInstance()->UpdateHostLatency("test_project", + EndpointMode::DEFAULT, + "test_project.test_region-b.log.aliyuncs.com", + chrono::milliseconds(100)); + 
flusher.mCandidateHostsInfo->SelectBestHost(); +#endif + // log telemetry type + { + // normal + SLSSenderQueueItem item("hello, world!", rawSize, &flusher, flusher.GetQueueKey(), flusher.mLogstore); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); +#ifdef __ENTERPRISE__ + APSARA_TEST_FALSE(req->mHTTPSFlag); +#else + APSARA_TEST_TRUE(req->mHTTPSFlag); +#endif + APSARA_TEST_EQUAL("/logstores/test_logstore/shards/lb", req->mUrl); + APSARA_TEST_EQUAL("", req->mQueryString); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(12U, req->mHeader.size()); +#else + APSARA_TEST_EQUAL(11U, req->mHeader.size()); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHeader[HOST]); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHeader[HOST]); +#endif + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); +#endif + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHost); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(80, req->mPort); +#else + APSARA_TEST_EQUAL(443, req->mPort); +#endif + 
APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_FALSE(item.mRealIpFlag); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", item.mCurrentHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", item.mCurrentHost); +#endif + } + { + // event group list + SLSSenderQueueItem item("hello, world!", + rawSize, + &flusher, + flusher.GetQueueKey(), + flusher.mLogstore, + RawDataType::EVENT_GROUP_LIST); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); +#ifdef __ENTERPRISE__ + APSARA_TEST_FALSE(req->mHTTPSFlag); +#else + APSARA_TEST_TRUE(req->mHTTPSFlag); +#endif + APSARA_TEST_EQUAL("/logstores/test_logstore/shards/lb", req->mUrl); + APSARA_TEST_EQUAL("", req->mQueryString); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(13U, req->mHeader.size()); +#else + APSARA_TEST_EQUAL(12U, req->mHeader.size()); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHeader[HOST]); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHeader[HOST]); +#endif + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[X_LOG_BODYRAWSIZE]); + APSARA_TEST_EQUAL(LOG_MODE_BATCH_GROUP, req->mHeader[X_LOG_MODE]); +#ifdef 
__ENTERPRISE__ + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); +#endif + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHost); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(80, req->mPort); +#else + APSARA_TEST_EQUAL(443, req->mPort); +#endif + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", item.mCurrentHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", item.mCurrentHost); +#endif + } + { + // shard hash + SLSSenderQueueItem item("hello, world!", + rawSize, + &flusher, + flusher.GetQueueKey(), + flusher.mLogstore, + RawDataType::EVENT_GROUP, + "hash_key"); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); +#ifdef __ENTERPRISE__ + APSARA_TEST_FALSE(req->mHTTPSFlag); +#else + APSARA_TEST_TRUE(req->mHTTPSFlag); +#endif + APSARA_TEST_EQUAL("/logstores/test_logstore/shards/route", req->mUrl); + map params{{"key", "hash_key"}}; + APSARA_TEST_EQUAL(GetQueryString(params), req->mQueryString); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(12U, req->mHeader.size()); +#else + APSARA_TEST_EQUAL(11U, req->mHeader.size()); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHeader[HOST]); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHeader[HOST]); +#endif + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + 
APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); +#endif + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHost); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(80, req->mPort); +#else + APSARA_TEST_EQUAL(443, req->mPort); +#endif + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_FALSE(item.mRealIpFlag); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", item.mCurrentHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", item.mCurrentHost); +#endif + } + { + // exactly once + auto cpt = make_shared(); + cpt->index = 0; + cpt->data.set_hash_key("hash_key_0"); + cpt->data.set_sequence_id(1); + SLSSenderQueueItem item("hello, world!", + rawSize, + &flusher, + flusher.GetQueueKey(), + flusher.mLogstore, + RawDataType::EVENT_GROUP, + "hash_key_0", + std::move(cpt)); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); +#ifdef __ENTERPRISE__ + APSARA_TEST_FALSE(req->mHTTPSFlag); +#else + APSARA_TEST_TRUE(req->mHTTPSFlag); +#endif + 
APSARA_TEST_EQUAL("/logstores/test_logstore/shards/route", req->mUrl); + map params{{"key", "hash_key_0"}, {"seqid", "1"}}; + APSARA_TEST_EQUAL(GetQueryString(params), req->mQueryString); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(12U, req->mHeader.size()); +#else + APSARA_TEST_EQUAL(11U, req->mHeader.size()); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHeader[HOST]); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHeader[HOST]); +#endif + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); +#endif + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHost); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(80, req->mPort); +#else + APSARA_TEST_EQUAL(443, req->mPort); +#endif + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_FALSE(item.mRealIpFlag); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", 
item.mCurrentHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", item.mCurrentHost); +#endif + } + // metric telemtery type + flusher.mTelemetryType = sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_METRICS; + { + SLSSenderQueueItem item("hello, world!", rawSize, &flusher, flusher.GetQueueKey(), flusher.mLogstore); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); +#ifdef __ENTERPRISE__ + APSARA_TEST_FALSE(req->mHTTPSFlag); +#else + APSARA_TEST_TRUE(req->mHTTPSFlag); +#endif + APSARA_TEST_EQUAL("/prometheus/test_project/test_logstore/api/v1/write", req->mUrl); + APSARA_TEST_EQUAL("", req->mQueryString); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(12U, req->mHeader.size()); +#else + APSARA_TEST_EQUAL(11U, req->mHeader.size()); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHeader[HOST]); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHeader[HOST]); +#endif + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); +#endif + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", req->mHost); +#else + 
APSARA_TEST_EQUAL("test_project.test_endpoint", req->mHost); +#endif +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL(80, req->mPort); +#else + APSARA_TEST_EQUAL(443, req->mPort); +#endif + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_FALSE(item.mRealIpFlag); +#ifdef __ENTERPRISE__ + APSARA_TEST_EQUAL("test_project.test_region-b.log.aliyuncs.com", item.mCurrentHost); +#else + APSARA_TEST_EQUAL("test_project.test_endpoint", item.mCurrentHost); +#endif + } + flusher.mTelemetryType = sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS; +#ifdef __ENTERPRISE__ + { + // region mode changed + EnterpriseSLSClientManager::GetInstance()->CopyLocalRegionEndpointsAndHttpsInfoIfNotExisted("test_region", + "test_region-b"); + auto old = flusher.mCandidateHostsInfo.get(); + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_NOT_EQUAL(old, flusher.mCandidateHostsInfo.get()); + + EnterpriseSLSClientManager::GetInstance()->UpdateHostLatency("test_project", + EndpointMode::ACCELERATE, + "test_project." + kAccelerationDataEndpoint, + chrono::milliseconds(10)); + flusher.mCandidateHostsInfo->SelectBestHost(); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL("test_project." 
+ kAccelerationDataEndpoint, req->mHost); + } + // real ip + BOOL_FLAG(send_prefer_real_ip) = true; + { + // ip not empty + EnterpriseSLSClientManager::GetInstance()->SetRealIp("test_region-b", "192.168.0.1"); + SLSSenderQueueItem item("hello, world!", rawSize, &flusher, flusher.GetQueueKey(), flusher.mLogstore); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL(HTTP_POST, req->mMethod); + APSARA_TEST_FALSE(req->mHTTPSFlag); + APSARA_TEST_EQUAL("/logstores/test_logstore/shards/lb", req->mUrl); + APSARA_TEST_EQUAL("", req->mQueryString); + APSARA_TEST_EQUAL(12U, req->mHeader.size()); + APSARA_TEST_EQUAL("test_project.192.168.0.1", req->mHeader[HOST]); + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); + APSARA_TEST_EQUAL("192.168.0.1", req->mHost); + APSARA_TEST_EQUAL(80, req->mPort); + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_TRUE(item.mRealIpFlag); + APSARA_TEST_EQUAL("192.168.0.1", item.mCurrentHost); + } + { + // ip empty + EnterpriseSLSClientManager::GetInstance()->SetRealIp("test_region-b", ""); + 
SLSSenderQueueItem item("hello, world!", rawSize, &flusher, flusher.GetQueueKey(), flusher.mLogstore); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL("test_project." + kAccelerationDataEndpoint, req->mHeader[HOST]); + APSARA_TEST_EQUAL(SLSClientManager::GetInstance()->GetUserAgent(), req->mHeader[USER_AGENT]); + APSARA_TEST_FALSE(req->mHeader[DATE].empty()); + APSARA_TEST_EQUAL(TYPE_LOG_PROTOBUF, req->mHeader[CONTENT_TYPE]); + APSARA_TEST_EQUAL(bodyLenStr, req->mHeader[CONTENT_LENGTH]); + APSARA_TEST_EQUAL(CalcMD5(req->mBody), req->mHeader[CONTENT_MD5]); + APSARA_TEST_EQUAL(LOG_API_VERSION, req->mHeader[X_LOG_APIVERSION]); + APSARA_TEST_EQUAL(HMAC_SHA1, req->mHeader[X_LOG_SIGNATUREMETHOD]); + APSARA_TEST_EQUAL("lz4", req->mHeader[X_LOG_COMPRESSTYPE]); + APSARA_TEST_EQUAL(rawSizeStr, req->mHeader[X_LOG_BODYRAWSIZE]); + APSARA_TEST_EQUAL(MD5_SHA1_SALT_KEYPROVIDER, req->mHeader[X_LOG_KEYPROVIDER]); + APSARA_TEST_FALSE(req->mHeader[AUTHORIZATION].empty()); + APSARA_TEST_EQUAL(body, req->mBody); + APSARA_TEST_EQUAL("test_project." + kAccelerationDataEndpoint, req->mHost); + APSARA_TEST_EQUAL(80, req->mPort); + APSARA_TEST_EQUAL(static_cast(INT32_FLAG(default_http_request_timeout_sec)), req->mTimeout); + APSARA_TEST_EQUAL(1U, req->mMaxTryCnt); + APSARA_TEST_FALSE(req->mFollowRedirects); + APSARA_TEST_EQUAL(&item, req->mItem); + APSARA_TEST_FALSE(item.mRealIpFlag); + APSARA_TEST_EQUAL("test_project." 
+ kAccelerationDataEndpoint, item.mCurrentHost); + } + { + // ip empty, and region mode changed + auto& endpoints = EnterpriseSLSClientManager::GetInstance()->mRegionCandidateEndpointsMap["test_region-b"]; + endpoints.mMode = EndpointMode::CUSTOM; + endpoints.mLocalEndpoints = {"custom.endpoint"}; + + auto old = flusher.mCandidateHostsInfo.get(); + APSARA_TEST_FALSE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_NOT_EQUAL(old, flusher.mCandidateHostsInfo.get()); + + EnterpriseSLSClientManager::GetInstance()->UpdateHostLatency( + "test_project", EndpointMode::CUSTOM, "test_project.custom.endpoint", chrono::milliseconds(10)); + flusher.mCandidateHostsInfo->SelectBestHost(); + APSARA_TEST_TRUE(flusher.BuildRequest(&item, req, &keepItem, &errMsg)); + APSARA_TEST_EQUAL("test_project.custom.endpoint", req->mHost); + } + BOOL_FLAG(send_prefer_real_ip) = false; +#endif +} + void FlusherSLSUnittest::TestSend() { { // exactly once enabled @@ -563,8 +1144,8 @@ void FlusherSLSUnittest::TestSend() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -710,8 +1291,8 @@ void FlusherSLSUnittest::TestSend() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789", "ShardHashKeys": [ "tag_key" @@ -754,7 +1335,7 @@ void FlusherSLSUnittest::TestSend() { APSARA_TEST_TRUE(item->mBufferOrNot); APSARA_TEST_EQUAL(&flusher, item->mFlusher); APSARA_TEST_EQUAL(flusher.mQueueKey, item->mQueueKey); - APSARA_TEST_EQUAL(sdk::CalcMD5("tag_value"), item->mShardHashKey); + APSARA_TEST_EQUAL(CalcMD5("tag_value"), item->mShardHashKey); APSARA_TEST_EQUAL(flusher.mLogstore, item->mLogstore); 
auto compressor @@ -805,8 +1386,8 @@ void FlusherSLSUnittest::TestSend() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -907,8 +1488,8 @@ void FlusherSLSUnittest::TestFlush() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -951,8 +1532,8 @@ void FlusherSLSUnittest::TestFlushAll() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -1003,8 +1584,8 @@ void FlusherSLSUnittest::OnGoPipelineSend() { "Type": "flusher_sls", "Project": "test_project", "Logstore": "test_logstore", - "Region": "cn-hangzhou", - "Endpoint": "cn-hangzhou.log.aliyuncs.com", + "Region": "test_region", + "Endpoint": "test_region.log.aliyuncs.com", "Aliuid": "123456789" } )"; @@ -1084,13 +1665,13 @@ void FlusherSLSUnittest::OnGoPipelineSend() { UNIT_TEST_CASE(FlusherSLSUnittest, OnSuccessfulInit) UNIT_TEST_CASE(FlusherSLSUnittest, OnFailedInit) UNIT_TEST_CASE(FlusherSLSUnittest, OnPipelineUpdate) +UNIT_TEST_CASE(FlusherSLSUnittest, TestBuildRequest) UNIT_TEST_CASE(FlusherSLSUnittest, TestSend) UNIT_TEST_CASE(FlusherSLSUnittest, TestFlush) UNIT_TEST_CASE(FlusherSLSUnittest, TestFlushAll) UNIT_TEST_CASE(FlusherSLSUnittest, TestAddPackId) UNIT_TEST_CASE(FlusherSLSUnittest, OnGoPipelineSend) - } // namespace logtail UNIT_TEST_MAIN diff --git a/core/unittest/flusher/SLSClientManagerUnittest.cpp b/core/unittest/flusher/SLSClientManagerUnittest.cpp new file mode 100644 index 
0000000000..71dec9fec5 --- /dev/null +++ b/core/unittest/flusher/SLSClientManagerUnittest.cpp @@ -0,0 +1,46 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "plugin/flusher/sls/SLSClientManager.h" +#include "unittest/Unittest.h" + +DECLARE_FLAG_STRING(default_access_key_id); +DECLARE_FLAG_STRING(default_access_key); + +using namespace std; + +namespace logtail { + +class SLSClientManagerUnittest : public ::testing::Test { +public: + void TestAccessKeyManagement(); + +private: + SLSClientManager mManager; +}; + +void SLSClientManagerUnittest::TestAccessKeyManagement() { + string accessKeyId, accessKeySecret; + SLSClientManager::AuthType type; + mManager.GetAccessKey("", type, accessKeyId, accessKeySecret); + APSARA_TEST_EQUAL(SLSClientManager::AuthType::AK, type); + APSARA_TEST_EQUAL(STRING_FLAG(default_access_key_id), accessKeyId); + APSARA_TEST_EQUAL(STRING_FLAG(default_access_key), accessKeySecret); +} + +UNIT_TEST_CASE(SLSClientManagerUnittest, TestAccessKeyManagement) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/models/MetricEventUnittest.cpp b/core/unittest/models/MetricEventUnittest.cpp index ada8af3239..b148cce39b 100644 --- a/core/unittest/models/MetricEventUnittest.cpp +++ b/core/unittest/models/MetricEventUnittest.cpp @@ -346,7 +346,7 @@ void MetricEventUnittest::TestTagsIterator() { void MetricEventUnittest::TestCopy() { MetricEvent* oldMetricEvent = 
mEventGroup->AddMetricEvent(); oldMetricEvent->SetValue(map{{"test-1", 10.0}, {"test-2", 2.0}}); - APSARA_TEST_EQUAL(1, mEventGroup->GetEvents().size()); + APSARA_TEST_EQUAL(1U, mEventGroup->GetEvents().size()); PipelineEventGroup newGroup = mEventGroup->Copy(); MetricEvent newMetricEvent = newGroup.GetEvents().at(0).Cast(); diff --git a/core/unittest/pipeline/GlobalConfigUnittest.cpp b/core/unittest/pipeline/GlobalConfigUnittest.cpp index 7c7bc555bd..d646d17251 100644 --- a/core/unittest/pipeline/GlobalConfigUnittest.cpp +++ b/core/unittest/pipeline/GlobalConfigUnittest.cpp @@ -43,6 +43,8 @@ void GlobalConfigUnittest::OnSuccessfulInit() const { // only mandatory param config.reset(new GlobalConfig()); + APSARA_TEST_TRUE(config->Init(Json::Value(Json::ValueType::objectValue), ctx, extendedParams)); + APSARA_TEST_TRUE(extendedParams.isNull()); APSARA_TEST_EQUAL(GlobalConfig::TopicType::NONE, config->mTopicType); APSARA_TEST_EQUAL("", config->mTopicFormat); APSARA_TEST_EQUAL(1U, config->mPriority); diff --git a/core/unittest/pipeline/HttpSinkMock.h b/core/unittest/pipeline/HttpSinkMock.h index 3016f56e1d..38a3a9882e 100644 --- a/core/unittest/pipeline/HttpSinkMock.h +++ b/core/unittest/pipeline/HttpSinkMock.h @@ -22,7 +22,7 @@ #include "plugin/flusher/sls/FlusherSLS.h" #include "runner/FlusherRunner.h" #include "runner/sink/http/HttpSink.h" -#include "sdk/Common.h" +#include "plugin/flusher/sls/SLSConstant.h" namespace logtail { class HttpSinkMock : public HttpSink { @@ -64,8 +64,11 @@ class HttpSinkMock : public HttpSink { std::lock_guard lock(mMutex); mRequests.push_back(*(request->mItem)); } + request->mResponse.SetNetworkStatus(NetworkCode::Ok, ""); request->mResponse.SetStatusCode(200); - request->mResponse.mHeader[sdk::X_LOG_REQUEST_ID] = "request_id"; + request->mResponse.SetResponseTime(std::chrono::milliseconds(10)); + // for sls only + request->mResponse.mHeader[X_LOG_REQUEST_ID] = "request_id"; 
static_cast(request->mItem->mFlusher)->OnSendDone(request->mResponse, request->mItem); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); request.reset(); diff --git a/core/unittest/pipeline/PipelineUpdateUnittest.cpp b/core/unittest/pipeline/PipelineUpdateUnittest.cpp index 6e5a4be888..1301e4046d 100644 --- a/core/unittest/pipeline/PipelineUpdateUnittest.cpp +++ b/core/unittest/pipeline/PipelineUpdateUnittest.cpp @@ -33,6 +33,9 @@ #include "unittest/pipeline/HttpSinkMock.h" #include "unittest/pipeline/LogtailPluginMock.h" #include "unittest/plugin/PluginMock.h" +#ifdef __ENTERPRISE__ +#include "config/provider/EnterpriseConfigProvider.h" +#endif using namespace std; @@ -63,7 +66,7 @@ class FlusherSLSMock : public FlusherSLS { public: static const std::string sName; - bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const override { + bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem, std::string* errMsg) override { auto data = static_cast(item); std::map header; req = std::make_unique( @@ -372,10 +375,12 @@ class PipelineUpdateUnittest : public testing::Test { "Type": "flusher_stdout2" })"; - size_t builtinPipelineCnt = 0; + static size_t builtinPipelineCnt; bool isFileServerStart = false; }; +size_t PipelineUpdateUnittest::builtinPipelineCnt = 0; + void PipelineUpdateUnittest::TestFileServerStart() { isFileServerStart = true; Json::Value nativePipelineConfigJson diff --git a/core/unittest/plugin/PluginMock.h b/core/unittest/plugin/PluginMock.h index 154d1930be..e61410df75 100644 --- a/core/unittest/plugin/PluginMock.h +++ b/core/unittest/plugin/PluginMock.h @@ -151,7 +151,7 @@ class FlusherHttpMock : public HttpFlusher { return true; } bool FlushAll() override { return mIsValid; } - bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const override { + bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem, std::string* errMsg) override { if 
(item->mData == "invalid_keep") { *keepItem = true; return false; diff --git a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp index dc159d3cb1..1eb84623a3 100644 --- a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp +++ b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp @@ -14,11 +14,11 @@ * limitations under the License. */ - #include #include #include "common/StringTools.h" +#include "common/http/Curl.h" #include "common/http/HttpResponse.h" #include "common/timer/Timer.h" #include "models/RawEvent.h" @@ -80,13 +80,13 @@ void ScrapeSchedulerUnittest::TestProcess() { // if status code is not 200, no data will be processed // but will continue running, sending self-monitoring metrics httpResponse.SetStatusCode(503); - httpResponse.SetNetworkStatus(CURLE_OK); + httpResponse.SetNetworkStatus(NetworkCode::Ok, ""); event.OnMetricResult(httpResponse, 0); APSARA_TEST_EQUAL(1UL, event.mPromStreamScraper.mItem.size()); event.mPromStreamScraper.mItem.clear(); httpResponse.SetStatusCode(503); - httpResponse.SetNetworkStatus(CURLE_COULDNT_CONNECT); + httpResponse.SetNetworkStatus(GetNetworkStatus(CURLE_COULDNT_CONNECT), ""); event.OnMetricResult(httpResponse, 0); APSARA_TEST_EQUAL(event.mPromStreamScraper.mItem[0] ->mEventGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_STATE) @@ -96,7 +96,7 @@ void ScrapeSchedulerUnittest::TestProcess() { event.mPromStreamScraper.mItem.clear(); httpResponse.SetStatusCode(200); - httpResponse.SetNetworkStatus(CURLE_OK); + httpResponse.SetNetworkStatus(NetworkCode::Ok, ""); string body1 = "# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n" "# TYPE go_gc_duration_seconds summary\n" "go_gc_duration_seconds{quantile=\"0\"} 1.5531e-05\n" @@ -284,4 +284,4 @@ UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestExactlyScrape) } // namespace logtail -UNIT_TEST_MAIN \ No newline at end of file +UNIT_TEST_MAIN diff --git 
a/core/unittest/sdk/CMakeLists.txt b/core/unittest/sdk/CMakeLists.txt deleted file mode 100644 index f052c1a847..0000000000 --- a/core/unittest/sdk/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2022 iLogtail Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -cmake_minimum_required(VERSION 3.22) -project(sdk_unittest) - -# add_executable(sdk_common_unittest SDKCommonUnittest.cpp) -# target_link_libraries(sdk_common_unittest ${UT_BASE_TARGET}) diff --git a/core/unittest/sdk/SDKCommonUnittest.cpp b/core/unittest/sdk/SDKCommonUnittest.cpp deleted file mode 100644 index 315a3b450d..0000000000 --- a/core/unittest/sdk/SDKCommonUnittest.cpp +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2022 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "unittest/Unittest.h" -#include "sdk/Common.h" -#include "sdk/Client.h" -#include "sdk/Exception.h" -#include "common/CompressTools.h" -#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" - -DECLARE_FLAG_STRING(default_access_key_id); -DECLARE_FLAG_STRING(default_access_key); - -namespace logtail { - -class HttpMessageUnittest : public ::testing::Test { -public: - void TestGetServerTimeFromHeader(); -}; - -UNIT_TEST_CASE(HttpMessageUnittest, TestGetServerTimeFromHeader); - -void HttpMessageUnittest::TestGetServerTimeFromHeader() { - sdk::HttpMessage httpMsg; - EXPECT_EQ(0, httpMsg.GetServerTimeFromHeader()); - - auto& header = httpMsg.header; - header["Date"] = "Thu, 18 Feb 2021 10:11:10 GMT"; - EXPECT_EQ(1613643070, httpMsg.GetServerTimeFromHeader()); - - const time_t kTimestamp = 1613588970; - header["x-log-time"] = std::to_string(kTimestamp); - EXPECT_EQ(kTimestamp, httpMsg.GetServerTimeFromHeader()); -} - -class SDKClientUnittest : public ::testing::Test {}; - -TEST_F(SDKClientUnittest, TestNetwork) { - sdk::Client client("log-global.aliyuncs.com", - STRING_FLAG(default_access_key_id), - STRING_FLAG(default_access_key), - INT32_FLAG(sls_client_send_timeout), - "192.168.1.1", - ""); - try { - client.TestNetwork(); - ASSERT_TRUE(false); - } catch (const sdk::LOGException& e) { - const std::string& errorCode = e.GetErrorCode(); - ASSERT_EQ(errorCode, sdk::LOGE_REQUEST_ERROR); - std::cout << "ErrorMessage: " << e.GetMessage() << std::endl; - } - - // Machine to run the test might have accesibility to Internet. 
- client.SetSlsHost("cn-hangzhou.log.aliyuncs.com"); - try { - client.TestNetwork(); - ASSERT_TRUE(false); - } catch (const sdk::LOGException& e) { - const std::string errorCode = e.GetErrorCode(); - std::cout << errorCode << std::endl; - std::cout << e.GetMessage() << std::endl; - if (e.GetHttpCode() == 404) { - EXPECT_EQ(errorCode, sdk::LOGE_PROJECT_NOT_EXIST); - } else if (e.GetHttpCode() == 401) { - EXPECT_EQ(ConvertErrorCode(errorCode), SEND_UNAUTHORIZED); - } else if (e.GetHttpCode() == 400) { - EXPECT_EQ(ConvertErrorCode(errorCode), SEND_PARAMETER_INVALID); - } else { - std::cout << "HttpCode: " << e.GetHttpCode() << std::endl; - EXPECT_EQ(ConvertErrorCode(errorCode), SEND_NETWORK_ERROR); - } - } -} - -TEST_F(SDKClientUnittest, TestGetRealIp) { - sdk::Client client("cn-shanghai-corp.sls.aliyuncs.com", - STRING_FLAG(default_access_key_id), - STRING_FLAG(default_access_key), - INT32_FLAG(sls_client_send_timeout), - "192.168.1.1", - ""); - logtail::sdk::GetRealIpResponse resp = client.GetRealIp(); - std::cout << "realIp: " << resp.realIp << std::endl; - EXPECT_GT(resp.realIp.size(), 0L); - - client.SetSlsHost("cn-shanghai.sls.aliyuncs.com"); - resp = client.GetRealIp(); - std::cout << "realIp: " << resp.realIp << std::endl; - EXPECT_EQ(resp.realIp.size(), 0L); -} - -/* -TEST_F(SDKClientUnittest, PostLogstoreLogsSuccessOpenSource) { - std::string uid = ""; - std::string accessKeyId = ""; - std::string accessKey = ""; - std::string region = "cn-wulanchabu"; - std::string project = ""; - std::string logstore = ""; - sdk::Client client("cn-wulanchabu.log.aliyuncs.com", - accessKeyId, - accessKey, - INT32_FLAG(sls_client_send_timeout), - "192.168.1.1", - ""); - SLSControl::Instance()->SetSlsSendClientCommonParam(&client); - client.SetKeyProvider(""); - sls_logs::LogGroup logGroup; - - logGroup.set_source("192.168.1.1"); - logGroup.set_category(logstore); - logGroup.set_topic("unittest"); - - sls_logs::Log* log = logGroup.add_logs(); - log->set_time(time(NULL)); - 
sls_logs::Log_Content* content = nullptr; - content = log->add_contents(); - content->set_key("kk1"); - content->set_value("vv1"); - content = log->add_contents(); - content->set_key("kk2"); - content->set_value("vv2"); - - std::string oriData; - logGroup.SerializeToString(&oriData); - int32_t logSize = (int32_t)logGroup.logs_size(); - time_t curTime = time(NULL); - sls_logs::SlsCompressType compressType = sls_logs::SLS_CMP_ZSTD; - - LogGroupContext logGroupContext(region, project, logstore, compressType); - - LoggroupTimeValue* data = new LoggroupTimeValue(project, - logstore, - "ut-config", - "ut.log", - false, - uid, - "cn-huhehaote", - LOGGROUP_COMPRESSED, - logSize, - oriData.size(), - curTime, - "", - 0, - logGroupContext); - - ASSERT_TRUE(CompressData(compressType, oriData, data->mLogData)); - - try { - sdk::PostLogStoreLogsResponse resp = client.PostLogStoreLogs( - data->mProjectName, data->mLogstore, data->mLogGroupContext.mCompressType, data->mLogData, data->mRawSize); - std::cout << resp.requestId << "," << resp.statusCode << "," << resp.bodyBytes << std::endl; - } catch (const sdk::LOGException& e) { - const std::string& errorCode = e.GetErrorCode(); - std::cerr << "errorCode:" << errorCode << " errorMessage: " << e.GetMessage() << std::endl; - if (e.GetMessage().find("x-log-compresstype : zstd") == std::string::npos) { // ignore compresstype error - ASSERT_TRUE(false); - } - std::cerr << "compresstype zstd is not supported, fallback to lz4" << std::endl; - } - - // fallback to lz4 - ASSERT_TRUE(UncompressData(compressType, data->mLogData, data->mRawSize, oriData)); - - compressType = sls_logs::SLS_CMP_LZ4; - - logGroupContext.mCompressType = compressType; - - data->mLogGroupContext = logGroupContext; - - ASSERT_TRUE(CompressData(compressType, oriData, data->mLogData)); - - try { - sdk::PostLogStoreLogsResponse resp = client.PostLogStoreLogs( - data->mProjectName, data->mLogstore, data->mLogGroupContext.mCompressType, data->mLogData, data->mRawSize); - 
std::cout << resp.requestId << "," << resp.statusCode << "," << resp.bodyBytes << std::endl; - } catch (const sdk::LOGException& e) { - const std::string& errorCode = e.GetErrorCode(); - std::cerr << "errorCode:" << errorCode << " errorMessage: " << e.GetMessage() << std::endl; - ASSERT_TRUE(false); - } -} - -TEST_F(SDKClientUnittest, PostLogstoreLogsSuccessClosedSource) { - std::string uid = ""; - std::string accessKeyId = ""; // start with ## - std::string accessKey = ""; - std::string region = "cn-wulanchabu"; - std::string project = ""; - std::string logstore = ""; - sdk::Client client("cn-wulanchabu.log.aliyuncs.com", - accessKeyId, - accessKey, - INT32_FLAG(sls_client_send_timeout), - "192.168.1.1", - ""); - SLSControl::Instance()->SetSlsSendClientCommonParam(&client); - client.SetKeyProvider(sdk::MD5_SHA1_SALT_KEYPROVIDER); - sls_logs::LogGroup logGroup; - - logGroup.set_source("192.168.1.1"); - logGroup.set_category(logstore); - logGroup.set_topic("unittest"); - - sls_logs::Log* log = logGroup.add_logs(); - log->set_time(time(NULL)); - sls_logs::Log_Content* content = nullptr; - content = log->add_contents(); - content->set_key("kk1"); - content->set_value("vv1"); - content = log->add_contents(); - content->set_key("kk2"); - content->set_value("vv2"); - - std::string oriData; - logGroup.SerializeToString(&oriData); - int32_t logSize = (int32_t)logGroup.logs_size(); - time_t curTime = time(NULL); - - // try zstd first - sls_logs::SlsCompressType compressType = sls_logs::SLS_CMP_ZSTD; - - LogGroupContext logGroupContext(region, project, logstore, compressType); - - LoggroupTimeValue* data = new LoggroupTimeValue(project, - logstore, - "ut-config", - "ut.log", - false, - uid, - "cn-huhehaote", - LOGGROUP_COMPRESSED, - logSize, - oriData.size(), - curTime, - "", - 0, - logGroupContext); - - ASSERT_TRUE(CompressData(compressType, oriData, data->mLogData)); - try { - sdk::PostLogStoreLogsResponse resp = client.PostLogStoreLogs( - data->mProjectName, 
data->mLogstore, data->mLogGroupContext.mCompressType, data->mLogData, data->mRawSize); - std::cout << resp.requestId << "," << resp.statusCode << "," << resp.bodyBytes << std::endl; - } catch (const sdk::LOGException& e) { - const std::string& errorCode = e.GetErrorCode(); - std::cerr << "errorCode:" << errorCode << " errorMessage: " << e.GetMessage() << std::endl; - if (e.GetMessage().find("x-log-compresstype : zstd") == std::string::npos) { // ignore compresstype error - ASSERT_TRUE(false); - } - std::cerr << "compresstype zstd is not supported, fallback to lz4" << std::endl; - } - - // fallback to lz4 - ASSERT_TRUE(UncompressData(compressType, data->mLogData, data->mRawSize, oriData)); - - compressType = sls_logs::SLS_CMP_LZ4; - - logGroupContext.mCompressType = compressType; - - data->mLogGroupContext = logGroupContext; - - ASSERT_TRUE(CompressData(compressType, oriData, data->mLogData)); - - try { - sdk::PostLogStoreLogsResponse resp = client.PostLogStoreLogs( - data->mProjectName, data->mLogstore, data->mLogGroupContext.mCompressType, data->mLogData, data->mRawSize); - std::cout << resp.requestId << "," << resp.statusCode << "," << resp.bodyBytes << std::endl; - } catch (const sdk::LOGException& e) { - const std::string& errorCode = e.GetErrorCode(); - std::cerr << "errorCode:" << errorCode << " errorMessage: " << e.GetMessage() << std::endl; - ASSERT_TRUE(false); - } -} -*/ -} // namespace logtail - -UNIT_TEST_MAIN diff --git a/core/unittest/sender/FlusherRunnerUnittest.cpp b/core/unittest/sender/FlusherRunnerUnittest.cpp index e0ce09cf66..eac4f5ff70 100644 --- a/core/unittest/sender/FlusherRunnerUnittest.cpp +++ b/core/unittest/sender/FlusherRunnerUnittest.cpp @@ -32,6 +32,10 @@ class FlusherRunnerUnittest : public ::testing::Test { void TestPushToHttpSink(); protected: + static void SetUpTestCase() { + AppConfig::GetInstance()->mSendRequestGlobalConcurrency = 10; + } + void TearDown() override { SenderQueueManager::GetInstance()->Clear(); 
HttpSink::GetInstance()->mQueue.Clear(); @@ -48,8 +52,6 @@ void FlusherRunnerUnittest::TestDispatch() { flusher->SetMetricsRecordRef("name", "1"); flusher->Init(Json::Value(), tmp); - AppConfig::GetInstance()->mSendRequestGlobalConcurrency = 10; - auto item = make_unique("content", 10, flusher.get(), flusher->GetQueueKey()); auto realItem = item.get(); flusher->PushToQueue(std::move(item)); diff --git a/core/unittest/serializer/SLSSerializerUnittest.cpp b/core/unittest/serializer/SLSSerializerUnittest.cpp index 75a36a307d..d542ec95cb 100644 --- a/core/unittest/serializer/SLSSerializerUnittest.cpp +++ b/core/unittest/serializer/SLSSerializerUnittest.cpp @@ -218,7 +218,7 @@ void SLSSerializerUnittest::TestSerializeEventGroup() { // span string res, errorMsg; auto events = CreateBatchedSpanEvents(); - APSARA_TEST_EQUAL(events.mEvents.size(), 1); + APSARA_TEST_EQUAL(events.mEvents.size(), 1U); APSARA_TEST_TRUE(events.mEvents[0]->GetType() == PipelineEvent::Type::SPAN); APSARA_TEST_TRUE(serializer.DoSerialize(std::move(events), res, errorMsg)); sls_logs::LogGroup logGroup; @@ -256,7 +256,7 @@ void SLSSerializerUnittest::TestSerializeEventGroup() { std::istringstream s(attrs); bool ret = Json::parseFromStream(readerBuilder, s, &jsonVal, &errs); APSARA_TEST_TRUE(ret); - APSARA_TEST_EQUAL(jsonVal.size(), 10); + APSARA_TEST_EQUAL(jsonVal.size(), 10U); APSARA_TEST_EQUAL(jsonVal["rpcType"].asString(), "25"); APSARA_TEST_EQUAL(jsonVal["scope-tag-0"].asString(), "scope-value-0"); // APSARA_TEST_EQUAL(logGroup.logs(0).contents(7).value(), ""); @@ -268,7 +268,7 @@ void SLSSerializerUnittest::TestSerializeEventGroup() { std::istringstream ss(linksStr); ret = Json::parseFromStream(readerBuilder, ss, &jsonVal, &errs); APSARA_TEST_TRUE(ret); - APSARA_TEST_EQUAL(jsonVal.size(), 1); + APSARA_TEST_EQUAL(jsonVal.size(), 1U); for (auto& link : jsonVal) { APSARA_TEST_EQUAL(link["spanId"].asString(), "inner-link-spanid"); APSARA_TEST_EQUAL(link["traceId"].asString(), "inner-link-traceid"); 
@@ -280,7 +280,7 @@ void SLSSerializerUnittest::TestSerializeEventGroup() { std::istringstream sss(eventsStr); ret = Json::parseFromStream(readerBuilder, sss, &jsonVal, &errs); APSARA_TEST_TRUE(ret); - APSARA_TEST_EQUAL(jsonVal.size(), 1); + APSARA_TEST_EQUAL(jsonVal.size(), 1U); for (auto& event : jsonVal) { APSARA_TEST_EQUAL(event["name"].asString(), "inner-event"); APSARA_TEST_EQUAL(event["timestamp"].asString(), "1000"); diff --git a/docs/cn/developer-guide/plugin-development/native-plugins/how-to-write-native-flusher-plugins.md b/docs/cn/developer-guide/plugin-development/native-plugins/how-to-write-native-flusher-plugins.md index 031b1ed76c..4b62fe91f6 100644 --- a/docs/cn/developer-guide/plugin-development/native-plugins/how-to-write-native-flusher-plugins.md +++ b/docs/cn/developer-guide/plugin-development/native-plugins/how-to-write-native-flusher-plugins.md @@ -24,7 +24,7 @@ public: class HttpFlusher : public Flusher { public: // 用于将待发送数据打包成http请求 - virtual bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem) const = 0; + virtual bool BuildRequest(SenderQueueItem* item, std::unique_ptr& req, bool* keepItem, std::string* errMsg) = 0; // 用于发送完成后进行记录和处理 virtual void OnSendDone(const HttpResponse& response, SenderQueueItem* item) = 0; }; From bd3ad057596e713df36e37f487de135273df0dc0 Mon Sep 17 00:00:00 2001 From: quzard <1191890118@qq.com> Date: Tue, 31 Dec 2024 16:35:39 +0800 Subject: [PATCH 12/12] fix GetLastLine core (#2000) --- core/file_server/reader/LogFileReader.cpp | 178 ++-- core/file_server/reader/LogFileReader.h | 22 +- .../reader/GetLastLineDataUnittest.cpp | 119 ++- .../RemoveLastIncompleteLogUnittest.cpp | 854 +++++++++++++++++- 4 files changed, 1052 insertions(+), 121 deletions(-) diff --git a/core/file_server/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp index d5024ca48c..6e3822e470 100644 --- a/core/file_server/reader/LogFileReader.cpp +++ b/core/file_server/reader/LogFileReader.cpp @@ -350,26 
+350,25 @@ void LogFileReader::InitReader(bool tailExisted, FileReadPolicy policy, uint32_t namespace detail { - void updatePrimaryCheckpoint(const std::string& key, PrimaryCheckpointPB& cpt, const std::string& field) { - cpt.set_update_time(time(NULL)); - if (CheckpointManagerV2::GetInstance()->SetPB(key, cpt)) { - LOG_INFO(sLogger, ("update primary checkpoint", key)("field", field)("checkpoint", cpt.DebugString())); - } else { - LOG_WARNING(sLogger, - ("update primary checkpoint error", key)("field", field)("checkpoint", cpt.DebugString())); - } +void updatePrimaryCheckpoint(const std::string& key, PrimaryCheckpointPB& cpt, const std::string& field) { + cpt.set_update_time(time(NULL)); + if (CheckpointManagerV2::GetInstance()->SetPB(key, cpt)) { + LOG_INFO(sLogger, ("update primary checkpoint", key)("field", field)("checkpoint", cpt.DebugString())); + } else { + LOG_WARNING(sLogger, ("update primary checkpoint error", key)("field", field)("checkpoint", cpt.DebugString())); } +} - std::pair getPartitionRange(size_t idx, size_t concurrency, size_t totalPartitionCount) { - auto base = totalPartitionCount / concurrency; - auto extra = totalPartitionCount % concurrency; - if (extra == 0) { - return std::make_pair(idx * base, (idx + 1) * base - 1); - } - size_t min = idx <= extra ? idx * (base + 1) : extra * (base + 1) + (idx - extra) * base; - size_t max = idx < extra ? min + base : min + base - 1; - return std::make_pair(min, max); +std::pair getPartitionRange(size_t idx, size_t concurrency, size_t totalPartitionCount) { + auto base = totalPartitionCount / concurrency; + auto extra = totalPartitionCount % concurrency; + if (extra == 0) { + return std::make_pair(idx * base, (idx + 1) * base - 1); } + size_t min = idx <= extra ? idx * (base + 1) : extra * (base + 1) + (idx - extra) * base; + size_t max = idx < extra ? 
min + base : min + base - 1; + return std::make_pair(min, max); +} } // namespace detail @@ -688,7 +687,7 @@ void LogFileReader::SetFilePosBackwardToFixedPos(LogFileOperator& op) { void LogFileReader::checkContainerType(LogFileOperator& op) { // 判断container类型 - char containerBOMBuffer[1] = {0}; + char containerBOMBuffer[2] = {0}; size_t readBOMByte = 1; int64_t filePos = 0; TruncateInfo* truncateInfo = NULL; @@ -1992,7 +1991,8 @@ LogFileReader::FileCompareResult LogFileReader::CompareToFile(const string& file 3. continue\nend\ncontinue\nend\n -> continue\nxxx\nend 5. mLogEndRegPtr != NULL 1. xxx\nend\n -> xxx\nend - 1. xxx\nend\nxxx\n -> xxx\nend + 2. xxx\nend\nxxx\n -> xxx\nend + 3. xxx\nend -> "" */ /* return: the number of bytes left, including \n @@ -2010,6 +2010,7 @@ LogFileReader::RemoveLastIncompleteLog(char* buffer, int32_t size, int32_t& roll } rollbackLineFeedCount = 0; // Multiline rollback + bool foundEnd = false; if (mMultilineConfig.first->IsMultiline()) { std::string exception; while (endPs >= 0) { @@ -2020,6 +2021,8 @@ LogFileReader::RemoveLastIncompleteLog(char* buffer, int32_t size, int32_t& roll content.data.size(), *mMultilineConfig.first->GetEndPatternReg(), exception)) { + rollbackLineFeedCount += content.forceRollbackLineFeedCount; + foundEnd = true; // Ensure the end line is complete if (buffer[content.lineEnd] == '\n') { return content.lineEnd + 1; @@ -2031,14 +2034,19 @@ LogFileReader::RemoveLastIncompleteLog(char* buffer, int32_t size, int32_t& roll *mMultilineConfig.first->GetStartPatternReg(), exception)) { // start + continue, start + rollbackLineFeedCount += content.forceRollbackLineFeedCount; rollbackLineFeedCount += content.rollbackLineFeedCount; // Keep all the buffer if rollback all return content.lineBegin; } + rollbackLineFeedCount += content.forceRollbackLineFeedCount; rollbackLineFeedCount += content.rollbackLineFeedCount; endPs = content.lineBegin - 1; } } + if (mMultilineConfig.first->GetEndPatternReg() && foundEnd) { + 
return 0; + } // Single line rollback or all unmatch rollback rollbackLineFeedCount = 0; if (buffer[size - 1] == '\n') { @@ -2048,11 +2056,13 @@ LogFileReader::RemoveLastIncompleteLog(char* buffer, int32_t size, int32_t& roll } LineInfo content = GetLastLine(StringView(buffer, size), endPs, true); // 最后一行是完整行,且以 \n 结尾 - if (content.fullLine && buffer[endPs] == '\n') { - return size; + if (content.fullLine && buffer[content.lineEnd] == '\n') { + rollbackLineFeedCount += content.forceRollbackLineFeedCount; + return content.lineEnd + 1; } content = GetLastLine(StringView(buffer, size), endPs, false); - rollbackLineFeedCount = content.rollbackLineFeedCount; + rollbackLineFeedCount += content.forceRollbackLineFeedCount; + rollbackLineFeedCount += content.rollbackLineFeedCount; return content.lineBegin; } @@ -2186,32 +2196,31 @@ StringBuffer* BaseLineParse::GetStringBuffer() { return &mStringBuffer; } +/* + params: + buffer: all read logs + end: the end position of current line, \n or \0 + return: + last line (backward), without \n or \0 +*/ LineInfo RawTextParser::GetLastLine(StringView buffer, int32_t end, size_t protocolFunctionIndex, bool needSingleLine, std::vector* lineParsers) { if (end == 0) { - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + return LineInfo(StringView(), 0, 0, 0, false, 0); } if (protocolFunctionIndex != 0) { - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + return LineInfo(StringView(), 0, 0, 0, false, 0); } for (int32_t begin = end; begin > 0; --begin) { - if (begin == 0 || buffer[begin - 1] == '\n') { - return {.data = StringView(buffer.data() + begin, end - begin), - .lineBegin = begin, - .lineEnd = end, - .rollbackLineFeedCount = 1, - .fullLine = true}; - } - } - return {.data = StringView(buffer.data(), end), - .lineBegin = 0, - .lineEnd = end, - .rollbackLineFeedCount = 1, - .fullLine = true}; + if (buffer[begin - 1] 
== '\n') { + return LineInfo(StringView(buffer.data() + begin, end - begin), begin, end, 1, true, 0); + } + } + return LineInfo(StringView(buffer.data(), end), 0, end, 1, true, 0); } LineInfo DockerJsonFileParser::GetLastLine(StringView buffer, @@ -2220,38 +2229,41 @@ LineInfo DockerJsonFileParser::GetLastLine(StringView buffer, bool needSingleLine, std::vector* lineParsers) { if (end == 0) { - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + return LineInfo(StringView(), 0, 0, 0, false, 0); } if (protocolFunctionIndex == 0) { // 异常情况, DockerJsonFileParse不允许在最后一个解析器 - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + return LineInfo(StringView(), 0, 0, 0, false, 0); } size_t nextProtocolFunctionIndex = protocolFunctionIndex - 1; - LineInfo finalLine - = {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + LineInfo finalLine; while (!finalLine.fullLine) { LineInfo rawLine = (*lineParsers)[nextProtocolFunctionIndex]->GetLastLine( buffer, end, nextProtocolFunctionIndex, needSingleLine, lineParsers); - if (rawLine.data.back() == '\n') { + if (rawLine.data.size() > 0 && rawLine.data.back() == '\n') { rawLine.data = StringView(rawLine.data.data(), rawLine.data.size() - 1); } - LineInfo line - = {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + LineInfo line; parseLine(rawLine, line); - finalLine.data = line.data; - finalLine.fullLine = line.fullLine; - finalLine.lineBegin = line.lineBegin; - finalLine.rollbackLineFeedCount += line.rollbackLineFeedCount; - finalLine.dataRaw = line.dataRaw; - if (finalLine.lineEnd == 0) { - finalLine.lineEnd = line.lineEnd; + int32_t rollbackLineFeedCount = 0; + int32_t forceRollbackLineFeedCount = 0; + if (line.fullLine) { + rollbackLineFeedCount = line.rollbackLineFeedCount; + forceRollbackLineFeedCount = 
finalLine.forceRollbackLineFeedCount; + } else { + forceRollbackLineFeedCount + = finalLine.forceRollbackLineFeedCount + line.forceRollbackLineFeedCount + line.rollbackLineFeedCount; + rollbackLineFeedCount = 0; } + finalLine = std::move(line); + finalLine.rollbackLineFeedCount = rollbackLineFeedCount; + finalLine.forceRollbackLineFeedCount = forceRollbackLineFeedCount; if (!finalLine.fullLine) { if (finalLine.lineBegin == 0) { - finalLine.data = StringView(); - return finalLine; + return LineInfo( + StringView(), 0, 0, finalLine.rollbackLineFeedCount, false, finalLine.forceRollbackLineFeedCount); } end = finalLine.lineBegin - 1; } @@ -2262,7 +2274,9 @@ LineInfo DockerJsonFileParser::GetLastLine(StringView buffer, bool DockerJsonFileParser::parseLine(LineInfo rawLine, LineInfo& paseLine) { paseLine = rawLine; paseLine.fullLine = false; - + if (rawLine.data.size() == 0) { + return false; + } rapidjson::Document doc; doc.Parse(rawLine.data.data(), rawLine.data.size()); @@ -2300,40 +2314,44 @@ LineInfo ContainerdTextParser::GetLastLine(StringView buffer, bool needSingleLine, std::vector* lineParsers) { if (end == 0) { - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + return LineInfo(StringView(), 0, 0, 0, false, 0); } if (protocolFunctionIndex == 0) { - // 异常情况, DockerJsonFileParse不允许在最后一个解析器 - return {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + // 异常情况, ContainerdTextParser不允许在最后一个解析器 + return LineInfo(StringView(), 0, 0, 0, false, 0); } - LineInfo finalLine - = {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; - // 跳过最后的连续P + LineInfo finalLine; + finalLine.fullLine = false; size_t nextProtocolFunctionIndex = protocolFunctionIndex - 1; + // 跳过最后的连续P while (!finalLine.fullLine) { LineInfo rawLine = (*lineParsers)[nextProtocolFunctionIndex]->GetLastLine( buffer, end, nextProtocolFunctionIndex, 
needSingleLine, lineParsers); - if (rawLine.data.back() == '\n') { + if (rawLine.data.size() > 0 && rawLine.data.back() == '\n') { rawLine.data = StringView(rawLine.data.data(), rawLine.data.size() - 1); } - LineInfo line - = {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + LineInfo line; parseLine(rawLine, line); - // containerd 不需要外层协议的 dataRaw - finalLine.data = line.data; - finalLine.fullLine = line.fullLine; - finalLine.lineBegin = line.lineBegin; - finalLine.rollbackLineFeedCount += line.rollbackLineFeedCount; - mergeLines(finalLine, finalLine, true); - if (finalLine.lineEnd == 0) { - finalLine.lineEnd = line.lineEnd; + int32_t rollbackLineFeedCount = 0; + int32_t forceRollbackLineFeedCount = 0; + if (line.fullLine) { + rollbackLineFeedCount = line.rollbackLineFeedCount; + forceRollbackLineFeedCount = finalLine.forceRollbackLineFeedCount; + } else { + forceRollbackLineFeedCount + = finalLine.forceRollbackLineFeedCount + line.forceRollbackLineFeedCount + line.rollbackLineFeedCount; + rollbackLineFeedCount = 0; } + finalLine = std::move(line); + finalLine.rollbackLineFeedCount = rollbackLineFeedCount; + finalLine.forceRollbackLineFeedCount = forceRollbackLineFeedCount; + mergeLines(finalLine, finalLine, true); if (!finalLine.fullLine) { if (finalLine.lineBegin == 0) { - finalLine.data = StringView(); - return finalLine; + return LineInfo( + StringView(), 0, 0, finalLine.rollbackLineFeedCount, false, finalLine.forceRollbackLineFeedCount); } end = finalLine.lineBegin - 1; } @@ -2353,11 +2371,10 @@ LineInfo ContainerdTextParser::GetLastLine(StringView buffer, break; } - LineInfo previousLine - = {.data = StringView(), .lineBegin = 0, .lineEnd = 0, .rollbackLineFeedCount = 0, .fullLine = false}; + LineInfo previousLine; LineInfo rawLine = (*lineParsers)[nextProtocolFunctionIndex]->GetLastLine( buffer, finalLine.lineBegin - 1, nextProtocolFunctionIndex, needSingleLine, lineParsers); - if (rawLine.data.back() == 
'\n') { + if (rawLine.data.size() > 0 && rawLine.data.back() == '\n') { rawLine.data = StringView(rawLine.data.data(), rawLine.data.size() - 1); } @@ -2394,9 +2411,10 @@ void ContainerdTextParser::parseLine(LineInfo rawLine, LineInfo& paseLine) { const char* lineEnd = rawLine.data.data() + rawLine.data.size(); paseLine = rawLine; paseLine.fullLine = true; - + if (rawLine.data.size() == 0) { + return; + } // 寻找第一个分隔符位置 time - StringView timeValue; const char* pch1 = std::find(rawLine.data.data(), lineEnd, ProcessorParseContainerLogNative::CONTAINERD_DELIMITER); if (pch1 == lineEnd) { return; @@ -2412,6 +2430,10 @@ void ContainerdTextParser::parseLine(LineInfo rawLine, LineInfo& paseLine) { return; } // 如果既不以 P 开头,也不以 F 开头 + if (pch2 + 1 >= lineEnd) { + paseLine.data = StringView(pch2 + 1, lineEnd - pch2 - 1); + return; + } if (*(pch2 + 1) != ProcessorParseContainerLogNative::CONTAINERD_PART_TAG && *(pch2 + 1) != ProcessorParseContainerLogNative::CONTAINERD_FULL_TAG) { paseLine.data = StringView(pch2 + 1, lineEnd - pch2 - 1); diff --git a/core/file_server/reader/LogFileReader.h b/core/file_server/reader/LogFileReader.h index 7c62347882..a1a9a2bd49 100644 --- a/core/file_server/reader/LogFileReader.h +++ b/core/file_server/reader/LogFileReader.h @@ -31,16 +31,16 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/memory/SourceBuffer.h" -#include "file_server/event/Event.h" #include "file_server/FileDiscoveryOptions.h" #include "file_server/FileServer.h" #include "file_server/MultilineOptions.h" -#include "protobuf/sls/sls_logs.pb.h" +#include "file_server/event/Event.h" +#include "file_server/reader/FileReaderOptions.h" #include "logger/Logger.h" #include "models/StringView.h" #include "pipeline/queue/QueueKey.h" +#include "protobuf/sls/sls_logs.pb.h" #include "rapidjson/allocators.h" -#include "file_server/reader/FileReaderOptions.h" namespace logtail { @@ -57,6 +57,19 @@ struct LineInfo { int32_t lineEnd; int32_t rollbackLineFeedCount; 
bool fullLine; + int32_t forceRollbackLineFeedCount; + LineInfo(StringView data = StringView(), + int32_t lineBegin = 0, + int32_t lineEnd = 0, + int32_t rollbackLineFeedCount = 0, + bool fullLine = false, + int32_t forceRollbackLineFeedCount = 0) + : data(data), + lineBegin(lineBegin), + lineEnd(lineEnd), + rollbackLineFeedCount(rollbackLineFeedCount), + fullLine(fullLine), + forceRollbackLineFeedCount(forceRollbackLineFeedCount) {} }; class BaseLineParse { @@ -234,7 +247,7 @@ class LogFileReader { /// @return e.g. `/home/admin/access.log` const std::string& GetConvertedPath() const; - + const std::string& GetHostLogPathFile() const { return mHostLogPathFile; } int64_t GetFileSize() const { return mLastFileSize; } @@ -686,6 +699,7 @@ class LogFileReader { friend class LogSplitNoDiscardUnmatchUnittest; friend class RemoveLastIncompleteLogMultilineUnittest; friend class LogFileReaderCheckpointUnittest; + friend class GetLastLineUnittest; friend class LastMatchedContainerdTextLineUnittest; friend class LastMatchedDockerJsonFileUnittest; friend class LastMatchedContainerdTextWithDockerJsonUnittest; diff --git a/core/unittest/reader/GetLastLineDataUnittest.cpp b/core/unittest/reader/GetLastLineDataUnittest.cpp index 761c225a57..5b05dd9173 100644 --- a/core/unittest/reader/GetLastLineDataUnittest.cpp +++ b/core/unittest/reader/GetLastLineDataUnittest.cpp @@ -13,8 +13,8 @@ // limitations under the License. 
#include "common/FileSystemUtil.h" -#include "file_server/reader/LogFileReader.h" #include "common/memory/SourceBuffer.h" +#include "file_server/reader/LogFileReader.h" #include "unittest/Unittest.h" namespace logtail { @@ -104,7 +104,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag存在,第三个空格不存在 @@ -122,7 +123,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag不存在,第二个空格存在 @@ -197,7 +199,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag存在,第三个空格不存在 @@ -215,7 +218,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag不存在,第二个空格存在 @@ -289,7 +293,8 @@ void 
LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("789", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(3, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(true, line.fullLine); } // case: F + P + P + '\n' @@ -308,7 +313,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("789", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(3, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(true, line.fullLine); } @@ -483,7 +489,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: P + P + '\n' @@ -502,7 +509,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineSingleLine APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } } @@ -516,6 +524,45 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { BaseLineParse* baseLineParsePtr = nullptr; baseLineParsePtr = logFileReader.GetParser(LogFileReader::BUFFER_SIZE); logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { + { + std::string testLog = "\n2024-01-05T23:28:06.818486411+08:00 stdout P 123123\n"; + + int32_t size = testLog.size(); + int32_t 
endPs; // the position of \n or \0 + if (testLog[size - 1] == '\n') { + endPs = size - 1; + } else { + endPs = size; + } + LineInfo line = logFileReader.GetLastLine(testLog, endPs); + + APSARA_TEST_EQUAL("", line.data.to_string()); + APSARA_TEST_EQUAL(0, line.lineBegin); + APSARA_TEST_EQUAL(0, line.lineEnd); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); + APSARA_TEST_EQUAL(true, line.fullLine); + } + { + std::string testLog = "\n2024-01-05T23:28:06.818486411+08:00 stdout F 123123\n"; + + int32_t size = testLog.size(); + int32_t endPs; // the position of \n or \0 + if (testLog[size - 1] == '\n') { + endPs = size - 1; + } else { + endPs = size; + } + LineInfo line = logFileReader.GetLastLine(testLog, endPs); + + APSARA_TEST_EQUAL("123123", line.data.to_string()); + APSARA_TEST_EQUAL(1, line.lineBegin); + APSARA_TEST_EQUAL(endPs, line.lineEnd); + APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(true, line.fullLine); + } + } // 异常情况+有回车 { // case: PartLogFlag存在,第三个空格存在但空格后无内容 @@ -533,7 +580,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag存在,第三个空格不存在 @@ -551,7 +599,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag不存在,第二个空格存在 @@ -626,7 +675,8 @@ void 
LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag存在,第三个空格不存在 @@ -644,7 +694,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } // case: PartLogFlag不存在,第二个空格存在 @@ -718,7 +769,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("789", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(3, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(true, line.fullLine); } // case: F + P + P + '\n' @@ -737,7 +789,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("789", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(3, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(true, line.fullLine); } @@ -912,7 +965,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, 
line.fullLine); } // case: P + P + '\n' @@ -931,7 +985,8 @@ void LastMatchedContainerdTextLineUnittest::TestLastContainerdTextLineMerge() { APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); - APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(false, line.fullLine); } } @@ -1031,7 +1086,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1048,7 +1104,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1065,7 +1122,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1082,7 +1140,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + 
APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1124,7 +1183,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1143,7 +1203,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1162,7 +1223,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(false, line.fullLine); @@ -1181,7 +1243,8 @@ void LastMatchedDockerJsonFileUnittest::TestLastDockerJsonFile() { endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(1, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL("", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); 
APSARA_TEST_EQUAL(false, line.fullLine); @@ -1294,7 +1357,8 @@ void LastMatchedContainerdTextWithDockerJsonUnittest::TestContainerdTextWithDock endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(4, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(R"(Exception in thread "main" java.lang.NullPoinntterException)", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(true, line.fullLine); @@ -1344,7 +1408,8 @@ void LastMatchedContainerdTextWithDockerJsonUnittest::TestDockerJsonWithContaine endPs = size; } LineInfo line = logFileReader.GetLastLine(testLog, endPs); - APSARA_TEST_EQUAL(3, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(2, line.rollbackLineFeedCount); + APSARA_TEST_EQUAL(1, line.forceRollbackLineFeedCount); APSARA_TEST_EQUAL(R"(Exception in thread "main" java.lang.NullPoinntterException)", line.data.to_string()); APSARA_TEST_EQUAL(0, line.lineBegin); APSARA_TEST_EQUAL(true, line.fullLine); diff --git a/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp b/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp index 5ce57053b3..8e514a08d2 100644 --- a/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp +++ b/core/unittest/reader/RemoveLastIncompleteLogUnittest.cpp @@ -15,6 +15,8 @@ #include "common/FileSystemUtil.h" #include "common/memory/SourceBuffer.h" #include "file_server/reader/LogFileReader.h" +#include "rapidjson/stringbuffer.h" +#include "rapidjson/writer.h" #include "unittest/Unittest.h" namespace logtail { @@ -186,8 +188,8 @@ void RemoveLastIncompleteLogUnittest::TestMultiline() { } { // case empty string std::string expectMatch = ""; - std::string testLog2 = expectMatch + ""; int32_t rollbackLineFeedCount = 0; + std::string testLog2 = expectMatch + ""; int32_t matchSize = logFileReader.RemoveLastIncompleteLog( const_cast(testLog2.data()), testLog2.size(), 
rollbackLineFeedCount); APSARA_TEST_EQUAL_FATAL(int32_t(expectMatch.size()), matchSize); @@ -417,37 +419,865 @@ void RemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithEn "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); // logFileReader.mDiscardUnmatch = true; { // case: end with end + { + std::string expectMatch = LOG_UNMATCH + "\n" + LOG_UNMATCH + "\n" + LOG_END_STRING + '\n'; + std::string testLog = std::string(expectMatch.data()); + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + APSARA_TEST_EQUAL_FATAL(static_cast(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL_FATAL(std::string(testLog.data(), matchSize), expectMatch); + APSARA_TEST_EQUAL_FATAL(0, rollbackLineFeedCount); + } + { + std::string expectMatch = LOG_UNMATCH + "\n" + LOG_UNMATCH + "\n" + LOG_END_STRING; + std::string testLog = std::string(expectMatch.data()); + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + APSARA_TEST_EQUAL(0, matchSize); + APSARA_TEST_EQUAL(std::string(testLog.data(), matchSize), ""); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + } + { // case: end with unmatch std::string expectMatch = LOG_UNMATCH + "\n" + LOG_UNMATCH + "\n" + LOG_END_STRING + '\n'; - std::string testLog = std::string(expectMatch.data()); + std::string testLog = expectMatch + LOG_UNMATCH + "\n"; int32_t rollbackLineFeedCount = 0; int32_t matchSize = logFileReader.RemoveLastIncompleteLog( const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); APSARA_TEST_EQUAL_FATAL(static_cast(expectMatch.size()), matchSize); APSARA_TEST_EQUAL_FATAL(std::string(testLog.data(), matchSize), expectMatch); - APSARA_TEST_EQUAL_FATAL(0, rollbackLineFeedCount); + APSARA_TEST_EQUAL_FATAL(1, 
rollbackLineFeedCount); + } + { // case: all unmatch + { + std::string expectMatch = "\n\n"; + std::string testLog = expectMatch + LOG_UNMATCH; + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + APSARA_TEST_EQUAL_FATAL(static_cast(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL_FATAL(std::string(testLog.data(), matchSize), expectMatch); + APSARA_TEST_EQUAL_FATAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n" + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch; + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + APSARA_TEST_EQUAL(static_cast(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(std::string(testLog.data(), matchSize), expectMatch); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + } +} + +class GetLastLineUnittest : public ::testing::Test { +public: + void TestGetLastLine(); + void TestGetLastLineEmpty(); + +private: + FileReaderOptions readerOpts; + PipelineContext ctx; +}; + +UNIT_TEST_CASE(GetLastLineUnittest, TestGetLastLine); +UNIT_TEST_CASE(GetLastLineUnittest, TestGetLastLineEmpty); + +void GetLastLineUnittest::TestGetLastLine() { + std::string testLog = "first line\nsecond line\nthird line"; + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(nullptr, &ctx)); + auto lastLine = logFileReader.GetLastLine(const_cast(testLog.data()), testLog.size()); + std::string expectLog = "third line"; + APSARA_TEST_EQUAL_FATAL(expectLog, std::string(lastLine.data.data(), lastLine.data.size())); +} + +void GetLastLineUnittest::TestGetLastLineEmpty() { + std::string testLog = ""; + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(nullptr, &ctx)); + auto lastLine = 
logFileReader.GetLastLine(const_cast(testLog.data()), testLog.size()); + APSARA_TEST_EQUAL_FATAL(0, int(lastLine.data.size())); + APSARA_TEST_EQUAL_FATAL("", std::string(lastLine.data.data(), lastLine.data.size())); + APSARA_TEST_EQUAL_FATAL(testLog.data(), lastLine.data); +} + +class ContainerdTextRemoveLastIncompleteLogMultilineUnittest : public ::testing::Test { +public: + void TestRemoveLastIncompleteLogWithBeginEnd(); + void TestRemoveLastIncompleteLogWithBegin(); + void TestRemoveLastIncompleteLogWithEnd(); + void SetUp() override { readerOpts.mInputType = FileReaderOptions::InputType::InputContainerStdio; } + +private: + FileReaderOptions readerOpts; + PipelineContext ctx; + const std::string LOG_PART = "2021-08-25T07:00:00.000000000Z stdout P "; + const std::string LOG_FULL = "2021-08-25T07:00:00.000000000Z stdout F "; + const std::string LOG_FULL_NOT_FOUND = "2021-08-25T07:00:00.000000000Z stdout "; + const std::string LOG_ERROR = "2021-08-25T07:00:00.000000000Z stdout"; + + const std::string LOG_BEGIN_STRING = "Exception in thread \"main\" java.lang.NullPointerException"; + const std::string LOG_BEGIN_REGEX = R"(Exception.*)"; + + const std::string LOG_END_STRING = " ...23 more"; + const std::string LOG_END_REGEX = R"(\s*\.\.\.\d+ more.*)"; + + const std::string LOG_UNMATCH = "unmatch log"; +}; + +UNIT_TEST_CASE(ContainerdTextRemoveLastIncompleteLogMultilineUnittest, TestRemoveLastIncompleteLogWithBeginEnd); +UNIT_TEST_CASE(ContainerdTextRemoveLastIncompleteLogMultilineUnittest, TestRemoveLastIncompleteLogWithBegin); +UNIT_TEST_CASE(ContainerdTextRemoveLastIncompleteLogMultilineUnittest, TestRemoveLastIncompleteLogWithEnd); + +void ContainerdTextRemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithBeginEnd() { + Json::Value config; + config["StartPattern"] = LOG_BEGIN_REGEX; + config["EndPattern"] = LOG_END_REGEX; + MultilineOptions multilineOpts; + multilineOpts.Init(config, ctx, ""); + LogFileReader logFileReader( + "dir", "file", 
DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); + BaseLineParse* baseLineParsePtr = nullptr; + baseLineParsePtr = logFileReader.GetParser(LogFileReader::BUFFER_SIZE); + logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { // case: end with begin end + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = std::string(expectMatch.data()); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + { // case: end with begin + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); } { // case: end with unmatch - std::string expectMatch = LOG_UNMATCH + "\n" + LOG_UNMATCH + "\n" + LOG_END_STRING + '\n'; - std::string testLog = expectMatch + LOG_UNMATCH + "\n"; + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + "\n"; + std::string testLog = expectMatch + LOG_FULL + LOG_UNMATCH + "\n"; + int32_t rollbackLineFeedCount = 0; int32_t matchSize = logFileReader.RemoveLastIncompleteLog( 
const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); - APSARA_TEST_EQUAL_FATAL(static_cast(expectMatch.size()), matchSize); - APSARA_TEST_EQUAL_FATAL(std::string(testLog.data(), matchSize), expectMatch); - APSARA_TEST_EQUAL_FATAL(1, rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); } { // case: all unmatch std::string expectMatch = "\n\n"; - std::string testLog = expectMatch + LOG_UNMATCH; + std::string testLog = expectMatch + LOG_FULL + LOG_UNMATCH; + int32_t rollbackLineFeedCount = 0; int32_t matchSize = logFileReader.RemoveLastIncompleteLog( const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); - APSARA_TEST_EQUAL_FATAL(static_cast(expectMatch.size()), matchSize); - APSARA_TEST_EQUAL_FATAL(std::string(testLog.data(), matchSize), expectMatch); - APSARA_TEST_EQUAL_FATAL(1, rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } +} + +void ContainerdTextRemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithBegin() { + Json::Value config; + config["StartPattern"] = LOG_BEGIN_REGEX; + MultilineOptions multilineOpts; + multilineOpts.Init(config, ctx, ""); + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); + BaseLineParse* baseLineParsePtr = nullptr; + baseLineParsePtr = logFileReader.GetParser(LogFileReader::BUFFER_SIZE); + logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { // case: end with begin + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog 
= expectMatch + LOG_PART + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = expectMatch + LOG_PART + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = 
logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + } + { // case: end with unmatch + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + } + { // case: all unmatch + { + std::string expectMatch = "\n\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + 
+ APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n"; + std::string testLog = expectMatch + LOG_FULL + LOG_UNMATCH; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch + LOG_PART + LOG_BEGIN_STRING + "\n" + LOG_PART + LOG_BEGIN_STRING + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch + LOG_PART + LOG_BEGIN_STRING + "\n" + LOG_PART + LOG_BEGIN_STRING; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + } + { // case: end with part log + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = 
expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_PART + LOG_BEGIN_STRING + "\n" + + LOG_PART + LOG_BEGIN_STRING + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = expectMatch + LOG_FULL + LOG_BEGIN_STRING + "\n" + LOG_PART + LOG_BEGIN_STRING + "\n" + + LOG_PART + LOG_BEGIN_STRING; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } } } +void ContainerdTextRemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithEnd() { + Json::Value config; + config["EndPattern"] = LOG_END_REGEX; + MultilineOptions multilineOpts; + multilineOpts.Init(config, ctx, ""); + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); + BaseLineParse* baseLineParsePtr = nullptr; + baseLineParsePtr = logFileReader.GetParser(LogFileReader::BUFFER_SIZE); + logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { // case: end with end + { + std::string expectMatch = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_PART + + LOG_END_STRING + '\n' + LOG_FULL + LOG_UNMATCH; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 
0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(0, matchSize); + APSARA_TEST_EQUAL("", matchLog); + APSARA_TEST_EQUAL(4, rollbackLineFeedCount); + } + { + std::string expectMatch = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_PART + + LOG_END_STRING + '\n' + LOG_FULL + LOG_UNMATCH + '\n'; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(0, matchSize); + APSARA_TEST_EQUAL("", matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + 
} + { // case: end with unmatch + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = expectMatch + LOG_FULL + LOG_UNMATCH + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { // case: all unmatch + { + std::string expectMatch = "\n\n"; + std::string testLog = expectMatch + LOG_FULL + LOG_UNMATCH; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n" + LOG_FULL + LOG_UNMATCH + "\n"; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + } + { // case: end with part log + { + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog + = expectMatch + LOG_PART + LOG_UNMATCH + "\n" + LOG_PART + LOG_UNMATCH + "\n" + LOG_PART + LOG_UNMATCH; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = 
logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = expectMatch + LOG_PART + LOG_UNMATCH + "\n" + LOG_PART + LOG_UNMATCH + "\n" + LOG_PART + + LOG_UNMATCH + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_UNMATCH + "\n" + LOG_FULL + LOG_END_STRING + '\n'; + std::string testLog = expectMatch + LOG_PART + LOG_UNMATCH + "\n" + LOG_PART + LOG_UNMATCH + "\n" + + "2021-08-25T07:00:00.000000000Z"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + } +} + +class DockerJsonRemoveLastIncompleteLogMultilineUnittest : public ::testing::Test { +public: + void TestRemoveLastIncompleteLogWithBeginEnd(); + void TestRemoveLastIncompleteLogWithBegin(); + void TestRemoveLastIncompleteLogWithEnd(); + void SetUp() override { readerOpts.mInputType = 
FileReaderOptions::InputType::InputContainerStdio; } + +private: + FileReaderOptions readerOpts; + PipelineContext ctx; + + const std::string LOG_BEGIN_STRING = "Exception in thread \"main\" java.lang.NullPointerException"; + const std::string LOG_BEGIN_REGEX = R"(Exception.*)"; + + const std::string LOG_END_STRING = " ...23 more"; + const std::string LOG_END_REGEX = R"(\s*\.\.\.\d+ more.*)"; + + const std::string LOG_UNMATCH = "unmatch log"; + + std::string BuildLog(const std::string& log, bool isNormalLog = true) { + if (isNormalLog) { + rapidjson::StringBuffer buffer; + rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); + writer.StartObject(); + writer.Key("log"); + writer.String((log + "\\n").c_str()); + writer.Key("stream"); + writer.String("stdout"); + writer.Key("time"); + writer.String("2024-02-19T03:49:37.793533014Z"); + writer.EndObject(); + return buffer.GetString(); + } else { + return R"({"log":")" + log + R"(\n","stream":"stdout","time":"2024-02-19T03:49:37.79)"; + } + } +}; + +UNIT_TEST_CASE(DockerJsonRemoveLastIncompleteLogMultilineUnittest, TestRemoveLastIncompleteLogWithBegin); +UNIT_TEST_CASE(DockerJsonRemoveLastIncompleteLogMultilineUnittest, TestRemoveLastIncompleteLogWithEnd); + +void DockerJsonRemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithBegin() { + Json::Value config; + config["StartPattern"] = LOG_BEGIN_REGEX; + MultilineOptions multilineOpts; + multilineOpts.Init(config, ctx, ""); + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); + BaseLineParse* baseLineParsePtr = nullptr; + baseLineParsePtr = logFileReader.GetParser(0); + logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { // case: end with begin + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH); + + int32_t
rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = 
std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + } + { // case: end with unmatch + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n"; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n"; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(2, rollbackLineFeedCount); + } + } + { // case: all unmatch + { + std::string expectMatch = "\n\n" + BuildLog(LOG_UNMATCH) + "\n"; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, 
rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n"; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + } + { // case: end with part log + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_BEGIN_STRING, false) + + "\n" + BuildLog(LOG_BEGIN_STRING, false) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_BEGIN_STRING) + "\n" + BuildLog(LOG_BEGIN_STRING, false) + + "\n" + BuildLog(LOG_BEGIN_STRING, false); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + } +} + +void 
DockerJsonRemoveLastIncompleteLogMultilineUnittest::TestRemoveLastIncompleteLogWithEnd() { + Json::Value config; + config["EndPattern"] = LOG_END_REGEX; + MultilineOptions multilineOpts; + multilineOpts.Init(config, ctx, ""); + LogFileReader logFileReader( + "dir", "file", DevInode(), std::make_pair(&readerOpts, &ctx), std::make_pair(&multilineOpts, &ctx)); + BaseLineParse* baseLineParsePtr = nullptr; + baseLineParsePtr = logFileReader.GetParser(0); + logFileReader.mLineParsers.emplace_back(baseLineParsePtr); + { // case: end with end + { + std::string expectMatch + = BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING); + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(0, matchSize); + APSARA_TEST_EQUAL("", matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING) + '\n'; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + } + { // case: end with unmatch + std::string expectMatch + = BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), 
rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { // case: all unmatch + { + std::string expectMatch = "\n\n"; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(1, rollbackLineFeedCount); + } + { + std::string expectMatch = "\n\n" + BuildLog(LOG_UNMATCH) + "\n"; + std::string testLog = expectMatch; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(0, rollbackLineFeedCount); + } + } + { // case: end with part log + { + std::string expectMatch + = BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH, false) + "\n" + + BuildLog(LOG_UNMATCH, false); + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = 
BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH, false) + "\n" + + BuildLog(LOG_UNMATCH, false) + "\n"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + { + std::string expectMatch + = BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_END_STRING) + '\n'; + std::string testLog = expectMatch + BuildLog(LOG_UNMATCH) + "\n" + BuildLog(LOG_UNMATCH, false) + "\n" + + "2021-08-25T07:00:00.000000000Z"; + + int32_t rollbackLineFeedCount = 0; + int32_t matchSize = logFileReader.RemoveLastIncompleteLog( + const_cast(testLog.data()), testLog.size(), rollbackLineFeedCount); + const auto& matchLog = std::string(testLog.data(), matchSize); + + APSARA_TEST_EQUAL(int32_t(expectMatch.size()), matchSize); + APSARA_TEST_EQUAL(expectMatch, matchLog); + APSARA_TEST_EQUAL(3, rollbackLineFeedCount); + } + } +} } // namespace logtail UNIT_TEST_MAIN