From 41a935704434a8dc41ee6c30afd71ba4b827cc7a Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Tue, 10 Dec 2024 10:14:08 +0800 Subject: [PATCH 1/8] update developer-guide and fix pure-plugin-start (#1950) * update developer-guide and fix pure-plugin-start * fix lint --- docs/cn/README.md | 26 ++++++---- .../code-check/check-dependency-license.md | 10 ++-- docs/cn/developer-guide/codestyle.md | 2 +- docs/cn/developer-guide/data-model.md | 4 +- .../development-environment.md | 49 ++++++++++--------- .../log-protocol/How-to-add-new-protocol.md | 4 +- .../cn/developer-guide/log-protocol/README.md | 4 +- .../developer-guide/log-protocol/converter.md | 10 ++-- .../log-protocol/protocol-spec/sls.md | 2 +- .../how-to-custom-builtin-plugins.md | 4 +- .../how-to-write-external-plugins.md | 6 +-- .../how-to-write-flusher-plugins.md | 2 +- .../plugin-debug/logger-api.md | 12 ++--- .../plugin-debug/plugin-self-monitor-guide.md | 34 ++++++++++--- .../plugin-debug/pure-plugin-start.md | 27 +++++----- .../plugin-development-guide.md | 34 +++++++++---- .../plugin-docs/plugin-doc-templete.md | 2 +- .../test/How-to-add-subscriber.md | 6 +-- docs/cn/developer-guide/test/benchmark.md | 2 +- docs/cn/developer-guide/test/e2e-test-step.md | 19 +++---- docs/cn/developer-guide/test/e2e-test.md | 12 +++-- docs/cn/developer-guide/test/unit-test.md | 10 ++-- .../release-notes/release-notes.md | 1 + pkg/logger/logger.go | 6 ++- scripts/plugin_build.sh | 2 +- 25 files changed, 172 insertions(+), 118 deletions(-) diff --git a/docs/cn/README.md b/docs/cn/README.md index b5915e12ea..224bd2c605 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -1,10 +1,16 @@ # 什么是LoongCollector -![]() +![logo]() LoongCollector 是一款集卓越性能、超强稳定性和灵活可编程性于一身的数据采集器,专为构建下一代可观测 Pipeline 设计。源自阿里云可观测性团队所开源的 iLogtail 项目,在继承了 iLogtail 强大的日志采集与处理能力的基础上,进行了全面的功能升级与扩展。从原来单一日志场景,逐步扩展为可观测数据采集、本地计算、服务发现的统一体。 -[![GitHub 
stars](https://camo.githubusercontent.com/674a26318ece2d770231086a733bebdbb174c15721f03714f5b79930574a800a/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f73746172732f616c69626162612f696c6f677461696c)](https://github.com/alibaba/loongcollector/stargazers) [![GitHub issues](https://camo.githubusercontent.com/4266ec67b48f666bc0d440f9d1399e4b56ffc4eca3af3764e062731be83b2873/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f6973737565732f616c69626162612f696c6f677461696c)](https://github.com/alibaba/loongcollector/issues) [![GitHub license](https://camo.githubusercontent.com/608afe55a7ca2ed062304f89208d3b929fddcbde8923cd09ef40edb2d2c3bf76/68747470733a2f2f696d672e736869656c64732e696f2f6769746875622f6c6963656e73652f616c69626162612f696c6f677461696c)](https://github.com/alibaba/loongcollector/blob/main/LICENSE) +[![GitHub contributors](https://img.shields.io/github/contributors/alibaba/ilogtail)](https://github.com/alibaba/loongcollector/contributors) +[![GitHub stars](https://img.shields.io/github/stars/alibaba/ilogtail)](https://github.com/alibaba/loongcollector/stargazers) +[![GitHub issues](https://img.shields.io/github/issues/alibaba/ilogtail)](https://github.com/alibaba/loongcollector/issues) +[![GitHub license](https://img.shields.io/github/license/alibaba/ilogtail)](https://github.com/alibaba/loongcollector/blob/main/LICENSE) +[![Coverity Scan Build Status](https://img.shields.io/coverity/scan/28764.svg)](https://scan.coverity.com/projects/alibaba-ilogtail) +[![Coverage Status](https://codecov.io/gh/alibaba/ilogtail/branch/main/graph/badge.svg)](https://codecov.io/gh/alibaba/ilogtail) +[![Go Report Card](https://goreportcard.com/badge/github.com/alibaba/loongcollector)](https://goreportcard.com/report/github.com/alibaba/loongcollector) ## 品牌寓意 @@ -20,11 +26,11 @@ LoongCollector 社区将紧密围绕既定的愿景蓝图,专注于核心价 LoongCollector 始终将追求极致的采集性能和超强可靠性放在首位,坚信这是实践长期主义理念的根基。我们深知,LoongCollector 核心价值在于为大规模分布式系统提供稳固、高效的可观测性数据统一采集 Agent 与端到端 
Pipeline。不管在过去、现在、未来,LoongCollector 都将持续通过技术革新与优化,实现资源利用效率的提升与在极端场景下的稳定运行。 -![]() +![uncompromised_performance_and_reliability]() ### 遥测数据,无限边界 Unlimited Telemetry Data -![]() +![unlimited_telemetry_data]() LoongCollector 坚信 All-in-One 的设计理念,致力于所有的采集工作用一个 Agent 实现 Logs、Metric、Traces、Events、Profiles 的采集、处理、路由、发送等功能。展望未来,LoongCollector 将着重强化其 Prometheus 抓取能力,深度融入 eBPF(Extended Berkeley Packet Filter)技术以实现无侵入式采集,提供原生的指标采集功能,做到真正的 OneAgent。 @@ -39,7 +45,7 @@ LoongCollector 通过 SPL 与多语言 Plugin 双引擎加持,构建完善的 * 不同引擎都可以相互打通,通过灵活的组合实现预期的计算能力。 * 设计通用的 Event 数据模型,可扩展表达 Logs、Metric、Traces、Events、Profiles 等在内的多种可观测类型,为通用计算提供便捷。 -![]() +![unrestricted_programmable_pipeline]() 开发者可以根据自身需求灵活选择可编程引擎。如果看重执行效率,可以选择原生插件;如果看重算子全面性,需要处理复杂数据,可以选择 SPL 引擎;如果强调低门槛的自身定制化,可以选择扩展插件,采用 Golang 进行编程。 @@ -61,15 +67,15 @@ LoongCollector 通过 SPL 与多语言 Plugin 双引擎加持,构建完善的 同时,对于存储适配层进行了抽象,便于开发者对接符合自己环境需求的持久化存储。 -![]() +![config_server]() LoongCollector 极大地完善了自身可观测性的建设。不管是 LoongCollector 自身运行状态,还是采集 Pipeline 节点都有完整指标。开发者只需要将这些指标对接到可观测系统,即可体验对 LoongCollector 运行状态的清晰洞察。 -![]() +![self_monitor]() ## 核心场景:不仅仅是 Agent -![]() +![not_only_agent]() 作为一款高性能的可观测数据采集与处理 Pipeline,LoongCollector 的部署模式在很大程度上能够被灵活定制以满足各种不同的业务需求和技术架构。 @@ -103,7 +109,7 @@ cd output 未来,LoongCollector 社区将持续围绕长期主义进行建设,打造核心竞争力。同时,也期待更多小伙伴的加入。 -![]() +![roadmap]() * 通过框架能力增强,构建高性能、高可靠的基础底座。 * 通用发送重构框架 @@ -157,4 +163,4 @@ cd output * 知乎:[iLogtail社区](https://www.zhihu.com/column/c_1533139823409270785) * 扫描二维码加入微信/钉钉交流群 - +![chatgroup](https://ilogtail-community-edition.oss-cn-shanghai.aliyuncs.com/images/chatgroup/chatgroup.png) diff --git a/docs/cn/developer-guide/code-check/check-dependency-license.md b/docs/cn/developer-guide/code-check/check-dependency-license.md index 199cb150ce..8f4fc48474 100644 --- a/docs/cn/developer-guide/code-check/check-dependency-license.md +++ b/docs/cn/developer-guide/code-check/check-dependency-license.md @@ -1,6 +1,6 @@ # 检查依赖包许可证 -iLogtail 基于Apache 2.0 协议进行开源,开发者需要保证依赖包协议与Apache 2.0 协议兼容,所有依赖包或源码引入License 说明位于根目录 
`licenses` 文件夹。 +LoongCollector 基于Apache 2.0 协议进行开源,开发者需要保证依赖包协议与Apache 2.0 协议兼容,所有依赖包或源码引入License 说明位于根目录 `licenses` 文件夹。 ## 检查依赖包License @@ -16,12 +16,12 @@ make check-dependency-licenses ## Fork 代码库管理 -出于某些特性不支持原因,或精简依赖包原因,iLogtail 会存在某些Fork代码库,所有Fork代码库存在于[iLogtail](https://github.com/iLogtail)组织进行管理,出于License风险问题,禁止引入私人Fork版本。 +出于某些特性不支持原因,或精简依赖包原因,LoongCollector 会存在某些Fork代码库,所有Fork代码库存在于[iLogtail](https://github.com/iLogtail)组织进行管理,出于License风险问题,禁止引入私人Fork版本。 ### go.mod 管理 1. Fork 仓库: 对于Fork代码库,出于尊重原作者,禁止修改go.mod 仓库module地址,如[样例](https://github.com/iLogtail/go-mysql/blob/master/go.mod)所示。 -2. iLogtail仓库: iLogtail 仓库对于Fork代码库要求使用replace 方式引入,用以保持代码文件声明的引入包地址保持原作者仓库地址。 +2. LoongCollector仓库: LoongCollector 仓库对于Fork代码库要求使用replace 方式引入,用以保持代码文件声明的引入包地址保持原作者仓库地址。 ```go require ( @@ -38,11 +38,11 @@ replace ( 请执行`make check-dependency-licenses` 指令,脚本程序将自动在find_licenses文件夹生成markdown 说明,请将说明放置于[LICENSE_OF_ILOGTAIL_DEPENDENCIES.md](../../../../licenses/LICENSE_OF_ILOGTAIL_DEPENDENCIES.md)文件末端,如下样例。 ```go -## iLogtail used or modified source code from these projects +## LoongCollector used or modified source code from these projects - [github.com/iLogtail/VictoriaMetrics fork from github.com/VictoriaMetrics/VictoriaMetrics](http://github.com/iLogtail/VictoriaMetrics) based on Apache-2.0 - [github.com/iLogtail/metrics fork from github.com/VictoriaMetrics/metrics](http://github.com/iLogtail/metrics) based on MIT ``` ### 建议 -如Fork 特性为原代码库的能力补充,非特定场景如精简依赖包等因素,建议对原始代码库提出PullRequest, 如原始仓库接受此次PullRequest,请将iLogtail 仓库依赖包地址修改为原始仓库地址,并删除Fork仓库。 +如Fork 特性为原代码库的能力补充,非特定场景如精简依赖包等因素,建议对原始代码库提出PullRequest, 如原始仓库接受此次PullRequest,请将 LoongCollector 仓库依赖包地址修改为原始仓库地址,并删除Fork仓库。 diff --git a/docs/cn/developer-guide/codestyle.md b/docs/cn/developer-guide/codestyle.md index 5895364e84..4cf7526938 100644 --- a/docs/cn/developer-guide/codestyle.md +++ b/docs/cn/developer-guide/codestyle.md @@ -1,6 +1,6 @@ # 代码风格 -iLogtail 
C++遵循基于[Google代码规范](https://google.github.io/styleguide/cppguide.html)的风格,详细格式约束见[.clang-format](https://github.com/alibaba/loongcollector/blob/main/.clang-format)。 +LoongCollector C++遵循基于[Google代码规范](https://google.github.io/styleguide/cppguide.html)的风格,详细格式约束见[.clang-format](https://github.com/alibaba/loongcollector/blob/main/.clang-format)。 Go遵循[Effective Go](https://go.dev/doc/effective_go)风格。 diff --git a/docs/cn/developer-guide/data-model.md b/docs/cn/developer-guide/data-model.md index 1cc9c5b909..3e2185835d 100644 --- a/docs/cn/developer-guide/data-model.md +++ b/docs/cn/developer-guide/data-model.md @@ -1,6 +1,6 @@ # 数据模型 -iLogtail 目前支持 `SLS Log Protocol` 和 `Pipeline Event` 两种数据模型,两种模型的描述和对比如下: +LoongCollector 目前支持 `SLS Log Protocol` 和 `Pipeline Event` 两种数据模型,两种模型的描述和对比如下: | | SLS Log Protocol | Pipeline Event | | ---- | ---- | ---- | | 描述 | SLS 日志的专用处理结构 | 可扩展的可观测性数据模型,支持Metrics、Trace、Logging、Bytes、Profile等 | @@ -72,7 +72,7 @@ type MetricEvent struct { } ``` -主流的metrics数据有单值(eg. Prometheus)和多值(eg. influxdb)两种设计,iLogtail 中也需要支持两种不同的设计,基于此设计了 MetricValue 接口和MetricSingleValue 和 MetricMultiValue 两个不同的实现 +主流的metrics数据有单值(eg. Prometheus)和多值(eg. influxdb)两种设计,LoongCollector 中也需要支持两种不同的设计,基于此设计了 MetricValue 接口和MetricSingleValue 和 MetricMultiValue 两个不同的实现 ```go type MetricValue interface { diff --git a/docs/cn/developer-guide/development-environment.md b/docs/cn/developer-guide/development-environment.md index 0a0b597253..ef24430f99 100644 --- a/docs/cn/developer-guide/development-environment.md +++ b/docs/cn/developer-guide/development-environment.md @@ -1,20 +1,20 @@ # 开发环境 -虽然[源代码编译](../installation/sources/build.md)已经提供了方便的iLogtail编译方法,但却不适合开发场景。因为开发过程中需要不断进行编译调试,重复全量编译的速度太慢,因此需要构建支持增量编译开发环境。 +虽然[源代码编译](../installation/sources/build.md)已经提供了方便的 LoongCollector 编译方法,但却不适合开发场景。因为开发过程中需要不断进行编译调试,重复全量编译的速度太慢,因此需要构建支持增量编译开发环境。 ## 进程结构 -iLogtail为了支持插件系统,引入了 libPluginAdaptor 和 libPluginBase(以下简称 adaptor 和 base)这两个动态库,它们与 iLogtail 之间的关系如下:
-iLogtail 动态依赖于这两个动态库(即 binary 中不依赖),在初始化时,iLogtail 会尝试使用动态库接口(如 dlopen)动态加载它们,获取所需的符号。
-Adaptor 充当一个中间层,iLogtail 和 base 均依赖它,iLogtail 向 adaptor 注册回调,adpator 将这些回调记录下来以接口的形式暴露给 base 使用。
-Base 是插件系统的主体,它包含插件系统所必须的采集、处理、聚合以及输出(向 iLogtail 递交可以视为其中一种)等功能。
-因此,完整的iLogtail包含ilogtail、libPluginAdaptor.so 和 libPluginBase.so 3个二进制文件。 +LoongCollector 为了支持插件系统,引入了 libPluginAdaptor 和 libPluginBase(以下简称 adaptor 和 base)这两个动态库,它们与 LoongCollector 之间的关系如下:
+LoongCollector 动态依赖于这两个动态库(即 binary 中不依赖),在初始化时,LoongCollector 会尝试使用动态库接口(如 dlopen)动态加载它们,获取所需的符号。
+Adaptor 充当一个中间层,LoongCollector 和 base 均依赖它,LoongCollector 向 adaptor 注册回调,adaptor 将这些回调记录下来以接口的形式暴露给 base 使用。<br>
+Base 是插件系统的主体,它包含插件系统所必须的采集、处理、聚合以及输出(向 LoongCollector 递交可以视为其中一种)等功能。
+因此,完整的 LoongCollector 包含loongcollector 、libPluginAdaptor.so 和 libPluginBase.so 3个二进制文件。 ![image.png](https://sls-opensource.oss-us-west-1.aliyuncs.com/ilogtail/ilogtail-adapter-cgo.png) ## 目录结构 -iLogtail的大致目录结构如下: +LoongCollector 的大致目录结构如下: ```shell . @@ -28,15 +28,17 @@ iLogtail的大致目录结构如下: └── Makefile # 编译描述文件 ``` -core目录包含了iLogtail C++核心代码,ilogtail.cpp是其主函数入口文件。C++项目使用CMake描述,CMakeLists.txt是总入口,各子目录中还有CMakeLists.txt描述子目录下的编译目标。 +core目录包含了 LoongCollector C++核心代码,ilogtail.cpp是其主函数入口文件。C++项目使用CMake描述,CMakeLists.txt是总入口,各子目录中还有CMakeLists.txt描述子目录下的编译目标。 -顶层目录.本身就是一个Go项目,该项目为iLogtail插件,go.mod为其描述文件。插件代码主体在plugins目录。 +顶层目录.本身就是一个Go项目,该项目为 LoongCollector 插件,go.mod为其描述文件。插件代码主体在plugins目录。 -docker目录和scripts目录分别为辅助编译的镜像描述目录和脚本目录。Makefile为整个iLogtail的编译描述文件,对编译命令进行了封装。 +docker目录和scripts目录分别为辅助编译的镜像描述目录和脚本目录。Makefile为整个 LoongCollector 的编译描述文件,对编译命令进行了封装。 ## 开发镜像 -loongcollector 依赖了诸多第三方库(详见[编译依赖](../installation/sources/dependencies.md)),为了简化编译流程ilogtail提供了预编译依赖的镜像辅助编译。开发镜像的地址在`sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/loongcollector-community-edition/loongcollector-build-linux`,可通过下面命令获取。 +LoongCollector 依赖了诸多第三方库(详见[编译依赖](../installation/sources/dependencies.md) + +LoongCollector 提供了预编译依赖的镜像辅助编译。开发镜像的地址在`sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/loongcollector-community-edition/loongcollector-build-linux`,可通过下面命令获取。 ```shell docker pull sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/loongcollector-community-edition/loongcollector-build-linux @@ -70,7 +72,7 @@ go install ... 
## 使用VS Code构建开发环境 -[VS Code](https://code.visualstudio.com/)通过[Remote Development](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack)插件可以实现远程开发、在镜像中开发,甚至远程+镜像中开发,在镜像中开发的功能使得编译环境在不同部署间都能保持统一。由于VS Code免费而功能强大,因此我们选用VS Code来为iLogtail创建一致的、可移植的开发环境。 +[VS Code](https://code.visualstudio.com/)通过[Remote Development](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack)插件可以实现远程开发、在镜像中开发,甚至远程+镜像中开发,在镜像中开发的功能使得编译环境在不同部署间都能保持统一。由于VS Code免费而功能强大,因此我们选用VS Code来为 LoongCollector 创建一致的、可移植的开发环境。 ### 1. 安装插件 @@ -78,7 +80,7 @@ go install ... ### 2. 创建镜像开发环境配置 -在iLogtail代码库的顶层目录创建`.devcontainer`目录,并在里面创建`devcontainer.json`文件,文件的内容如下: +在 LoongCollector 代码库的顶层目录创建`.devcontainer`目录,并在里面创建`devcontainer.json`文件,文件的内容如下: ```json { @@ -94,10 +96,9 @@ go install ... } } } - ``` -其中,image指定了ilogtail的开发镜像地址,customizations.vscode.extensions指定了开发环境的插件。部分插件介绍如下,开发者也可以按照自己的习惯进行修改,[欢迎讨论](https://github.com/alibaba/loongcollector/discussions/299)。 +其中,image指定了 LoongCollector 的开发镜像地址,customizations.vscode.extensions指定了开发环境的插件。部分插件介绍如下,开发者也可以按照自己的习惯进行修改,[欢迎讨论](https://github.com/alibaba/loongcollector/discussions/299)。 | **插件名** | **用途** | | --- | --- | @@ -161,7 +162,7 @@ cmake -DBUILD_LOGTAIL_UT=ON .. 可以将C++核心的构建结果拷贝到`./output`目录组装出完整的构建结果。 ```bash -cp -a ./core/build/ilogtail ./output +cp -a ./core/build/loongcollector ./output cp -a ./core/build/go_pipeline/libPluginAdapter.so ./output ``` @@ -169,7 +170,7 @@ cp -a ./core/build/go_pipeline/libPluginAdapter.so ./output ```text ./output -├── ilogtail (主程序) +├── loongcollector (主程序) ├── libPluginAdapter.so(插件接口) ├── libPluginBase.h └── libPluginBase.so (插件lib) @@ -184,7 +185,7 @@ cp -a ./core/build/go_pipeline/libPluginAdapter.so ./output ### 2. 
创建编译容器,并挂载代码目录 ```bash -docker run --name ilogtail-build -d \ +docker run --name loongcollector-build -d \ -v `pwd`:/src -w /src \ sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/loongcollector-community-edition/loongcollector-build-linux:2.0.5 \ bash -c "sleep infinity" @@ -193,7 +194,7 @@ docker run --name ilogtail-build -d \ ### 3. 进入容器 ```bash -docker exec -it ilogtail-build bash +docker exec -it loongcollector-build bash ``` ### 4. 在容器内编译 @@ -206,12 +207,12 @@ docker exec -it ilogtail-build bash ### 1. 修改官方镜像entrypoint -基于官方镜像包进行调试,首先用bash覆盖官方镜像的entrypoint,避免杀死ilogtail后容器直接退出。 +基于官方镜像包进行调试,首先用bash覆盖官方镜像的entrypoint,避免杀死 LoongCollector 后容器直接退出。 - docker:指定CMD ```bash -docker run -it --name docker_ilogtail -v /:/logtail_host:ro -v /var/run:/var/run aliyun/ilogtail: bash +docker run -it --name docker_loongcollector -v /:/logtail_host:ro -v /var/run:/var/run aliyun/loongcollector: bash ``` - k8s:用command覆盖entrypoint @@ -225,17 +226,17 @@ docker run -it --name docker_ilogtail -v /:/logtail_host:ro -v /var/run:/var/run ### 2. 将自己编的二进制文件、so,替换到容器里 -由于ilogtail容器挂载了主机目录,因此将需要替换掉文件放到主机目录上容器内就能访问。 +由于 LoongCollector 容器挂载了主机目录,因此将需要替换掉文件放到主机目录上容器内就能访问。 ```bash # 将开发机上编译的so scp到container所在node上 scp libPluginBase.so @:/home/ ``` -主机的根路径在ilogtail容器中位于/logtail_host,找到对应目录进行copy即可。 +主机的根路径在 LoongCollector 容器中位于/logtail_host,找到对应目录进行copy即可。 ```bash -cp /logtail_host/home//libPluginBase.so /usr/local/ilogtail +cp /logtail_host/home//libPluginBase.so /usr/local/loongcollector ``` ## 常见问题 diff --git a/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md b/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md index 1bb4acbd4b..7517792d15 100644 --- a/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md +++ b/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md @@ -1,6 +1,6 @@ # 增加新的日志协议 -如果iLogtail暂时不支持您所需的日志协议,您可以为iLogtail增加该协议并添加相应的协议转换函数,具体步骤如下: +如果 LoongCollector 暂时不支持您所需的日志协议,您可以为 LoongCollector 增加该协议并添加相应的协议转换函数,具体步骤如下: 1. 
如果您的协议支持Protobuf或其它可通过模式定义生成相应内存结构的编码方式,您需要首先在`./pkg/protocol`目录下新建一个以协议名为命名的文件夹,然后在该文件夹中增加一个以编码方式为命名的子文件夹,在该文件中存放相应的模式定义文件,然后将由代码生成工具生成的与该模式定义文件相对应的Go代码文件放置在父目录中。目录组织结构如下: @@ -32,7 +32,7 @@ 3. 能够根据`targetFields`找到对应字段的值 4. 对于部分编码格式,能够根据`c.ProtocolKeyRenameMap`重命名协议字段的Key - 为了完成上述第2和第3点,iLogtail提供了下列帮助函数: + 为了完成上述第2和第3点,LoongCollector 提供了下列帮助函数: ```Go func convertLogToMap(log *sls.Log, logTags []*sls.LogTag, src, topic string, tagKeyRenameMap map[string]string) (contents map[string]string, tags map[string]string) diff --git a/docs/cn/developer-guide/log-protocol/README.md b/docs/cn/developer-guide/log-protocol/README.md index cec487ae17..0db26822db 100644 --- a/docs/cn/developer-guide/log-protocol/README.md +++ b/docs/cn/developer-guide/log-protocol/README.md @@ -1,8 +1,8 @@ # 日志协议 -iLogtail的日志数据默认以sls自定义协议的形式与外部进行交互,但也支持日志数据在sls协议和其它标准协议或自定义协议之间的转换。除此之外,对于某种协议,iLogtail还支持对日志数据进行不同方式的编码。 +LoongCollector 的日志数据默认以sls自定义协议的形式与外部进行交互,但也支持日志数据在sls协议和其它标准协议或自定义协议之间的转换。除此之外,对于某种协议,LoongCollector 还支持对日志数据进行不同方式的编码。 -目前,iLogtail日志数据支持的协议及相应的编码方式如下表所示,其中协议类型可分为自定义协议和标准协议: +目前,LoongCollector 日志数据支持的协议及相应的编码方式如下表所示,其中协议类型可分为自定义协议和标准协议: | 协议类型 | 协议名称 | 支持的编码方式 | |-------|--------------------------------------------------------------------------------------------------|---------------| diff --git a/docs/cn/developer-guide/log-protocol/converter.md b/docs/cn/developer-guide/log-protocol/converter.md index 187a331b0f..f791036107 100644 --- a/docs/cn/developer-guide/log-protocol/converter.md +++ b/docs/cn/developer-guide/log-protocol/converter.md @@ -1,6 +1,6 @@ # 协议转换 -在开发Flusher插件时,用户往往需要将日志数据从sls协议转换成其他协议。在扩展Metric数据模型后,v2版本的Flusher插件还需要支持从PipelineGroupEvents数据转换成其他协议的场景。为了加快开发插件流程,iLogtail提供了通用协议转换模块,用户只需要指定目标协议的名称和编码方式即可获得编码后的字节流。 +在开发Flusher插件时,用户往往需要将日志数据从sls协议转换成其他协议。在扩展Metric数据模型后,v2版本的Flusher插件还需要支持从PipelineGroupEvents数据转换成其他协议的场景。为了加快开发插件流程,LoongCollector 提供了通用协议转换模块,用户只需要指定目标协议的名称和编码方式即可获得编码后的字节流。 ## Converter结构 @@ -129,12 +129,12 @@ c, err := 
protocol.NewConverter("custom_single", "json", map[string]string{"host | 字段名 | 描述 | | ------ | ------ | - | host.ip | iLogtail所属机器或容器的ip地址 | + | host.ip | LoongCollector所属机器或容器的ip地址 | | log.topic | 日志的topic | | log.file.path | 被采集文件的路径 | - | host.name | iLogtail所属机器或容器的主机名 | - | k8s.node.ip | iLogtail容器所处K8s节点的ip | - | k8s.node.name | iLogtail容器所处K8s节点的名称 | + | host.name | LoongCollector所属机器或容器的主机名 | + | k8s.node.ip | LoongCollector容器所处K8s节点的ip | + | k8s.node.name | LoongCollector容器所处K8s节点的名称 | | k8s.namespace.name | 业务容器所属的K8s命名空间 | | k8s.pod.name | 业务容器所属的K8s Pod名称 | | k8s.pod.ip | 业务容器所属的K8s Pod ip | diff --git a/docs/cn/developer-guide/log-protocol/protocol-spec/sls.md b/docs/cn/developer-guide/log-protocol/protocol-spec/sls.md index 608d03754e..f0e8bac1cf 100644 --- a/docs/cn/developer-guide/log-protocol/protocol-spec/sls.md +++ b/docs/cn/developer-guide/log-protocol/protocol-spec/sls.md @@ -39,7 +39,7 @@ LogGroup(日志组)是对多条日志的包装: - Logs:包含所有日志。 - Category:日志服务Logstore,可以类比Kafka 独立集群, 数据存储的独立单元。 - Topic: 日志服务Topic,一个Category(Logstore)可以划分为多个topic,不填写时Topic 为空字符串,可以类比Kafka 独立集群下的Topic概念。 -- Source/MachineUUID:iLogtail 所在节点的信息,前者为 IP,后者为 UUID。 +- Source/MachineUUID:LoongCollector 所在节点的信息,前者为 IP,后者为 UUID。 - LogTags:所有日志共同的 tag,同样由 key/value 列表组成。 ```protobuf diff --git a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-custom-builtin-plugins.md b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-custom-builtin-plugins.md index 06641e0ad5..254ad0bedc 100644 --- a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-custom-builtin-plugins.md +++ b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-custom-builtin-plugins.md @@ -2,9 +2,9 @@ ## 插件引用机制 -iLogtail 通过 [插件引用配置文件](https://github.com/alibaba/loongcollector/blob/main/plugins.yml) 来定义要包含在构建产物中的插件,该文件中默认包含了iLogtail仓库中的所有插件。 +LoongCollector 通过 [插件引用配置文件](https://github.com/alibaba/loongcollector/blob/main/plugins.yml) 
来定义要包含在构建产物中的插件,该文件中默认包含了LoongCollector 仓库中的所有插件。 -同时,iLogtail 也以同样的机制支持引入外部私有插件,关于如何开发外部插件,请参阅[如何构建外部私有插件](how-to-write-external-plugins.md)。iLogtail 默认会检测仓库根目录下的 `external_plugins.yml` 文件来查找外部插件定义。 +同时,LoongCollector 也以同样的机制支持引入外部私有插件,关于如何开发外部插件,请参阅[如何构建外部私有插件](how-to-write-external-plugins.md)。LoongCollector 默认会检测仓库根目录下的 `external_plugins.yml` 文件来查找外部插件定义。 当执行诸如 `make all` 等构建指令时,该配置文件会被解析并生成 go import 文件到 [plugins/all](https://github.com/alibaba/loongcollector/tree/main/plugins/all) 目录下。 diff --git a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-external-plugins.md b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-external-plugins.md index 6a51bd6888..92f7e155a9 100644 --- a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-external-plugins.md +++ b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-external-plugins.md @@ -2,7 +2,7 @@ ## 场景 -某些情况下,您可能想要开发自己的非公开插件,但又希望能够及时更新使用到社区iLogtail不断迭代的更新功能(而不是在社区版本上分叉),iLogtail 的外部插件开发机制可以满足您这样的需求。 +某些情况下,您可能想要开发自己的非公开插件,但又希望能够及时更新使用到社区 LoongCollector 不断迭代的更新功能(而不是在社区版本上分叉),LoongCollector 的外部插件开发机制可以满足您这样的需求。 ## 步骤 @@ -153,9 +153,9 @@ import ( ### 7. 
编写插件引用配置文件 -**以下内容在 iLogtail 主仓库执行**。 +**以下内容在 LoongCollector 主仓库执行**。 -在 iLogtail 仓库根目录创建名为 `external_plugins.yml` 的配置文件,写入如下内容: +在 LoongCollector 仓库根目录创建名为 `external_plugins.yml` 的配置文件,写入如下内容: ```yaml plugins: diff --git a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-flusher-plugins.md b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-flusher-plugins.md index d0371eae50..744d0837ed 100644 --- a/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-flusher-plugins.md +++ b/docs/cn/developer-guide/plugin-development/extended-plugins/how-to-write-flusher-plugins.md @@ -10,7 +10,7 @@ Flusher 插件与外部系统进行交互,将数据发送到外部,以下将 - Flush 接口是插件系统向 flusher 插件实例提交数据的入口,用于将数据输出到外部系统。为了映射到日志服务的概念中,我们增加了三个 string 参数,它代表这个 flusher 实例所属的 project/logstore/config。详细解释请参与[数据结构](../data-structure.md) 与 [基本结构](../../../principle/plugin-system.md) 。 -- SetUrgent: 标识iLogtail 即将退出,将系统状态传递给具体Flusher 插件,可以供Flusher 插件自动适应系统状态,比如加快输出速率等。(SetUrgent调用发生在其他类型插件的Stop之前,当前尚无有意义的实现) +- SetUrgent: 标识 LoongCollector 即将退出,将系统状态传递给具体Flusher 插件,可以供Flusher 插件自动适应系统状态,比如加快输出速率等。(SetUrgent调用发生在其他类型插件的Stop之前,当前尚无有意义的实现) - Stop:停止Flusher 插件,比如断开与外部系统交互的链接 ```go diff --git a/docs/cn/developer-guide/plugin-development/plugin-debug/logger-api.md b/docs/cn/developer-guide/plugin-development/plugin-debug/logger-api.md index c1b3a023a5..f57634e31d 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-debug/logger-api.md +++ b/docs/cn/developer-guide/plugin-development/plugin-debug/logger-api.md @@ -35,7 +35,7 @@ func (p *plugin) func1() { ## 打印采集配置元信息 -对于iLogtail,具有多租户的特点,可以支持多份采集配置同时工作,iLogtail 支持将采集配置的元信息打印到日志中,便于问题的排查与定位。 +对于 LoongCollector,具有多租户的特点,可以支持多份采集配置同时工作,LoongCollector 支持将采集配置的元信息打印到日志中,便于问题的排查与定位。 ```go import ( @@ -139,20 +139,20 @@ func Test_plugin_func1(t *testing.T) { ## 启动时控制日志行为 -启动iLogtail 时,默认的日志行为是异步文件Info级别输出,如果需要动态调整,可以参考以下内容进行设置: +启动 LoongCollector 时,默认的日志行为是异步文件Info级别输出,如果需要动态调整,可以参考以下内容进行设置: ### 调整日志级别 
启动时如果启动程序相对路径下没有 plugin_logger.xml 文件,则可以使用以下命令设置: ```shell -./ilogtail --logger-level=debug +./loongcollector --logger-level=debug ``` 如果存在 plugin_logger.xml 文件,可以修改文件,或使用以下命令强制重新生成日志配置文件: ```shell -./ilogtail --logger-level=info --logger-retain=false +./loongcollector --logger-level=info --logger-retain=false ``` ### 是否开启控制台打印 @@ -160,11 +160,11 @@ func Test_plugin_func1(t *testing.T) { 默认生成环境关闭控制台打印,如果本地调试环境想开启控制台日志,相对路径下没有 plugin_logger.xml 文件,则可以使用以下命令: ```shell -./ilogtail --logger-console=true +./loongcollector --logger-console=true ``` 如果存在 plugin_logger.xml 文件,可以修改文件,或使用以下命令强制重新生成日志配置文件: ```shell -./ilogtail --logger-console=true --logger-retain=false +./loongcollector --logger-console=true --logger-retain=false ``` diff --git a/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md b/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md index 562897b4ad..6b3f56c98f 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md +++ b/docs/cn/developer-guide/plugin-development/plugin-debug/plugin-self-monitor-guide.md @@ -1,5 +1,6 @@ # 插件自监控接口 -iLogtail提供了指标接口,可以方便地为插件增加一些自监控指标,目前支持Counter,Gauge,String,Latency等类型。 + +LoongCollector 提供了指标接口,可以方便地为插件增加一些自监控指标,目前支持Counter,Gauge,String,Latency等类型。 接口: @@ -10,6 +11,7 @@ iLogtail提供了指标接口,可以方便地为插件增加一些自监控指 用户使用时需要引入pkg/helper包: + ```go import ( "github.com/alibaba/ilogtail/pkg/helper" @@ -17,7 +19,9 @@ import ( ``` ## 创建指标 + 指标必须先定义后使用,在插件的结构体内声明具体指标。 + ```go type ProcessorRateLimit struct { // other fields @@ -27,13 +31,16 @@ type ProcessorRateLimit struct { } ``` -创建指标时,需要将其注册到iLogtail Context 的 MetricRecord 中,以便 iLogtail 能够采集上报数据,在插件的Init方法中,调用context 的 GetMetricRecord()方法来获取MetricRecord,然后调用helper.New**XXX**MetricAndRegister函数去注册一个指标,例如: +创建指标时,需要将其注册到 LoongCollector Context 的 MetricRecord 中,以便 LoongCollector 能够采集上报数据,在插件的Init方法中,调用context 的 
GetMetricRecord()方法来获取MetricRecord,然后调用helper.New**XXX**MetricAndRegister函数去注册一个指标,例如: + ```go metricsRecord := p.context.GetMetricRecord() p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginType)) p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) ``` + 用户在声明一个Metric时可以还额外注入一些插件级别的静态Label,这是一个可选参数,例如flusher_http就把RemoteURL等配置进行上报: + ```go metricsRecord := f.context.GetMetricRecord() metricLabels := f.buildLabels() @@ -41,35 +48,46 @@ f.matchedEvents = helper.NewCounterMetricAndRegister(metricsRecord, "http_flushe ``` ## 指标打点 + 不同类型的指标有不同的打点方法,直接调用对应Metric类型的方法即可。 Counter: + ```go p.processedMetric.Add(1) ``` + Latency: + ```go tracker.ProcessLatency.Observe(float64(time.Since(startProcessTime))) ``` + StringMetric: + ```go sc.lastBinLogMetric.Set(string(r.NextLogName)) ``` ## 指标上报 -iLogtial会自动采集所有注册的指标,默认采集间隔为60s,然后通过default_flusher上报,数据格式为LogGroup,格式如下: + +LoongCollector 会自动采集所有注册的指标,默认采集间隔为60s,然后通过default_flusher上报,数据格式为LogGroup,格式如下: + ```json 
{"Logs":[{"Time":0,"Contents":[{"Key":"http_flusher_matched_events","Value":"2.0000"},{"Key":"__name__","Value":"http_flusher_matched_events"},{"Key":"RemoteURL","Value":"http://testeof.com/write"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]},{"Time":0,"Contents":[{"Key":"http_flusher_unmatched_events","Value":"0.0000"},{"Key":"__name__","Value":"http_flusher_unmatched_events"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"RemoteURL","Value":"http://testeof.com/write"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]},{"Time":0,"Contents":[{"Key":"http_flusher_dropped_events","Value":"0.0000"},{"Key":"__name__","Value":"http_flusher_dropped_events"},{"Key":"RemoteURL","Value":"http://testeof.com/write"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]},{"Time":0,"Contents":[{"Key":"http_flusher_retry_count","Value":"2.0000"},{"Key":"__name__","Value":"http_flusher_retry_count"},{"Key":"RemoteURL","Value":"http://testeof.com/write"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]},{"Time":0,"Contents":[{"Key":"http_flusher_flush_failure_count","Value":"2.0000"},{"Key":"__name__","Value":"http_flusher_flush_failure_count"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"RemoteURL","Value":"h
ttp://testeof.com/write"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]},{"Time":0,"Contents":[{"Key":"http_flusher_flush_latency_ns","Value":"2504448312.5000"},{"Key":"__name__","Value":"http_flusher_flush_latency_ns"},{"Key":"db","Value":"%{metadata.db}"},{"Key":"flusher_http_id","Value":"0"},{"Key":"RemoteURL","Value":"http://testeof.com/write"},{"Key":"project","Value":"p"},{"Key":"config_name","Value":"c"},{"Key":"plugins","Value":""},{"Key":"category","Value":"p"},{"Key":"source_ip","Value":"100.80.230.110"}]}],"Category":"","Topic":"","Source":"","MachineUUID":""} ``` + 一组LogGroup中会有多条Log,每一条Log都对应一条指标,其中` {"Key":"__name__","Value":"http_flusher_matched_events"} `是一个特殊的Label,代表指标的名字。 - ## 高级功能 + ### 动态Label -和Prometheus SDK类似,iLogtail也允许用户在自监控时上报可变Label,对于这些带可变Label的指标集合,iLogtail称之为MetricVector, + +和Prometheus SDK类似,LoongCollector 也允许用户在自监控时上报可变Label,对于这些带可变Label的指标集合,LoongCollector 称之为MetricVector, MetricVector同样也支持上述的指标类型,因此把上面的Metric看作是MetricVector不带动态Label的特殊实现。 用例: + ```go type FlusherHTTP struct { // other fields @@ -78,8 +96,10 @@ type FlusherHTTP struct { statusCodeStatistics pipeline.MetricVector[pipeline.CounterMetric] // 带有动态Label的指标 } ``` + 声明并注册MetricVector时,可以使用helper.New**XXX**MetricVectorAndRegister方法, 需要将其带有哪些动态Label的Name也进行声明: + ```go f.statusCodeStatistics = helper.NewCounterMetricVectorAndRegister(metricsRecord, "http_flusher_status_code_count", @@ -89,15 +109,17 @@ f.statusCodeStatistics = helper.NewCounterMetricVectorAndRegister(metricsRecord, ``` 打点时通过WithLabels API传入动态Label的值,拿到一个Metric对象,然后进行打点: + ```go f.statusCodeStatistics.WithLabels(pipeline.Label{Key: "status_code", Value: strconv.Itoa(response.StatusCode)}).Add(1) ``` ## 示例 + 可以参考内置的一些插件: 限流插件: http flusher插件: - \ No newline at end of file + diff --git a/docs/cn/developer-guide/plugin-development/plugin-debug/pure-plugin-start.md 
b/docs/cn/developer-guide/plugin-development/plugin-debug/pure-plugin-start.md index 2765e976b6..32e1bbe621 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-debug/pure-plugin-start.md +++ b/docs/cn/developer-guide/plugin-development/plugin-debug/pure-plugin-start.md @@ -1,28 +1,29 @@ # 纯插件模式启动 -纯插件模式为iLogtail插件开发提供轻量级测试能力,以下我们将介绍如以纯插件模式启动iLogtail。 +纯插件模式为 LoongCollector 插件开发提供轻量级测试能力,以下我们将介绍如以纯插件模式启动 LoongCollector。 ## 本地启动 -在根目录下执行 `make plugin_main` 命令,会得到 `output/ilogtail` 可执行文件,使用以下命令可以快速启动iLogtail 程序,并将日志使用控制台输出。 +在根目录下执行 `make plugin_main` 命令,会得到 `output/loongcollector` 可执行文件,使用以下命令可以快速启动 LoongCollector 程序,并将日志使用控制台输出。 ```shell # 默认插件启动行为是使用metric_mock 插件mock 数据,并将数据进行日志模式打印。 - ./output/ilogtail --logger-console=true --logger-retain=false +cd output +./loongcollector --logger-console=true --logger-retain=false ``` ## 配置 -iLogtail 目前提供以下4种模式进行配置设置: +LoongCollector 目前提供以下4种模式进行配置设置: * 指定配置文件模式启动。 * 文件至文件快速测试 -* iLogtail 暴露Http 端口,可以进行配置变更。 -* iLogtail-C程序通过程序API进行配置变更。 +* LoongCollector 暴露Http 端口,可以进行配置变更。 +* loongcollector-C程序通过程序API进行配置变更。 ### 指定配置文件模式启动 -在使用独立模式编译得到 ilogtail 这个可执行程序后,你可以通过为其指定一个配置文件(不指定的话默认为当前目录下 plugin.json)来启动它。 +在使用独立模式编译得到 LoongCollector 这个可执行程序后,你可以通过为其指定一个配置文件(不指定的话默认为当前目录下 plugin.json)来启动它。 ```json { @@ -105,14 +106,14 @@ iLogtail 目前提供以下4种模式进行配置设置: } ``` -执行 `./output/ilogtail --plugin=plugin.quickstart.json`,在一段时间后,使用 ctrl+c 中断运行。通过查看目录,会发现生成了 quickstart\_1.stdout 和 quickstart\_2.stdout 两个文件,并且它们的内容一致。查看内容可以发现,其中的每条数据都包含 Index 和 Content 两个键,并且由于有两个输入插件,Content 会有所不同。 +执行 `./loongcollector --plugin=plugin.quickstart.json`,在一段时间后,使用 ctrl+c 中断运行。通过查看目录,会发现生成了 quickstart\_1.stdout 和 quickstart\_2.stdout 两个文件,并且它们的内容一致。查看内容可以发现,其中的每条数据都包含 Index 和 Content 两个键,并且由于有两个输入插件,Content 会有所不同。 ### 文件至文件快速测试 可以使用如下指令,从文件输入数据并输出到文件,快速进行配置测试 ```shell -./output/ilogtail --plugin=plugin.json --file-io=true +./output/loongcollector --plugin=plugin.json --file-io=true ``` 
在测试前,需要创建上文所说的json格式的配置文件。与上文不同的是,这里的配置文件不需要配置inputs和flushers(如果配置,inputs会失效,flushers会保留)。当file-io开关被打开时,会自动指定为文件输入,并输出到文件。默认的输入文件是input.log,默认的输出文件是output.log,也可以设置input-file和output-file参数来修改输入和输出文件。 @@ -148,7 +149,7 @@ stdin.log内容如下: 执行如下命令: ```shell -./output/ilogtail --plugin=plugin.file2filetest.json --file-io=true --input-file=stdin.log --output-file=stdout.log +./loongcollector --plugin=plugin.file2filetest.json --file-io=true --input-file=stdin.log --output-file=stdout.log ``` 可以发现生成了一个stdout.log文件,内容如下: @@ -159,14 +160,14 @@ stdin.log内容如下: ### HTTP API 配置变更 -当iLogtail 以独立模式运行时,可以使用HTTP API 进行配置文件变更。 +当 LoongCollector 以独立模式运行时,可以使用HTTP API 进行配置文件变更。 -* 端口: iLogtail 独立运行时,默认启动18689 端口进行监听配置输入。 +* 端口: LoongCollector 独立运行时,默认启动18689 端口进行监听配置输入。 * 接口:/loadconfig 接下来我们将使用HTTP 模式重新进行动态加载**指定配置文件模式启动**篇幅中的静态配置案例。 -1. 首先我们启动 iLogtail 程序: `./output/ilogtail` +1. 首先我们启动 LoongCollector 程序: `./loongcollector` 2. 使用以下命令进行配置重新加载。 ```shell diff --git a/docs/cn/developer-guide/plugin-development/plugin-development-guide.md b/docs/cn/developer-guide/plugin-development/plugin-development-guide.md index 98543bc6dd..bbaf97a466 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-development-guide.md +++ b/docs/cn/developer-guide/plugin-development/plugin-development-guide.md @@ -1,12 +1,24 @@ # 开源插件开发引导 -## 了解 ilogtail 插件 +## 了解 LoongCollector 插件 -ilogtail插件的实现原理、整体架构、系统设计等介绍,请参考[插件系统](../../principle/plugin-system.md)。 +LoongCollector 插件的实现原理、整体架构、系统设计等介绍,请参考[插件系统](../../principle/plugin-system.md)。 -## 开发流程 +## 原生插件开发流程(C++语言) -ilogtail 插件的开发主要有以下步骤: +LoongCollector 原生插件的开发主要有以下步骤: + +1. 创建Issue,描述开发插件功能,会有社区同学参与讨论插件开发的可行性,如果社区review 通过,请参考步骤2继续进行。 +2. 开发对应插件,可以参考以下文档: + * [如何开发原生Input插件](native-plugins/how-to-write-native-input-plugins.md) + * [如何开发原生Flusher插件](native-plugins/how-to-write-native-flusher-plugins.md) + * [插件配置项基本原则](extended-plugins/principles-of-plugin-configuration.md) +3. 
进行单测或者E2E测试,请参考[如何使用单测](../test/unit-test.md) 与 [如何使用E2E测试](../test/e2e-test.md). +4. 提交Pull Request。 + +## 扩展插件开发流程(go语言) + +LoongCollector 插件的开发主要有以下步骤: 1. 创建Issue,描述开发插件功能,会有社区同学参与讨论插件开发的可行性,如果社区review 通过,请参考步骤2继续进行。 2. 实现相应接口。 @@ -16,16 +28,18 @@ ilogtail 插件的开发主要有以下步骤: 6. 使用 *make lint* 检查代码规范。 7. 提交Pull Request。 -在开发时,[Logger接口](plugin-debug/logger-api.md)或许能对您有所帮助。此外,可以使用[纯插件模式启动](plugin-debug/pure-plugin-start.md) iLogtail,用于对插件进行轻量级测试。 +在开发时,[Logger接口](plugin-debug/logger-api.md)和[自监控指标接口](plugin-debug/plugin-self-monitor-guide.md)或许能对您有所帮助。此外,可以使用[纯插件模式启动](plugin-debug/pure-plugin-start.md) LoongCollector,用于对插件进行轻量级测试。 更详细的开发细节,请参考: -* [如何开发Input插件](extended-plugins/how-to-write-input-plugins.md) -* [如何开发Processor插件](extended-plugins/how-to-write-processor-plugins.md) -* [如何开发Aggregator插件](extended-plugins/how-to-write-aggregator-plugins.md) -* [如何开发Flusher插件](extended-plugins/how-to-write-flusher-plugins.md) -* [如何开发Extension插件](extended-plugins/how-to-write-extension-plugins.md) +* [如何开发扩展Input插件](extended-plugins/how-to-write-input-plugins.md) +* [如何开发扩展Processor插件](extended-plugins/how-to-write-processor-plugins.md) +* [如何开发扩展Aggregator插件](extended-plugins/how-to-write-aggregator-plugins.md) +* [如何开发扩展Flusher插件](extended-plugins/how-to-write-flusher-plugins.md) +* [如何开发扩展Extension插件](extended-plugins/how-to-write-extension-plugins.md) * [插件配置项基本原则](extended-plugins/principles-of-plugin-configuration.md) +* [如何开发外部私有插件](extended-plugins/how-to-write-external-plugins.md) +* [如何自定义构建产物中默认包含的插件](extended-plugins/how-to-custom-builtin-plugins.md) ## 文档撰写流程 diff --git a/docs/cn/developer-guide/plugin-development/plugin-docs/plugin-doc-templete.md b/docs/cn/developer-guide/plugin-development/plugin-docs/plugin-doc-templete.md index 38755ef886..63ecda7ac4 100644 --- a/docs/cn/developer-guide/plugin-development/plugin-docs/plugin-doc-templete.md +++ b/docs/cn/developer-guide/plugin-development/plugin-docs/plugin-doc-templete.md @@ -1,6 +1,6 @@ # 插件文档规范 -这是一份 
ilogtail 插件的中文文档的模版及格式说明。 +这是一份 LoongCollector 插件的中文文档的模版及格式说明。 ## 格式说明 diff --git a/docs/cn/developer-guide/test/How-to-add-subscriber.md b/docs/cn/developer-guide/test/How-to-add-subscriber.md index a60310e390..4690ee07e2 100644 --- a/docs/cn/developer-guide/test/How-to-add-subscriber.md +++ b/docs/cn/developer-guide/test/How-to-add-subscriber.md @@ -1,6 +1,6 @@ # 如何编写Subscriber插件 -订阅器(Subscribe)插件是测试引擎中用于接收数据的组件,在接收到数据后,订阅器会进一步将数据发送至验证器(Validator)进行校验。如果您为iLogtail开发了新的输出插件,则您必须为该输出插件编写一个对应的订阅器,用于从您输出插件对应的存储单元中拉取iLogtail写入的数据,并在集成测试中使用该订阅器。 +订阅器(Subscribe)插件是测试引擎中用于接收数据的组件,在接收到数据后,订阅器会进一步将数据发送至验证器(Validator)进行校验。如果您为 LoongCollector 开发了新的输出插件,则您必须为该输出插件编写一个对应的订阅器,用于从您输出插件对应的存储单元中拉取 LoongCollector 写入的数据,并在集成测试中使用该订阅器。 ## Subscriber接口定义 @@ -8,7 +8,7 @@ - `Start()`:启动订阅器,不断地从目标存储单元中拉取所需要的数据,在对数据进行转换后,将数据发送至`SubscribeChan()`返回的通道; - `Stop()`:停止订阅器; - `SubscribeChan()`:返回用于向验证器发送接收到的数据的通道,其中通道的数据类型所对应协议的具体信息可参见[LogGroup](../../docs/cn/developer-guide/data-structure.md); -- `FlusherConfig()`:返回与该订阅器相对应的iLogtail输出插件的默认配置,可直接返回空字符串。 +- `FlusherConfig()`:返回与该订阅器相对应的 LoongCollector 输出插件的默认配置,可直接返回空字符串。 ```go type Subscriber interface { @@ -21,7 +21,7 @@ type Subscriber interface { Stop() // SubscribeChan returns the channel used to transmit received data to validator SubscribeChan() <-chan *protocol.LogGroup - // FlusherConfig returns the default flusher config for Ilogtail container correspoding to this subscriber + // FlusherConfig returns the default flusher config for LoongCollector container corresponding to this subscriber FlusherConfig() string } ``` diff --git a/docs/cn/developer-guide/test/benchmark.md b/docs/cn/developer-guide/test/benchmark.md index 06755d0776..93cb84ff20 100644 --- a/docs/cn/developer-guide/test/benchmark.md +++ b/docs/cn/developer-guide/test/benchmark.md @@ -35,7 +35,7 @@ Feature: performance file to blackhole vector - `@e2e-performance @docker-compose`: 表示测试场景为e2e-performance,测试场景由本地docker-compose运行 - `Given {docker-compose} environment`: 
配置启动测试环境,以docker-compose环境启动测试 -- `Given docker-compose boot type {benchmark}`: 配置docker-compose启动模式,以benchmark模式启动docker-compose,`{}`中参数有两种选项,`e2e`/`benchmark`。以`e2e`模式启动会默认启动ilogtail、goc-server容器,用作e2e测试;以`benchmark`模式启动会默认启动cadvisor容器,用于监控容器运行过程中的资源占用;若在配置文件中不配置该参数,则默认以上一个scenario的启动模式启动。 +- `Given docker-compose boot type {benchmark}`: 配置docker-compose启动模式,以benchmark模式启动docker-compose,`{}`中参数有两种选项,`e2e`/`benchmark`。以`e2e`模式启动会默认启动 LoongCollector、goc-server容器,用作e2e测试;以`benchmark`模式启动会默认启动cadvisor容器,用于监控容器运行过程中的资源占用;若在配置文件中不配置该参数,则默认以上一个scenario的启动模式启动。 - `When start docker-compose {directory}`: `{}`中参数为当前scenario的文件夹名,该行动作会读取`directory`文件夹下的docker-compose.yaml文件,通过docker-compose命令启动所有容器 - `When start monitor {vector}`: `{}`中参数为待监控的容器,该参数需要与docker-compose中的service name相同 - `When generate random logs to file`: 向文件中按照速率生成json格式测试数据,其他生成测试数据的方法请参考[e2e-test-step.md](e2e-test-step.md) diff --git a/docs/cn/developer-guide/test/e2e-test-step.md b/docs/cn/developer-guide/test/e2e-test-step.md index f9c2cc49a5..578231c615 100644 --- a/docs/cn/developer-guide/test/e2e-test-step.md +++ b/docs/cn/developer-guide/test/e2e-test-step.md @@ -1,6 +1,6 @@ # E2E测试——如何添加新的测试行为 -iLogtail提供了一个完整的E2E测试引擎,方便您快速开展集成测试,从而进一步保证代码质量。在大部分情况下,您只需要编写一个配置文件来定义测试行为,即可轻松完成测试。 +LoongCollector 提供了一个完整的E2E测试引擎,方便您快速开展集成测试,从而进一步保证代码质量。在大部分情况下,您只需要编写一个配置文件来定义测试行为,即可轻松完成测试。 ## 目前支持的测试行为 @@ -9,14 +9,14 @@ iLogtail提供了一个完整的E2E测试引擎,方便您快速开展集成测 | 行为类型 | 模板 | 参数 | 说明 | | --- | --- | --- | --- | | Given | ^\{(\S+)\} environment$ | 环境类型 | 初始化远程测试环境 | -| Given | ^iLogtail depends on containers \{(.*)\} | 容器 | iLogtail依赖容器,可多次执行,累积添加 | -| Given | ^iLogtail expose port \{(.*)\} to \{(.*)\} | 端口号 | iLogtail暴露端口,可多次执行,累积添加 | +| Given | ^LoongCollector depends on containers \{(.*)\} | 容器 | LoongCollector依赖容器,可多次执行,累积添加 | +| Given | ^LoongCollector expose port \{(.*)\} to \{(.*)\} | 端口号 | LoongCollector暴露端口,可多次执行,累积添加 | | Given | ^\{(.*)\} local config as below | 1. 配置名 2. 
配置文件内容 | 添加本地配置 | | Given | ^\{(.*)\} http config as below | 1. 配置名 2. 配置文件内容 | 通过http添加配置 | | Given | ^remove http config \{(.*)\} | 配置名 | 通过http移除配置 | | Given | ^subcribe data from \{(\S+)\} with config | 1. 数据源 2. 配置文件内容 | 订阅数据源 | | When | ^generate \{(\d+)\} regex logs, with interval \{(\d+)\}ms$ | 1. 生成日志数量 2. 生成日志间隔 | 生成正则文本日志(路径为/tmp/ilogtail/regex_single.log) | -| When | ^generate \{(\d+)\} http logs, with interval \{(\d+)\}ms, url: \{(.*)\}, method: \{(.*)\}, body: | 1. 生成日志数量 2. 生成日志间隔 3. url 4. method 5. body | 生成http日志,发送到iLogtail input_http_server | +| When | ^generate \{(\d+)\} http logs, with interval \{(\d+)\}ms, url: \{(.*)\}, method: \{(.*)\}, body: | 1. 生成日志数量 2. 生成日志间隔 3. url 4. method 5. body | 生成http日志,发送到LoongCollector input_http_server | | When | ^add k8s label \{(.*)\} | k8s标签 | 为k8s资源添加标签 | | When | ^remove k8s label \{(.*)\} | k8s标签 | 为k8s资源移除标签 | | When | ^start docker-compose dependencies \{(\S+)\} | 依赖服务 | 启动docker-compose依赖服务 | @@ -39,6 +39,7 @@ iLogtail提供了一个完整的E2E测试引擎,方便您快速开展集成测 ### 1. 
编写行为函数 如果您需要添加新的行为函数,可以在`engine`目录下添加一个Go函数。不同目录下的行为函数的职责有所不同: + - `cleanup`:清理测试环境,其中的测试函数会默认在测试结束后执行。无需在配置文件中显式声明使用。 - `control`:管控相关的行为函数,如初始化环境、添加配置等。 - `setup`:初始化测试环境,并提供远程调用的相关功能。 @@ -65,12 +66,12 @@ return context.WithValue(ctx, key, value), nil ```go func scenarioInitializer(ctx *godog.ScenarioContext) { - // Given + // Given - // When + // When - // Then - ctx.Then(`^there is \{(\d+)\} logs$`, verify.LogCount) + // Then + ctx.Then(`^there is \{(\d+)\} logs$`, verify.LogCount) } ``` @@ -92,4 +93,4 @@ ctx.Then(`^there is \{(\d+)\} logs$`, verify.LogCount) Then there is {100} logs ``` -在运行测试时,测试框架会根据配置文件中的行为,调用对应的行为函数,并传递参数。 \ No newline at end of file +在运行测试时,测试框架会根据配置文件中的行为,调用对应的行为函数,并传递参数。 diff --git a/docs/cn/developer-guide/test/e2e-test.md b/docs/cn/developer-guide/test/e2e-test.md index 1b7d1596fa..ae55c336f9 100644 --- a/docs/cn/developer-guide/test/e2e-test.md +++ b/docs/cn/developer-guide/test/e2e-test.md @@ -1,11 +1,13 @@ # E2E测试 -iLogtail提供了一个完整的E2E测试引擎,方便您快速开展集成测试,从而进一步保证代码质量。在大部分情况下,您只需要编写一个配置文件来定义测试行为,即可轻松完成测试。 +LoongCollector 提供了一个完整的E2E测试引擎,方便您快速开展集成测试,从而进一步保证代码质量。在大部分情况下,您只需要编写一个配置文件来定义测试行为,即可轻松完成测试。 ## 工作原理 -E2E测试采用行为驱动开发(Behavior-Driven Development)的设计思路,通过定义一系列测试行为,并通过配置文件的方式来描述测试场景,从而实现对插件的集成测试。测试引擎会根据配置文件中的内容,正则匹配对应的函数,并解析配置文件中的参数,传递给对应的函数。从而完成自动创建测试环境、启动iLogtail、触发日志生成、验证日志内容等一系列操作,最终输出测试报告。 +E2E测试采用行为驱动开发(Behavior-Driven Development)的设计思路,通过定义一系列测试行为,并通过配置文件的方式来描述测试场景,从而实现对插件的集成测试。测试引擎会根据配置文件中的内容,正则匹配对应的函数,并解析配置文件中的参数,传递给对应的函数。从而完成自动创建测试环境、启动 LoongCollector、触发日志生成、验证日志内容等一系列操作,最终输出测试报告。 + 相关参考: + - [https://cucumber.io/docs/bdd/](https://cucumber.io/docs/bdd/) - [https://github.com/cucumber/godog](https://github.com/cucumber/godog) @@ -16,7 +18,7 @@ E2E测试采用行为驱动开发(Behavior-Driven Development)的设计思 在准备开始进行集成测试前,您首先需要准备以下内容: - 测试环境:主机(可通过SSH访问)、K8s集群(可通过kubeconfig访问)、Docker-Compose环境(需在本地安装docker-compose) -- 部署 iLogtail +- 部署 LoongCollector ### 配置文件 @@ -47,7 +49,7 @@ Feature: input static file - "/root/test/**/a*.log" MaxDirSearchDepth: 
10 """ - Given iLogtail container mount {./a.log} to {/root/test/1/2/3/axxxx.log} + Given LoongCollector container mount {./a.log} to {/root/test/1/2/3/axxxx.log} When start docker-compose {input_static_file} Then there is at least {1000} logs Then the log fields match kv @@ -84,4 +86,4 @@ TEST_CASE=input_canal go test -v -timeout 30m -run ^TestE2EOnDockerCompose$ gith ### 拓展 -如果目前engine中已有的测试行为无法满足您的需求,您可以参考以下[添加指南](e2e-test-step.md),自行拓展测试行为。 \ No newline at end of file +如果目前engine中已有的测试行为无法满足您的需求,您可以参考以下[添加指南](e2e-test-step.md),自行拓展测试行为。 diff --git a/docs/cn/developer-guide/test/unit-test.md b/docs/cn/developer-guide/test/unit-test.md index ebfef61dc9..ad4f58395a 100644 --- a/docs/cn/developer-guide/test/unit-test.md +++ b/docs/cn/developer-guide/test/unit-test.md @@ -10,9 +10,11 @@ C++部分单测基于 gtest 实现,具体编写方法可以参考已有测试 1. 进入开发容器中,参考[开发环境](../development-environment.md)。 2. CMake 设置参数 BUILD_LOGTAIL_UT=ON,编译单测 -``` -cmake -DBUILD_LOGTAIL_UT=ON <其他编译参数> .. -``` + + ```shell + cmake -DBUILD_LOGTAIL_UT=ON <其他编译参数> .. + ``` + 3. 运行脚本 `./scripts/run_core_ut.sh`,运行单测 4. 
生成覆盖率报告 @@ -32,7 +34,7 @@ python3 tools/coverage-diff/main.py coverage-report/index.txt ### 测试工具 -从插件开发以及 [日志打印](How-to-use-logger.md) 篇幅可以看到,ilogtail.Context 接口包含了iLogtail 的元配置信息,因此提供了Mock Context 以及Mock Collector 实现进行单元测试。 +从插件开发以及 [日志打印](How-to-use-logger.md) 篇幅可以看到,ilogtail.Context 接口包含了 LoongCollector 的元配置信息,因此提供了Mock Context 以及Mock Collector 实现进行单元测试。 ```go import ( diff --git a/docs/cn/installation/release-notes/release-notes.md b/docs/cn/installation/release-notes/release-notes.md index cdf1d507ee..110b65ecfd 100644 --- a/docs/cn/installation/release-notes/release-notes.md +++ b/docs/cn/installation/release-notes/release-notes.md @@ -97,4 +97,5 @@ docker pull sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/loongcollector-c ## iLogtail 版本 [iLogtail 发布记录(2.x版本)](release-notes-ilogtail-2x.md) + [iLogtail 发布记录(1.x版本)](release-notes-ilogtail-1x.md) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 6b5e6f6e61..03074abc80 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -123,7 +123,11 @@ func initNormalLogger() { for _, option := range defaultProductionOptions { option() } - setLogConf(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml")) + confDir := config.LoongcollectorGlobalConfig.LoongcollectorConfDir + if _, err := os.Stat(confDir); os.IsNotExist(err) { + _ = os.MkdirAll(confDir, os.ModePerm) + } + setLogConf(path.Join(confDir, "plugin_logger.xml")) } // initTestLogger extracted from Init method for unit test. 
diff --git a/scripts/plugin_build.sh b/scripts/plugin_build.sh index fa70e344de..74f651d0f3 100755 --- a/scripts/plugin_build.sh +++ b/scripts/plugin_build.sh @@ -30,7 +30,7 @@ OUT_DIR=${3:-output} VERSION=${4:-0.0.1} PLUGINS_CONFIG_FILE=${5:-${PLUGINS_CONFIG_FILE:-plugins.yml,external_plugins.yml}} GO_MOD_FILE=${6:-${GO_MOD_FILE:-go.mod}} -NAME=ilogtail +NAME=loongcollector LDFLAGS="${GO_LDFLAGS:-}"' -X "github.com/alibaba/ilogtail/pkg/config.BaseVersion='$VERSION'"' BUILD_FLAG=${BUILD_FLAG:-} From a88cb5923bb94424cd19576c58d28af0877b4bd3 Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Tue, 10 Dec 2024 16:30:16 +0800 Subject: [PATCH 2/8] update plugins and fix bug (#1953) --- docs/cn/README.md | 2 +- docs/cn/SUMMARY.md | 2 +- .../aggregator-content-value-group.md | 2 +- docs/cn/plugins/overview.md | 208 ++++++++++-------- .../processor/spl/processor-spl-native.md | 66 ++++++ 5 files changed, 185 insertions(+), 95 deletions(-) diff --git a/docs/cn/README.md b/docs/cn/README.md index 224bd2c605..ca14bc25bf 100644 --- a/docs/cn/README.md +++ b/docs/cn/README.md @@ -1,6 +1,6 @@ # 什么是LoongCollector -![logo]() +![logo](https://ilogtail-community-edition.oss-cn-shanghai.aliyuncs.com/images/readme/loongcollector-icon.png) LoongCollector 是一款集卓越性能、超强稳定性和灵活可编程性于一身的数据采集器,专为构建下一代可观测 Pipeline 设计。源自阿里云可观测性团队所开源的 iLogtail 项目,在继承了 iLogtail 强大的日志采集与处理能力的基础上,进行了全面的功能升级与扩展。从原来单一日志场景,逐步扩展为可观测数据采集、本地计算、服务发现的统一体。 diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md index 6222d90442..c56199a93f 100644 --- a/docs/cn/SUMMARY.md +++ b/docs/cn/SUMMARY.md @@ -147,7 +147,7 @@ * [增加新的日志协议](developer-guide/log-protocol/How-to-add-new-protocol.md) * 协议 * [sls协议](developer-guide/log-protocol/protocol-spec/sls.md) - * [单条协议](developer-guide/log-protocol/protocol-spec/single.md) + * [单条协议](developer-guide/log-protocol/protocol-spec/custom_single.md) * [raw协议](developer-guide/log-protocol/protocol-spec/raw.md) * 插件开发 * 
[开源插件开发引导](developer-guide/plugin-development/plugin-development-guide.md) diff --git a/docs/cn/plugins/aggregator/aggregator-content-value-group.md b/docs/cn/plugins/aggregator/aggregator-content-value-group.md index 72089632ff..dec7cde635 100644 --- a/docs/cn/plugins/aggregator/aggregator-content-value-group.md +++ b/docs/cn/plugins/aggregator/aggregator-content-value-group.md @@ -1,4 +1,4 @@ -# 基础聚合 +# 按Key聚合 ## 简介 diff --git a/docs/cn/plugins/overview.md b/docs/cn/plugins/overview.md index 46449ce436..741c9d26af 100644 --- a/docs/cn/plugins/overview.md +++ b/docs/cn/plugins/overview.md @@ -2,122 +2,146 @@ ## 输入 -| 名称 | 提供方 | 简介 | -|-------------------------------------------------------------------------------|------------------------------------------------------------|-------------------------------------------------------| -| [`input_file`](input/native/input-file.md)
文本日志 | SLS官方 | 文本采集。 | -| [`input_container_stdio`](input/native/input-container-stdio.md)
容器标准输出(原生插件) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | -| [`input_observer_network`](input/native/metric-observer.md)
eBPF网络调用数据 | SLS官方 | 支持从网络系统调用中收集四层网络调用,并借助网络解析模块,可以观测七层网络调用细节。 | -| [`input_command`](input/extended/input-command.md)
脚本执行数据 | 社区
[`didachuxing`](https://github.com/didachuxing) | 采集脚本执行数据。 | -| [`input_docker_stdout`](input/extended/service-docker-stdout.md)
容器标准输出 | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | -| [`metric_debug_file`](input/extended/metric-debug-file.md)
文本日志(debug) | SLS官方 | 用于调试的读取文件内容的插件。 | -| [`metric_input_example`](input/extended/metric-input-example.md)
MetricInput示例插件 | SLS官方 | MetricInput示例插件。 | -| [`metric_meta_host`](input/extended/metric-meta-host.md)
主机Meta数据 | SLS官方 | 主机Meta数据。 | -| [`metric_mock`](input/extended/metric-mock.md)
Mock数据-Metric | SLS官方 | 生成metric模拟数据的插件。 | -| [`metric_system_v2`](input/extended/metric-system.md)
主机监控数据 | SLS官方 | 主机监控数据。 | -| [`service_canal`](input/extended/service-canal.md)
MySQL Binlog | SLS官方 | 将MySQL Binlog输入到iLogtail。 | -| [`service_go_profile`](input/extended/service-goprofile.md)
GO Profile | SLS官方 | 采集Golang pprof 性能数据。 | -| [`service_gpu_metric`](input/extended/service-gpu.md)
GPU数据 | SLS官方 | 支持收集英伟达GPU指标。 | -| [`service_http_server`](input/extended/service-http-server.md)
HTTP数据 | SLS官方 | 接收来自unix socket、http/https、tcp的请求,并支持sls协议、otlp等多种协议。 | -| [`service_input_example`](input/extended/service-input-example.md)
ServiceInput示例插件 | SLS官方 | ServiceInput示例插件。 | -| [`service_journal`](input/extended/service-journal.md)
Journal数据 | SLS官方 | 从原始的二进制文件中采集Linux系统的Journal(systemd)日志。 | -| [`service_kafka`](input/extended/service-kafka.md)
Kafka | SLS官方 | 将Kafka数据输入到iLogtail。 | -| [`service_mock`](input/extended/service-mock.md)
Mock数据-Service | SLS官方 | 生成service模拟数据的插件。 | -| [`service_mssql`](input/extended/service-mssql.md)
SqlServer查询数据 | SLS官方 | 将Sql Server数据输入到iLogtail。 | -| [`service_otlp`](input/extended/service-otlp.md)
OTLP数据 | 社区
[`Zhu Shunjia`](https://github.com/shunjiazhu) | 通过http/grpc协议,接收OTLP数据。 | -| [`service_pgsql`](input/extended/service-pgsql.md)
PostgreSQL查询数据 | SLS官方 | 将PostgresSQL数据输入到iLogtail。 | -| [`service_syslog`](input/extended/service-syslog.md)
Syslog数据 | SLS官方 | 采集syslog数据。 | -| [`input_file_security`](input/native/input-file-security.md)
文件安全数据 | SLS官方 | 文件安全数据采集。 | -| [`input_network_observer`](input/native/input-network-observer.md)
网络可观测数据 | SLS官方 | 网络可观测数据采集。 | -| [`input_network_security`](input/native/input-network-security.md)
网络安全数据 | SLS官方 | 网络安全数据采集。 | -| [`input_process_security`](input/native/input-process-security.md)
进程安全数据 | SLS官方 | 进程安全数据采集。 | +### 原生插件 + +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `input_file`
[文本日志](input/native/input-file.md) | SLS官方 | 文本采集。 | +| `input_container_stdio`
[容器标准输出](input/native/input-container-stdio.md) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | +| `input_ebpf_file_security`
[eBPF文件安全数据](input/native/input-file-security.md) | SLS官方 | eBPF文件安全数据采集。 | +| `input_ebpf_network_observer`
[eBPF网络可观测数据](input/native/input-network-observer.md) | SLS官方 | eBPF网络可观测数据采集。 | +| `input_ebpf_network_security`
[eBPF网络安全数据](input/native/input-network-security.md) | SLS官方 | eBPF网络安全数据采集。 | +| `input_ebpf_process_security`
[eBPF进程安全数据](input/native/input-process-security.md) | SLS官方 | eBPF进程安全数据采集。 | +| `input_observer_network`
[eBPF网络调用数据](input/native/metric-observer.md) | SLS官方 | 支持从网络系统调用中收集四层网络调用,并借助网络解析模块,可以观测七层网络调用细节。 | + +### 扩展插件 + +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `input_command`
[脚本执行数据](input/extended/input-command.md) | 社区
[didachuxing](https://github.com/didachuxing) | 采集脚本执行数据。 | +| `input_docker_stdout`
[容器标准输出](input/extended/service-docker-stdout.md) | SLS官方 | 从容器标准输出/标准错误流中采集日志。 | +| `metric_debug_file`
[文本日志(debug)](input/extended/metric-debug-file.md) | SLS官方 | 用于调试的读取文件内容的插件。 | +| `metric_input_example`
[MetricInput示例插件](input/extended/metric-input-example.md) | SLS官方 | MetricInput示例插件。 | +| `metric_meta_host`
[主机Meta数据](input/extended/metric-meta-host.md) | SLS官方 | 主机Meta数据。 | +| `metric_mock`
[Mock数据-Metric](input/extended/metric-mock.md) | SLS官方 | 生成metric模拟数据的插件。 | +| `metric_system_v2`
[主机监控数据](input/extended/metric-system.md) | SLS官方 | 主机监控数据。 | +| `service_canal`
[MySQL Binlog](input/extended/service-canal.md) | SLS官方 | 将MySQL Binlog输入到iLogtail。 | +| `service_go_profile`
[GO Profile](input/extended/service-goprofile.md) | SLS官方 | 采集Golang pprof 性能数据。 | +| `service_gpu_metric`
[GPU数据](input/extended/service-gpu.md) | SLS官方 | 支持收集英伟达GPU指标。 | +| `service_http_server`
[HTTP数据](input/extended/service-http-server.md) | SLS官方 | 接收来自unix socket、http/https、tcp的请求,并支持sls协议、otlp等多种协议。 | +| `service_input_example`
[ServiceInput示例插件](input/extended/service-input-example.md) | SLS官方 | ServiceInput示例插件。 | +| `service_journal`
[Journal数据](input/extended/service-journal.md) | SLS官方 | 从原始的二进制文件中采集Linux系统的Journal(systemd)日志。 | +| `service_kafka`
[Kafka](input/extended/service-kafka.md) | SLS官方 | 将Kafka数据输入到iLogtail。 | +| `service_mock`
[Mock数据-Service](input/extended/service-mock.md) | SLS官方 | 生成service模拟数据的插件。 | +| `service_mssql`
[SqlServer查询数据](input/extended/service-mssql.md) | SLS官方 | 将Sql Server数据输入到iLogtail。 | +| `service_otlp`
[OTLP数据](input/extended/service-otlp.md) | 社区
[Zhu Shunjia](https://github.com/shunjiazhu) | 通过http/grpc协议,接收OTLP数据。 | +| `service_pgsql`
[PostgreSQL查询数据](input/extended/service-pgsql.md) | SLS官方 | 将PostgresSQL数据输入到iLogtail。 | +| `service_syslog`
[Syslog数据](input/extended/service-syslog.md) | SLS官方 | 采集syslog数据。 | ## 处理 +### SPL 处理 + +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `processor_spl`
[SPL 处理](processor/spl/processor-spl-native.md) | SLS官方 | 通过SPL语言解析数据 | + ### 原生插件 | 名称 | 提供方 | 简介 | -| --- | ----- | ---- | -| [`processor_parse_regex_native`](processor/native/processor-parse-regex-native.md)
正则解析原生处理插件 | SLS官方 | 通过正则匹配解析事件指定字段内容并提取新字段。 | -| [`processor_parse_json_native`](processor/native/processor-parse-json-native.md)
Json解析原生处理插件 | SLS官方 | 解析事件中`Json`格式字段内容并提取新字段。 | -| [`processor_parse_delimiter_native`](processor/native/processor-parse-delimiter-native.md)
分隔符解析原生处理插件 | SLS官方 | 解析事件中分隔符格式字段内容并提取新字段。 | -| [`processor_parse_timestamp_native`](processor/native/processor-parse-timestamp-native.md)
时间解析原生处理插件 | SLS官方 | 解析事件中记录时间的字段,并将结果置为事件的__time__字段。 | -| [`processor_filter_regex_native`](processor/native/processor-filter-regex-native.md)
过滤原生处理插件 | SLS官方 | 根据事件字段内容来过滤事件。 | -| [`processor_desensitize_native`](processor/native/processor-desensitize-native.md)
脱敏原生处理插件 | SLS官方 | 对事件指定字段内容进行脱敏。 | +| --- | --- | --- | +| `processor_parse_regex_native`
[正则解析原生处理插件](processor/native/processor-parse-regex-native.md) | SLS官方 | 通过正则匹配解析事件指定字段内容并提取新字段。 | +| `processor_parse_json_native`
[Json解析原生处理插件](processor/native/processor-parse-json-native.md) | SLS官方 | 解析事件中 Json 格式字段内容并提取新字段。 | +| `processor_parse_delimiter_native`
[分隔符解析原生处理插件](processor/native/processor-parse-delimiter-native.md) | SLS官方 | 解析事件中分隔符格式字段内容并提取新字段。 | +| `processor_parse_timestamp_native`
[时间解析原生处理插件](processor/native/processor-parse-timestamp-native.md) | SLS官方 | 解析事件中记录时间的字段,并将结果置为事件的 \_\_time\_\_ 字段。 | +| `processor_filter_regex_native`
[过滤原生处理插件](processor/native/processor-filter-regex-native.md) | SLS官方 | 根据事件字段内容来过滤事件。 | +| `processor_desensitize_native`
[脱敏原生处理插件](processor/native/processor-desensitize-native.md) | SLS官方 | 对事件指定字段内容进行脱敏。 | -### 拓展插件 +### 扩展插件 | 名称 | 提供方 | 简介 | -| --- | ----- | ---- | -| [`processor_add_fields`](processor/extended/extenprocessor-add-fields.md)
添加字段 | SLS官方 | 添加字段。 | -| [`processor_cloud_meta`](processor/extended/processor-cloudmeta.md)
添加云资产信息 | SLS官方 | 为日志增加云平台元数据信息。 | -| [`processor_default`](processor/extended/processor-default.md)
原始数据 | SLS官方 | 不对数据任何操作,只是简单的数据透传。 | -| [`processor_desensitize`](processor/extended/processor-desensitize.md)
数据脱敏 | SLS官方
[`Takuka0311`](https://github.com/Takuka0311) | 对敏感数据进行脱敏处理。 | -| [`processor_drop`](processor/extended/processor-drop.md)
丢弃字段 | SLS官方 | 丢弃字段。 | -| [`processor_encrypt`](processor/extended/processor-encrypy.md)
字段加密 | SLS官方 | 加密字段 | -| [`processor_fields_with_conditions`](processor/extended/processor-fields-with-condition.md)
条件字段处理 | 社区
[`pj1987111`](https://github.com/pj1987111) | 根据日志部分字段的取值,动态进行字段扩展或删除。 | -| [`processor_filter_regex`](processor/extended/processor-filter-regex.md)
日志过滤 | SLS官方 | 通过正则匹配过滤日志。 | -| [`processor_gotime`](processor/extended/processor-gotime.md)
Gotime | SLS官方 | 以 Go 语言时间格式解析原始日志中的时间字段。 | -| [`processor_grok`](processor/extended/processor-grok.md)
Grok | SLS官方
[`Takuka0311`](https://github.com/Takuka0311) | 通过 Grok 语法对数据进行处理 | -| [`processor_json`](processor/extended/processor-json.md)
Json | SLS官方 | 实现对Json格式日志的解析。 | -| [`processor_log_to_sls_metric`](processor/extended/processor-log-to-sls-metric.md)
日志转sls metric | SLS官方 | 将日志转sls metric | -| [`processor_regex`](processor/extended/processor-regex.md)
正则 | SLS官方 | 通过正则匹配的模式实现文本日志的字段提取。 | -| [`processor_rename`](processor/extended/processor-rename.md)
重命名字段 | SLS官方 | 重命名字段。 | -| [`processor_split_char`](processor/extended/processor-delimiter.md)
分隔符 | SLS官方 | 通过单字符的分隔符提取字段。 | -| [`processor_split_string`](processor/extended/processor-delimiter.md)
分隔符 | SLS官方 | 通过多字符的分隔符提取字段。 | -| [`processor_split_key_value`](processor/extended/processor-split-key-value.md)
键值对 | SLS官方 | 通过切分键值对的方式提取字段。 | -| [`processor_split_log_regex`](processor/extended/processor-split-log-regex.md)
多行切分 | SLS官方 | 实现多行日志(例如Java程序日志)的采集。 | -| [`processor_string_replace`](processor/extended/processor-string-replace.md)
字符串替换 | SLS官方
[`pj1987111`](https://github.com/pj1987111) | 通过全文匹配、正则匹配、去转义字符等方式对文本日志进行内容替换。 | +| --- | --- | --- | +| `processor_add_fields`
[添加字段](processor/extended/extenprocessor-add-fields.md) | SLS官方 | 添加字段。 | +| `processor_cloud_meta`
[添加云资产信息](processor/extended/processor-cloudmeta.md) | SLS官方 | 为日志增加云平台元数据信息。 | +| `processor_default`
[原始数据](processor/extended/processor-default.md) | SLS官方 | 不对数据任何操作,只是简单的数据透传。 | +| `processor_desensitize`
[数据脱敏](processor/extended/processor-desensitize.md) | SLS官方
[Takuka0311](https://github.com/Takuka0311) | 对敏感数据进行脱敏处理。 | +| `processor_drop`
[丢弃字段](processor/extended/processor-drop.md) | SLS官方 | 丢弃字段。 | +| `processor_encrypt`
[字段加密](processor/extended/processor-encrypy.md) | SLS官方 | 加密字段 | +| `processor_fields_with_conditions`
[条件字段处理](processor/extended/processor-fields-with-condition.md) | 社区
[pj1987111](https://github.com/pj1987111) | 根据日志部分字段的取值,动态进行字段扩展或删除。 | +| `processor_filter_regex`
[日志过滤](processor/extended/processor-filter-regex.md) | SLS官方 | 通过正则匹配过滤日志。 | +| `processor_gotime`
[Gotime](processor/extended/processor-gotime.md) | SLS官方 | 以 Go 语言时间格式解析原始日志中的时间字段。 | +| `processor_grok`
[Grok](processor/extended/processor-grok.md) | SLS官方
[Takuka0311](https://github.com/Takuka0311) | 通过 Grok 语法对数据进行处理 | +| `processor_json`
[Json](processor/extended/processor-json.md) | SLS官方 | 实现对Json格式日志的解析。 | +| `processor_log_to_sls_metric`
[日志转sls metric](processor/extended/processor-log-to-sls-metric.md) | SLS官方 | 将日志转sls metric | +| `processor_regex`
[正则](processor/extended/processor-regex.md) | SLS官方 | 通过正则匹配的模式实现文本日志的字段提取。 | +| `processor_rename`
[重命名字段](processor/extended/processor-rename.md) | SLS官方 | 重命名字段。 | +| `processor_split_char`
[分隔符](processor/extended/processor-delimiter.md) | SLS官方 | 通过单字符的分隔符提取字段。 | +| `processor_split_string`
[分隔符](processor/extended/processor-delimiter.md) | SLS官方 | 通过多字符的分隔符提取字段。 | +| `processor_split_key_value`
[键值对](processor/extended/processor-split-key-value.md) | SLS官方 | 通过切分键值对的方式提取字段。 | +| `processor_split_log_regex`
[多行切分](processor/extended/processor-split-log-regex.md) | SLS官方 | 实现多行日志(例如Java程序日志)的采集。 | +| `processor_string_replace`
[字符串替换](processor/extended/processor-string-replace.md) | SLS官方
[pj1987111](https://github.com/pj1987111) | 通过全文匹配、正则匹配、去转义字符等方式对文本日志进行内容替换。 | ## 聚合 -| 名称 | 提供方 | 简介 | -|----------------------------------------------------------------------------------|-----------------------------------------------------|-----------------------------------| -| [`aggregator_content_value_group`](aggregator/aggregator-content-value-group.md) | 社区
[`snakorse`](https://github.com/snakorse) | 按照指定的Key对采集到的数据进行分组聚合 | -| [`aggregator_metadata_group`](aggregator/aggregator-metadata-group.md) | 社区
[`urnotsally`](https://github.com/urnotsally) | 按照指定的Metadata Keys对采集到的数据进行重新分组聚合 | +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `aggregator_base`
[基础聚合](aggregator/aggregator-base.md) | SLS官方 | 对单条日志进行聚合 | +| `aggregator_context`
[上下文聚合](aggregator/aggregator-context.md) | SLS官方 | 根据日志来源对单条日志进行聚合 | +| `aggregator_content_value_group`
[按Key聚合](aggregator/aggregator-content-value-group.md)| 社区
[snakorse](https://github.com/snakorse) | 按照指定的Key对采集到的数据进行分组聚合 | +| `aggregator_metadata_group`
[GroupMetadata聚合](aggregator/aggregator-metadata-group.md) | 社区
[urnotsally](https://github.com/urnotsally) | 按照指定的Metadata Keys对采集到的数据进行重新分组聚合 | ## 输出 -| 名称 | 提供方 | 简介 | -|------------------------------------------------------------------------------|-----------------------------------------------------|-------------------------------------------| -| [`flusher_kafka`](flusher/extended/flusher-kafka.md)
Kafka | 社区 | 将采集到的数据输出到Kafka。推荐使用下面的flusher_kafka_v2 | -| [`flusher_kafka_v2`](flusher/extended/flusher-kafka_v2.md)
Kafka | 社区
[`shalousun`](https://github.com/shalousun) | 将采集到的数据输出到Kafka。 | -| [`flusher_sls`](flusher/native/flusher-sls.md)
SLS | SLS官方 | 将采集到的数据输出到SLS。 | -| [`flusher_stdout`](flusher/extended/flusher-stdout.md)
标准输出/文件 | SLS官方 | 将采集到的数据输出到标准输出或文件。 | -| [`flusher_otlp_log`](flusher/extended/flusher-otlp.md)
OTLP日志 | 社区
[`liuhaoyang`](https://github.com/liuhaoyang) | 将采集到的数据支持`Opentelemetry log protocol`的后端。 | -| [`flusher_http`](flusher/extended/flusher-http.md)
HTTP | 社区
[`snakorse`](https://github.com/snakorse) | 将采集到的数据以http方式输出到指定的后端。 | -| [`flusher_pulsar`](flusher/extended/flusher-pulsar.md)
Kafka | 社区
[`shalousun`](https://github.com/shalousun) | 将采集到的数据输出到Pulsar。 | -| [`flusher_clickhouse`](flusher/extended/flusher-clickhouse.md)
ClickHouse | 社区
[`kl7sn`](https://github.com/kl7sn) | 将采集到的数据输出到ClickHouse。 | -| [`flusher_elasticsearch`](flusher/extended/flusher-elasticsearch.md)
ElasticSearch | 社区
[`joeCarf`](https://github.com/joeCarf) | 将采集到的数据输出到ElasticSearch。 | -| [`flusher_loki`](flusher/extended/loki.md)
Loki | 社区
[`abingcbc`](https://github.com/abingcbc) | 将采集到的数据输出到Loki。 | +### 原生插件 + +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `flusher_sls`
[SLS](flusher/native/flusher-sls.md) | SLS官方 | 将采集到的数据输出到SLS。 | +| `flusher_blackhole`
[原生Flusher测试](flusher/native/flusher-blackhole.md) | SLS官方 | 直接丢弃采集的事件,属于原生输出插件,主要用于测试。 | + +### 扩展插件 + +| 名称 | 提供方 | 简介 | +| --- | --- | --- | +| `flusher_kafka`
[Kafka](flusher/extended/flusher-kafka.md) | 社区 | 将采集到的数据输出到Kafka。推荐使用下面的flusher_kafka_v2 | +| `flusher_kafka_v2`
[Kafka V2](flusher/extended/flusher-kafka_v2.md) | 社区
[shalousun](https://github.com/shalousun) | 将采集到的数据输出到Kafka。 | +| `flusher_stdout`
[标准输出/文件](flusher/extended/flusher-stdout.md) | SLS官方 | 将采集到的数据输出到标准输出或文件。 | +| `flusher_otlp_log`
[OTLP日志](flusher/extended/flusher-otlp.md) | 社区
[liuhaoyang](https://github.com/liuhaoyang) | 将采集到的数据支持`Opentelemetry log protocol`的后端。 | +| `flusher_http`
[HTTP](flusher/extended/flusher-http.md) | 社区
[snakorse](https://github.com/snakorse) | 将采集到的数据以http方式输出到指定的后端。 | +| `flusher_pulsar`
[Pulsar](flusher/extended/flusher-pulsar.md) | 社区
[shalousun](https://github.com/shalousun) | 将采集到的数据输出到Pulsar。 | +| `flusher_clickhouse`
[ClickHouse](flusher/extended/flusher-clickhouse.md) | 社区
[kl7sn](https://github.com/kl7sn) | 将采集到的数据输出到ClickHouse。 | +| `flusher_elasticsearch`
[ElasticSearch](flusher/extended/flusher-elasticsearch.md) | 社区
[joeCarf](https://github.com/joeCarf) | 将采集到的数据输出到ElasticSearch。 | +| `flusher_loki`
[Loki](flusher/extended/loki.md) | 社区
[abingcbc](https://github.com/abingcbc) | 将采集到的数据输出到Loki。 | +| `flusher_prometheus`
[Prometheus](flusher/extended/flusher-prometheus.md) | 社区
| 将采集到的数据,经过处理后,通过http格式发送到指定的 Prometheus RemoteWrite 地址。 | ## 扩展 -### ClientAuthenticator +* ClientAuthenticator -| 名称 | 提供方 | 简介 | -|-----------------------------------------------------------|-------------------------------------------------|--------------------------------| -| [`ext_basicauth`](extension/ext-basicauth.md)
Basic认证 | 社区
[`snakorse`](https://github.com/snakorse) | 为 http_flusher 插件提供 basic 认证能力 | + | 名称 | 提供方 | 简介 | + | --- | --- | --- | + | `ext_basicauth`
[Basic认证](extension/ext-basicauth.md) | 社区
[snakorse](https://github.com/snakorse) | 为 http_flusher 插件提供 basic 认证能力 | -### FlushInterceptor +* FlushInterceptor -| 名称 | 提供方 | 简介 | -|-----------------------------------------------------------------------------|-------------------------------------------------|-------------------------------------------| -| [`ext_groupinfo_filter`](extension/ext-groupinfo-filter.md)
GroupInfo过滤 | 社区
[`snakorse`](https://github.com/snakorse) | 为 http_flusher 插件提供根据GroupInfo筛选最终提交数据的能力 | + | 名称 | 提供方 | 简介 | + | --- | --- | --- | + | `ext_groupinfo_filter`
[GroupInfo过滤](extension/ext-groupinfo-filter.md) | 社区
[snakorse](https://github.com/snakorse) | 为 http_flusher 插件提供根据GroupInfo筛选最终提交数据的能力 | -### RequestInterceptor +* RequestInterceptor -| 名称 | 提供方 | 简介 | -|---------------------------------------------------------------------|-------------------------------------------------|---------------------------| -| [`ext_request_breaker`](extension/ext-request-breaker.md)
请求熔断器 | 社区
[`snakorse`](https://github.com/snakorse) | 为 http_flusher 插件提供请求熔断能力 | + | 名称 | 提供方 | 简介 | + | --- | --- | --- | + | `ext_request_breaker`
[请求熔断器](extension/ext-request-breaker.md) | 社区
[snakorse](https://github.com/snakorse) | 为 http_flusher 插件提供请求熔断能力 | -### Decoder +* Decoder -| 名称 | 提供方 | 简介 | -|----------------------------------------------------------------------------|-------------------------------------------------|-----------------------------| -| [`ext_default_decoder`](extension/ext-default-decoder.md)
默认的decoder扩展 | 社区
[`snakorse`](https://github.com/snakorse) | 将内置支持的Format以Decoder扩展的形式封装 | + | 名称 | 提供方 | 简介 | + | --- | --- | --- | + | `ext_default_decoder`
[默认的decoder扩展](extension/ext-default-decoder.md) | 社区
[snakorse](https://github.com/snakorse) | 将内置支持的Format以Decoder扩展的形式封装 | -### Encoder +* Encoder -| 名称 | 提供方 | 简介 | -|----------------------------------------------------------------------------|--------------------------------------------------------|-----------------------------| -| [`ext_default_encoder`](extension/ext-default-encoder.md)
默认的encoder扩展 | 社区
[`yuanshuai.1900`](https://github.com/aiops1900) | 将内置支持的Format以Encoder扩展的形式封装 | + | 名称 | 提供方 | 简介 | + | --- | --- | --- | + | `ext_default_encoder`
[默认的encoder扩展](extension/ext-default-encoder.md) | 社区
[yuanshuai.1900](https://github.com/aiops1900) | 将内置支持的Format以Encoder扩展的形式封装 | diff --git a/docs/cn/plugins/processor/spl/processor-spl-native.md b/docs/cn/plugins/processor/spl/processor-spl-native.md index e69de29bb2..a0038211a6 100644 --- a/docs/cn/plugins/processor/spl/processor-spl-native.md +++ b/docs/cn/plugins/processor/spl/processor-spl-native.md @@ -0,0 +1,66 @@ +# SPL处理 + +## 简介 + +`processor_spl`插件通过SPL语句处理数据 + +## 版本 + +[Stable](../../stability-level.md) + +## 配置参数 + +| **参数** | **类型** | **是否必填** | **默认值** | **说明** | +| --- | --- | --- | --- | --- | +| Type | string | 是 | / | 插件类型。固定为processor_spl。 | +| Script | string | 是 | / | SPL语句。日志内容默认存在content字段中。 | +| TimeoutMilliSeconds | int | 否 | 1000 | 单次SPL语句执行的超时时间。 | +| MaxMemoryBytes | int | 否 | 50 \* 1024 \* 1024 | SPL引擎可使用的最大内存。 | + +## 样例 + +采集文件`/workspaces/ilogtail/debug/simple.log`,通过正则表达式解析日志内容并提取字段,并将结果输出到stdout。 + ++ 输入 + +```plain +127.0.0.1 - - [07/Jul/2022:10:43:30 +0800] "POST /PutData?Category=YunOsAccountOpLog" 0.024 18204 200 37 "-" "aliyun-sdk-java" +``` + ++ 采集配置 + +```yaml +enable: true +inputs: + - Type: input_file + FilePaths: + - /workspaces/ilogtail/debug/simple.log +processors: + - Type: processor_spl + Script: | + * + | parse-regexp content, '([\d\.]+) \S+ \S+ \[(\S+) \S+\] \"(\w+) ([^\\"]*)\" ([\d\.]+) (\d+) (\d+) (\d+|-) \"([^\\"]*)\" \"([^\\"]*)\"' as ip, time, method, url, request_time, request_length, status, length, ref_url, browser + | project-away content +flushers: + - Type: flusher_stdout + OnlyStdout: true +``` + ++ 输出 + +```json +{ + "ip": "127.0.0.1", + "time": "07/Jul/2022:10:43:30", + "method": "POST", + "url": "/PutData?Category=YunOsAccountOpLog", + "request_time": "0.024", + "request_length": "18204", + "status": "200", + "length": "37", + "ref_url": "-", + "browser": "aliyun-sdk-java" +} +``` + +更多样例可参考:[一文教会你如何使用iLogtail SPL处理日志](https://open.observability.cn/article/gpgqx50m2ry4h2mx/) From c4a0666b6a05544092c077b7679f6c0c39182a56 Mon Sep 17 00:00:00 2001 
From: Kai <33246768+KayzzzZ@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:31:52 +0800 Subject: [PATCH 3/8] update ebpf export file to avoid memory corruption (#1951) * update export file Signed-off-by: qianlu.kk * fix: fix memory management issues when passing config to observer library * set cid filter to false --------- Signed-off-by: qianlu.kk Co-authored-by: xunfei Co-authored-by: Tom Yu --- core/ebpf/SelfMonitor.cpp | 2 +- core/ebpf/SelfMonitor.h | 2 +- core/ebpf/SourceManager.cpp | 36 +++++----- core/ebpf/config.cpp | 8 +-- core/ebpf/eBPFServer.cpp | 41 ++++++----- core/ebpf/handler/ObserveHandler.cpp | 44 ++++++------ core/ebpf/handler/ObserveHandler.h | 14 ++-- core/ebpf/handler/SecurityHandler.cpp | 8 +-- core/ebpf/handler/SecurityHandler.h | 2 +- core/ebpf/include/SysAkApi.h | 10 +-- core/ebpf/include/export.h | 83 ++++++++++++++++++++--- core/unittest/ebpf/eBPFServerUnittest.cpp | 8 +-- 12 files changed, 160 insertions(+), 98 deletions(-) diff --git a/core/ebpf/SelfMonitor.cpp b/core/ebpf/SelfMonitor.cpp index d352cfbaeb..f04692262a 100644 --- a/core/ebpf/SelfMonitor.cpp +++ b/core/ebpf/SelfMonitor.cpp @@ -232,7 +232,7 @@ void eBPFSelfMonitorMgr::Suspend(const nami::PluginType type) { mInited[int(type)] = false; } -void eBPFSelfMonitorMgr::HandleStatistic(std::vector&& stats) { +void eBPFSelfMonitorMgr::HandleStatistic(std::vector& stats) { for (auto& stat : stats) { if (!stat.updated_) { continue; diff --git a/core/ebpf/SelfMonitor.h b/core/ebpf/SelfMonitor.h index 7189f3e9c4..9d31f96550 100644 --- a/core/ebpf/SelfMonitor.h +++ b/core/ebpf/SelfMonitor.h @@ -125,7 +125,7 @@ class eBPFSelfMonitorMgr { void Init(const nami::PluginType type, PluginMetricManagerPtr mgr, const std::string& name, const std::string& project); void Release(const nami::PluginType type); void Suspend(const nami::PluginType type); - void HandleStatistic(std::vector&& stats); + void HandleStatistic(std::vector& stats); private: // `mLock` is used to protect mSelfMonitors 
ReadWriteLock mLock; diff --git a/core/ebpf/SourceManager.cpp b/core/ebpf/SourceManager.cpp index 93aff008bf..3f833af299 100644 --- a/core/ebpf/SourceManager.cpp +++ b/core/ebpf/SourceManager.cpp @@ -188,7 +188,7 @@ bool SourceManager::StartPlugin(nami::PluginType plugin_type, std::unique_ptrtype = UpdataType::SECURE_UPDATE_TYPE_CONFIG_CHAGE; FillCommonConf(conf); #ifdef APSARA_UNIT_TEST_MAIN @@ -212,20 +213,21 @@ bool SourceManager::UpdatePlugin(nami::PluginType plugin_type, std::unique_ptr(i)); } @@ -241,13 +243,13 @@ bool SourceManager::StopAll() { } bool SourceManager::SuspendPlugin(nami::PluginType plugin_type) { - if (!CheckPluginRunning(plugin_type)) { - LOG_WARNING(sLogger, ("plugin not started, cannot suspend. type", int(plugin_type))); - return false; - } - auto config = std::make_unique(); - config->plugin_type_ = plugin_type; - config->type = UpdataType::SECURE_UPDATE_TYPE_SUSPEND_PROBE; + if (!CheckPluginRunning(plugin_type)) { + LOG_WARNING(sLogger, ("plugin not started, cannot suspend. type", int(plugin_type))); + return false; + } + auto config = std::make_unique(); + config->plugin_type_ = plugin_type; + config->type = UpdataType::SECURE_UPDATE_TYPE_SUSPEND_PROBE; #ifdef APSARA_UNIT_TEST_MAIN mConfig = std::move(config); return true; @@ -260,15 +262,15 @@ bool SourceManager::SuspendPlugin(nami::PluginType plugin_type) { } auto suspend_f = (suspend_func)f; - int res = suspend_f(config.release()); + int res = suspend_f(config.get()); return !res; } bool SourceManager::StopPlugin(nami::PluginType plugin_type) { if (!CheckPluginRunning(plugin_type)) { - LOG_WARNING(sLogger, ("plugin not started, do nothing. type", int(plugin_type))); - return true; + LOG_WARNING(sLogger, ("plugin not started, do nothing. 
type", int(plugin_type))); + return true; } auto config = std::make_unique(); @@ -288,7 +290,7 @@ bool SourceManager::StopPlugin(nami::PluginType plugin_type) { } auto remove_f = (remove_func)f; - int res = remove_f(config.release()); + int res = remove_f(config.get()); if (!res) mRunning[int(plugin_type)] = false; return !res; } diff --git a/core/ebpf/config.cpp b/core/ebpf/config.cpp index 026d37c70f..559b026bfd 100644 --- a/core/ebpf/config.cpp +++ b/core/ebpf/config.cpp @@ -368,10 +368,10 @@ bool SecurityOptions::Init(SecurityProbeType probeType, } nami::SecurityOption thisSecurityOption; GetSecurityProbeDefaultCallName(probeType, thisSecurityOption.call_names_); - mOptionList.emplace_back(thisSecurityOption); + mOptionList.emplace_back(std::move(thisSecurityOption)); return true; } - auto innerConfig = config["ProbeConfig"]; + const auto& innerConfig = config["ProbeConfig"]; nami::SecurityOption thisSecurityOption; // Genral Filter (Optional) std::variant thisFilter; @@ -402,8 +402,8 @@ bool SecurityOptions::Init(SecurityProbeType probeType, mContext->GetRegion()); } thisSecurityOption.filter_ = thisFilter; - GetSecurityProbeDefaultCallName(probeType, thisSecurityOption.call_names_); - mOptionList.emplace_back(thisSecurityOption); + GetSecurityProbeDefaultCallName(probeType, thisSecurityOption.call_names_); + mOptionList.emplace_back(std::move(thisSecurityOption)); mProbeType = probeType; return true; } diff --git a/core/ebpf/eBPFServer.cpp b/core/ebpf/eBPFServer.cpp index c87c14f63b..9c08757fe8 100644 --- a/core/ebpf/eBPFServer.cpp +++ b/core/ebpf/eBPFServer.cpp @@ -189,7 +189,7 @@ void eBPFServer::Stop() { for (int i = 0; i < int(nami::PluginType::MAX); i ++) { UpdatePipelineName(static_cast(i), "", ""); } - + // UpdateContext must after than StopPlugin if (mEventCB) mEventCB->UpdateContext(nullptr, -1, -1); if (mMeterCB) mMeterCB->UpdateContext(nullptr, -1, -1); @@ -199,11 +199,12 @@ void eBPFServer::Stop() { if (mFileSecureCB) 
mFileSecureCB->UpdateContext(nullptr, -1, -1); } -bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, uint32_t plugin_index, - nami::PluginType type, - const logtail::PipelineContext* ctx, - const std::variant options, PluginMetricManagerPtr mgr) { - +bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, + uint32_t plugin_index, + nami::PluginType type, + const logtail::PipelineContext* ctx, + const std::variant options, + PluginMetricManagerPtr mgr) { std::string prev_pipeline_name = CheckLoadedPipelineName(type); if (prev_pipeline_name.size() && prev_pipeline_name != pipeline_name) { LOG_WARNING(sLogger, ("pipeline already loaded, plugin type", int(type)) @@ -217,62 +218,59 @@ bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, uint32_t mMonitorMgr->Init(type, mgr, pipeline_name, ctx->GetProjectName()); // step1: convert options to export type - std::variant config; bool ret = false; auto eBPFConfig = std::make_unique(); eBPFConfig->plugin_type_ = type; - eBPFConfig->stats_handler_ = [this](auto stats){ return mMonitorMgr->HandleStatistic(std::move(stats)); }; + eBPFConfig->stats_handler_ = [this](auto& stats){ return mMonitorMgr->HandleStatistic(stats); }; // call update function // step2: call init function switch(type) { case nami::PluginType::PROCESS_SECURITY: { nami::ProcessConfig pconfig; - pconfig.process_security_cb_ = [this](auto events) { return mProcessSecureCB->handle(std::move(events)); }; + pconfig.process_security_cb_ = [this](std::vector>& events) { return mProcessSecureCB->handle(events); }; SecurityOptions* opts = std::get(options); pconfig.options_ = opts->mOptionList; - config = std::move(pconfig); // UpdateContext must ahead of StartPlugin mProcessSecureCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); - eBPFConfig->config_ = config; + eBPFConfig->config_ = std::move(pconfig); ret = mSourceManager->StartPlugin(type, std::move(eBPFConfig)); break; } case 
nami::PluginType::NETWORK_OBSERVE:{ nami::NetworkObserveConfig nconfig; + nconfig.enable_cid_filter = false; nami::ObserverNetworkOption* opts = std::get(options); if (opts->mEnableMetric) { nconfig.enable_metric_ = true; - nconfig.measure_cb_ = [this](auto events, auto ts) { return mMeterCB->handle(std::move(events), ts); }; + nconfig.measure_cb_ = [this](std::vector>& events, auto ts) { return mMeterCB->handle(events, ts); }; nconfig.enable_metric_ = true; mMeterCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); } if (opts->mEnableSpan) { nconfig.enable_span_ = true; - nconfig.span_cb_ = [this](auto events) { return mSpanCB->handle(std::move(events)); }; + nconfig.span_cb_ = [this](std::vector>& events) { return mSpanCB->handle(events); }; nconfig.enable_span_ = true; mSpanCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); } if (opts->mEnableLog) { nconfig.enable_event_ = true; - nconfig.event_cb_ = [this](auto events) { return mEventCB->handle(std::move(events)); }; + nconfig.event_cb_ = [this](std::vector>& events) { return mEventCB->handle(events); }; nconfig.enable_event_ = true; mEventCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); } - config = std::move(nconfig); - eBPFConfig->config_ = config; + eBPFConfig->config_ = std::move(nconfig); ret = mSourceManager->StartPlugin(type, std::move(eBPFConfig)); break; } case nami::PluginType::NETWORK_SECURITY:{ nami::NetworkSecurityConfig nconfig; - nconfig.network_security_cb_ = [this](auto events) { return mNetworkSecureCB->handle(std::move(events)); }; + nconfig.network_security_cb_ = [this](std::vector>& events) { return mNetworkSecureCB->handle(events); }; SecurityOptions* opts = std::get(options); nconfig.options_ = opts->mOptionList; - config = std::move(nconfig); - eBPFConfig->config_ = config; + eBPFConfig->config_ = std::move(nconfig); // UpdateContext must ahead of StartPlugin mNetworkSecureCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); ret = 
mSourceManager->StartPlugin(type, std::move(eBPFConfig)); @@ -281,11 +279,10 @@ bool eBPFServer::StartPluginInternal(const std::string& pipeline_name, uint32_t case nami::PluginType::FILE_SECURITY:{ nami::FileSecurityConfig fconfig; - fconfig.file_security_cb_ = [this](auto events) { return mFileSecureCB->handle(std::move(events)); }; + fconfig.file_security_cb_ = [this](std::vector>& events) { return mFileSecureCB->handle(events); }; SecurityOptions* opts = std::get(options); fconfig.options_ = opts->mOptionList; - config = std::move(fconfig); - eBPFConfig->config_ = config; + eBPFConfig->config_ = std::move(fconfig); // UpdateContext must ahead of StartPlugin mFileSecureCB->UpdateContext(ctx, ctx->GetProcessQueueKey(), plugin_index); ret = mSourceManager->StartPlugin(type, std::move(eBPFConfig)); diff --git a/core/ebpf/handler/ObserveHandler.cpp b/core/ebpf/handler/ObserveHandler.cpp index 2165cff094..e3e47bc01c 100644 --- a/core/ebpf/handler/ObserveHandler.cpp +++ b/core/ebpf/handler/ObserveHandler.cpp @@ -33,7 +33,7 @@ namespace ebpf { #define ADD_STATUS_METRICS(METRIC_NAME, FIELD_NAME, VALUE) \ {if (!inner->FIELD_NAME) return; \ auto event = group.AddMetricEvent(); \ - for (auto& tag : measure->tags_) { \ + for (const auto& tag : measure->tags_) { \ event->SetTag(tag.first, tag.second); \ } \ event->SetTag(std::string("status_code"), std::string(VALUE)); \ @@ -47,7 +47,7 @@ void FUNC_NAME(PipelineEventGroup& group, std::unique_ptr& measure, uin auto inner = static_cast(measure->inner_measure_.get()); \ if (!inner->FIELD_NAME) return; \ auto event = group.AddMetricEvent(); \ - for (auto& tag : measure->tags_) { \ + for (const auto& tag : measure->tags_) { \ event->SetTag(tag.first, tag.second); \ } \ event->SetName(METRIC_NAME); \ @@ -55,17 +55,17 @@ void FUNC_NAME(PipelineEventGroup& group, std::unique_ptr& measure, uin event->SetValue(UntypedSingleValue{(double)inner->FIELD_NAME}); \ } -void OtelMeterHandler::handle(std::vector>&& measures, uint64_t 
timestamp) { +void OtelMeterHandler::handle(std::vector>& measures, uint64_t timestamp) { if (measures.empty()) return; - for (auto& appBatchMeasures : measures) { + for (const auto& appBatchMeasures : measures) { PipelineEventGroup eventGroup(std::make_shared()); - for (auto& measure : appBatchMeasures->measures_) { + for (const auto& measure : appBatchMeasures->measures_) { auto type = measure->type_; if (type == MeasureType::MEASURE_TYPE_APP) { auto inner = static_cast(measure->inner_measure_.get()); auto event = eventGroup.AddMetricEvent(); - for (auto& tag : measure->tags_) { + for (const auto& tag : measure->tags_) { event->SetTag(tag.first, tag.second); } event->SetName("service_requests_total"); @@ -86,15 +86,15 @@ void OtelMeterHandler::handle(std::vector>&& spans) { +void OtelSpanHandler::handle(std::vector>& spans) { if (spans.empty()) return; - for (auto& span : spans) { + for (const auto& span : spans) { std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); - for (auto& x : span->single_spans_) { + for (const auto& x : span->single_spans_) { auto spanEvent = eventGroup.AddSpanEvent(); - for (auto& tag : x->tags_) { + for (const auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } spanEvent->SetName(x->span_name_); @@ -118,24 +118,24 @@ void OtelSpanHandler::handle(std::vector>& return; } -void EventHandler::handle(std::vector>&& events) { +void EventHandler::handle(std::vector>& events) { if (events.empty()) return; - for (auto& appEvents : events) { + for (const auto& appEvents : events) { if (!appEvents || appEvents->events_.empty()) continue; std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); - for (auto& event : appEvents->events_) { + for (const auto& event : appEvents->events_) { if (!event || event->GetAllTags().empty()) continue; auto logEvent = eventGroup.AddLogEvent(); - for (auto& tag : event->GetAllTags()) { + for (const auto& tag : 
event->GetAllTags()) { logEvent->SetContent(tag.first, tag.second); auto seconds = std::chrono::duration_cast(std::chrono::nanoseconds(event->GetTimestamp())); logEvent->SetTimestamp(seconds.count(), event->GetTimestamp() - seconds.count() * 1e9); } mProcessTotalCnt ++; } - for (auto& tag : appEvents->tags_) { + for (const auto& tag : appEvents->tags_) { eventGroup.SetTag(tag.first, tag.second); } #ifdef APSARA_UNIT_TEST_MAIN @@ -195,16 +195,16 @@ GENERATE_METRICS(GenerateTcpRecvBytesTotalMetrics, MeasureType::MEASURE_TYPE_NET GENERATE_METRICS(GenerateTcpSendPktsTotalMetrics, MeasureType::MEASURE_TYPE_NET, NetSingleMeasure, npm_send_pkt_total, send_pkt_total_) GENERATE_METRICS(GenerateTcpSendBytesTotalMetrics, MeasureType::MEASURE_TYPE_NET, NetSingleMeasure, npm_send_byte_total, send_byte_total_) -void ArmsSpanHandler::handle(std::vector>&& spans) { +void ArmsSpanHandler::handle(std::vector>& spans) { if (spans.empty()) return; - for (auto& span : spans) { + for (const auto& span : spans) { std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); eventGroup.SetTag(app_id_key, span->app_id_); - for (auto& x : span->single_spans_) { + for (const auto& x : span->single_spans_) { auto spanEvent = eventGroup.AddSpanEvent(); - for (auto& tag : x->tags_) { + for (const auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } spanEvent->SetName(x->span_name_); @@ -227,17 +227,17 @@ void ArmsSpanHandler::handle(std::vector>& return; } -void ArmsMeterHandler::handle(std::vector>&& measures, uint64_t timestamp) { +void ArmsMeterHandler::handle(std::vector>& measures, uint64_t timestamp) { if (measures.empty()) return; - for (auto& appBatchMeasures : measures) { + for (const auto& appBatchMeasures : measures) { std::shared_ptr sourceBuffer = std::make_shared();; PipelineEventGroup eventGroup(sourceBuffer); // source_ip eventGroup.SetTag(std::string(app_id_key), appBatchMeasures->app_id_); eventGroup.SetTag(std::string(ip_key), 
appBatchMeasures->ip_); - for (auto& measure : appBatchMeasures->measures_) { + for (const auto& measure : appBatchMeasures->measures_) { auto type = measure->type_; if (type == MeasureType::MEASURE_TYPE_APP) { GenerateRequestsTotalMetrics(eventGroup, measure, timestamp); diff --git a/core/ebpf/handler/ObserveHandler.h b/core/ebpf/handler/ObserveHandler.h index de5241a7e9..d70cf33470 100644 --- a/core/ebpf/handler/ObserveHandler.h +++ b/core/ebpf/handler/ObserveHandler.h @@ -26,31 +26,31 @@ class MeterHandler : public AbstractHandler { public: MeterHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - virtual void handle(std::vector>&&, uint64_t) = 0; + virtual void handle(std::vector>&, uint64_t) = 0; }; class OtelMeterHandler : public MeterHandler { public: OtelMeterHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : MeterHandler(ctx, key, idx) {} - void handle(std::vector>&& measures, uint64_t timestamp) override; + void handle(std::vector>& measures, uint64_t timestamp) override; }; class SpanHandler : public AbstractHandler { public: SpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - virtual void handle(std::vector>&&) = 0; + virtual void handle(std::vector>&) = 0; }; class OtelSpanHandler : public SpanHandler { public: OtelSpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : SpanHandler(ctx, key, idx) {} - void handle(std::vector>&&) override; + void handle(std::vector>&) override; }; class EventHandler : public AbstractHandler { public: EventHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - void handle(std::vector>&&); + void handle(std::vector>&); }; #ifdef __ENTERPRISE__ @@ -58,13 +58,13 @@ class EventHandler : public AbstractHandler { class ArmsMeterHandler : public MeterHandler { public: ArmsMeterHandler(const 
logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : MeterHandler(ctx, key, idx) {} - void handle(std::vector>&& measures, uint64_t timestamp) override; + void handle(std::vector>& measures, uint64_t timestamp) override; }; class ArmsSpanHandler : public SpanHandler { public: ArmsSpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : SpanHandler(ctx, key, idx) {} - void handle(std::vector>&&) override; + void handle(std::vector>&) override; }; #endif diff --git a/core/ebpf/handler/SecurityHandler.cpp b/core/ebpf/handler/SecurityHandler.cpp index 5121f01462..ee6ea0acd6 100644 --- a/core/ebpf/handler/SecurityHandler.cpp +++ b/core/ebpf/handler/SecurityHandler.cpp @@ -34,7 +34,7 @@ SecurityHandler::SecurityHandler(const logtail::PipelineContext* ctx, logtail::Q mHostIp = GetHostIp(); } -void SecurityHandler::handle(std::vector>&& events) { +void SecurityHandler::handle(std::vector>& events) { if (events.empty()) { return ; } @@ -48,9 +48,9 @@ void SecurityHandler::handle(std::vector> const static std::string host_name_key = "host.name"; event_group.SetTag(host_ip_key, mHostIp); event_group.SetTag(host_name_key, mHostName); - for (auto& x : events) { - auto event = event_group.AddLogEvent(); - for (auto& tag : x->GetAllTags()) { + for (const auto& x : events) { + auto* event = event_group.AddLogEvent(); + for (const auto& tag : x->GetAllTags()) { event->SetContent(tag.first, tag.second); } auto seconds = std::chrono::duration_cast(std::chrono::nanoseconds(x->GetTimestamp())); diff --git a/core/ebpf/handler/SecurityHandler.h b/core/ebpf/handler/SecurityHandler.h index a181457553..f3a6e2642d 100644 --- a/core/ebpf/handler/SecurityHandler.h +++ b/core/ebpf/handler/SecurityHandler.h @@ -26,7 +26,7 @@ namespace ebpf { class SecurityHandler : public AbstractHandler { public: SecurityHandler(const logtail::PipelineContext* ctx, logtail::QueueKey key, uint32_t idx); - void handle(std::vector>&& events); + void handle(std::vector>& events); 
private: // TODO 后续这两个 key 需要移到 group 的 metadata 里,在 processortagnative 中转成tag std::string mHostIp; diff --git a/core/ebpf/include/SysAkApi.h b/core/ebpf/include/SysAkApi.h index 8559bcd55e..80818e5abb 100644 --- a/core/ebpf/include/SysAkApi.h +++ b/core/ebpf/include/SysAkApi.h @@ -4,8 +4,10 @@ #pragma once -using init_func = int (*)(void *); -using remove_func = int (*)(void *); -using suspend_func = int(*)(void *); +#include "ebpf/include/export.h" + +using init_func = int (*)(nami::eBPFConfig*); +using remove_func = int (*)(nami::eBPFConfig*); using deinit_func = void (*)(void); -using update_func = int(*)(void*); +using suspend_func = int (*)(nami::eBPFConfig*); +using update_func = int (*)(nami::eBPFConfig*); \ No newline at end of file diff --git a/core/ebpf/include/export.h b/core/ebpf/include/export.h index c72e00f9c8..cf8c5c6304 100644 --- a/core/ebpf/include/export.h +++ b/core/ebpf/include/export.h @@ -4,12 +4,14 @@ #pragma once -#include -#include -#include #include +#include #include +#include +#include #include +#include +#include enum class SecureEventType { SECURE_EVENT_TYPE_SOCKET_SECURE, @@ -48,14 +50,16 @@ class BatchAbstractSecurityEvent { std::vector> events; }; -using HandleSingleDataEventFn = std::function&& event)>; -using HandleBatchDataEventFn = std::function>&& events)>; +using HandleSingleDataEventFn = std::function& event)>; +using HandleBatchDataEventFn = std::function>& events)>; enum class UpdataType { SECURE_UPDATE_TYPE_ENABLE_PROBE, SECURE_UPDATE_TYPE_CONFIG_CHAGE, SECURE_UPDATE_TYPE_SUSPEND_PROBE, SECURE_UPDATE_TYPE_DISABLE_PROBE, + OBSERVER_UPDATE_TYPE_CHANGE_WHITELIST, + OBSERVER_UPDATE_TYPE_UPDATE_PROBE, SECURE_UPDATE_TYPE_MAX, }; @@ -104,8 +108,9 @@ struct Measure { // process struct ApplicationBatchMeasure { std::string app_id_; - std::string region_id_; + std::string app_name_; std::string ip_; + std::string host_; std::vector> measures_; }; @@ -123,6 +128,9 @@ struct SingleSpan { struct ApplicationBatchSpan { std::string 
app_id_; + std::string app_name_; + std::string host_ip_; + std::string host_name_; std::vector> single_spans_; }; @@ -176,13 +184,13 @@ enum class PluginType { }; // observe metrics -using NamiHandleBatchMeasureFunc = std::function>&& measures, uint64_t timestamp)>; +using NamiHandleBatchMeasureFunc = std::function>& measures, uint64_t timestamp)>; // observe spans -using NamiHandleBatchSpanFunc = std::function>&&)>; +using NamiHandleBatchSpanFunc = std::function>&)>; // observe events -using NamiHandleBatchEventFunc = std::function>&&)>; +using NamiHandleBatchEventFunc = std::function>&)>; // observe security -using NamiHandleBatchDataEventFn = std::function>&& events)>; +using NamiHandleBatchDataEventFn = std::function>& events)>; struct ObserverNetworkOption { std::vector mEnableProtocols; @@ -192,6 +200,9 @@ struct ObserverNetworkOption { bool mEnableSpan = false; bool mEnableMetric = false; bool mEnableLog = true; + bool mEnableCidFilter = true; + std::vector mEnableCids; + std::vector mDisableCids; std::string mMeterHandlerType; std::string mSpanHandlerType; }; @@ -227,12 +238,52 @@ struct SecurityNetworkFilter { struct SecurityOption { std::vector call_names_; std::variant filter_; + + SecurityOption() = default; + + SecurityOption(const SecurityOption& other) = default; + + SecurityOption(SecurityOption&& other) noexcept + : call_names_(std::move(other.call_names_)), filter_(std::move(other.filter_)) {} + + SecurityOption& operator=(const SecurityOption& other) = default; + + SecurityOption& operator=(SecurityOption&& other) noexcept { + call_names_ = other.call_names_; + filter_ = other.filter_; + return *this; + } + + ~SecurityOption() {} + bool operator==(const SecurityOption& other) const { return call_names_ == other.call_names_ && filter_ == other.filter_; } }; +class PodMeta { +public: + PodMeta(const std::string& app_id, const std::string& app_name, + const std::string& ns, + const std::string& workload_name, + const std::string& workload_kind, + 
const std::string& pod_name, const std::string& pod_ip, const std::string& service_name) + : app_id_(app_id), app_name_(app_name), namespace_(ns), workload_name_(workload_name), workload_kind_(workload_kind), pod_name_(pod_name), pod_ip_(pod_ip), service_name_(service_name){} + std::string app_id_; + std::string app_name_; + std::string namespace_; + std::string workload_name_; + std::string workload_kind_; + std::string pod_name_; + std::string pod_ip_; + std::string service_name_; +}; + +using K8sMetadataCacheCallback = std::function(const std::string&)>; +using K8sMetadataCallback = std::function&, std::vector>&)>; +using AsyncK8sMetadataCallback = std::function(std::vector&, std::vector>&)>; + struct NetworkObserveConfig { bool enable_libbpf_debug_ = false; bool enable_so_ = false; @@ -247,9 +298,19 @@ struct NetworkObserveConfig { bool enable_span_ = false; bool enable_metric_ = false; bool enable_event_ = false; + bool enable_cid_filter = false; NamiHandleBatchMeasureFunc measure_cb_ = nullptr; NamiHandleBatchSpanFunc span_cb_ = nullptr; NamiHandleBatchEventFunc event_cb_ = nullptr; + K8sMetadataCallback metadata_by_cid_cb_ = nullptr; + K8sMetadataCallback metadata_by_ip_cb_ = nullptr; + AsyncK8sMetadataCallback async_metadata_by_cid_cb_ = nullptr; + AsyncK8sMetadataCallback async_metadata_by_ip_cb_ = nullptr; + K8sMetadataCacheCallback metadata_by_cid_cache_ = nullptr; + K8sMetadataCacheCallback metadata_by_ip_cache_ = nullptr; + std::vector enable_container_ids_; + std::vector disable_container_ids_; + bool operator==(const NetworkObserveConfig& other) const { return enable_libbpf_debug_ == other.enable_libbpf_debug_ && enable_so_ == other.enable_so_ && @@ -342,4 +403,4 @@ struct eBPFConfig { } }; -}; +}; // namespace nami diff --git a/core/unittest/ebpf/eBPFServerUnittest.cpp b/core/unittest/ebpf/eBPFServerUnittest.cpp index 9dd6f31d04..53838ba3f9 100644 --- a/core/unittest/ebpf/eBPFServerUnittest.cpp +++ b/core/unittest/ebpf/eBPFServerUnittest.cpp @@ 
-434,7 +434,7 @@ void eBPFServerUnittest::GenerateBatchMeasure(nami::NamiHandleBatchMeasureFunc c batch_app_measures.emplace_back(std::move(app_measure_ptr)); } } - cb(std::move(batch_app_measures), 100000); + cb(batch_app_measures, 100000); } void eBPFServerUnittest::GenerateBatchAppEvent(nami::NamiHandleBatchEventFunc cb) { @@ -452,7 +452,7 @@ void eBPFServerUnittest::GenerateBatchAppEvent(nami::NamiHandleBatchEventFunc cb batch_app_events.emplace_back(std::move(appEvent)); } - if (cb) cb(std::move(batch_app_events)); + if (cb) cb(batch_app_events); return; } @@ -539,7 +539,7 @@ void eBPFServerUnittest::GenerateBatchSpan(nami::NamiHandleBatchSpanFunc cb) { batch_spans->single_spans_.emplace_back(std::move(single_span)); } batch_app_spans.emplace_back(std::move(batch_spans)); - cb(std::move(batch_app_spans)); + cb(batch_app_spans); } void eBPFServerUnittest::GenerateBatchEvent(nami::NamiHandleBatchDataEventFn cb, SecureEventType type) { @@ -555,7 +555,7 @@ void eBPFServerUnittest::GenerateBatchEvent(nami::NamiHandleBatchDataEventFn cb, auto event = std::make_unique (std::move(tags), type, 1000); events.emplace_back(std::move(event)); } - cb(std::move(events)); + cb(events); } void eBPFServerUnittest::InitSecurityOpts() { From fc6820f3e0d2ca2c05df71db838eccc2d9b30370 Mon Sep 17 00:00:00 2001 From: Bingchang Chen Date: Wed, 11 Dec 2024 09:43:48 +0800 Subject: [PATCH 4/8] feat: support singleton input (#1933) * feat: support singleton input * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix --- core/config/ConfigDiff.h | 2 + core/config/PipelineConfig.cpp | 16 +- core/config/PipelineConfig.h | 7 +- core/config/watcher/PipelineConfigWatcher.cpp | 165 +- core/config/watcher/PipelineConfigWatcher.h | 41 +- core/pipeline/Pipeline.cpp | 1 + core/pipeline/Pipeline.h | 2 + core/pipeline/plugin/PluginRegistry.cpp | 41 +- core/pipeline/plugin/PluginRegistry.h | 21 +- core/unittest/config/CMakeLists.txt | 4 + 
core/unittest/config/ConfigUpdateUnittest.cpp | 37 +- .../config/PipelineConfigWatcherUnittest.cpp | 1454 +++++++++++++++++ core/unittest/config/PipelineManagerMock.h | 58 + 13 files changed, 1770 insertions(+), 79 deletions(-) create mode 100644 core/unittest/config/PipelineConfigWatcherUnittest.cpp create mode 100644 core/unittest/config/PipelineManagerMock.h diff --git a/core/config/ConfigDiff.h b/core/config/ConfigDiff.h index 38fa7ba00a..5bd9c8d205 100644 --- a/core/config/ConfigDiff.h +++ b/core/config/ConfigDiff.h @@ -38,4 +38,6 @@ using PipelineConfigDiff = ConfigDiff; using TaskConfigDiff = ConfigDiff; using InstanceConfigDiff = ConfigDiff; +enum ConfigDiffEnum { Added, Modified, Removed, Unchanged }; + } // namespace logtail diff --git a/core/config/PipelineConfig.cpp b/core/config/PipelineConfig.cpp index e0642b6ee1..4aeea027ca 100644 --- a/core/config/PipelineConfig.cpp +++ b/core/config/PipelineConfig.cpp @@ -191,6 +191,20 @@ bool PipelineConfig::Parse() { mRegion); } const string pluginType = it->asString(); + // when input is singleton, there should only one input to simpify config load transaction + if (PluginRegistry::GetInstance()->IsGlobalSingletonInputPlugin(pluginType)) { + mSingletonInput = pluginType; + if (itr->size() > 1) { + PARAM_ERROR_RETURN(sLogger, + alarm, + "more than 1 input plugin is given when global singleton input plugin is used", + noModule, + mName, + mProject, + mLogstore, + mRegion); + } + } if (i == 0) { if (PluginRegistry::GetInstance()->IsValidGoPlugin(pluginType)) { mHasGoInput = true; @@ -241,7 +255,7 @@ bool PipelineConfig::Parse() { if (hasFileInput && (*mDetail)["inputs"].size() > 1) { PARAM_ERROR_RETURN(sLogger, alarm, - "more than 1 input_file or input_container_stdio plugin is given", + "more than 1 input_file or input_container_stdio is given", noModule, mName, mProject, diff --git a/core/config/PipelineConfig.h b/core/config/PipelineConfig.h index 7d845126e9..6a9219cdcd 100644 --- a/core/config/PipelineConfig.h 
+++ b/core/config/PipelineConfig.h @@ -32,6 +32,7 @@ struct PipelineConfig { uint32_t mCreateTime = 0; const Json::Value* mGlobal = nullptr; std::vector mInputs; + std::optional mSingletonInput; std::vector mProcessors; std::vector mAggregators; std::vector mFlushers; @@ -49,7 +50,8 @@ struct PipelineConfig { std::string mLogstore; std::string mRegion; - PipelineConfig(const std::string& name, std::unique_ptr&& detail) : mName(name), mDetail(std::move(detail)) {} + PipelineConfig(const std::string& name, std::unique_ptr&& detail) + : mName(name), mDetail(std::move(detail)) {} bool Parse(); @@ -67,7 +69,8 @@ struct PipelineConfig { // bool IsProcessRunnerInvolved() const { // // 长期过渡使用,待C++部分的时序聚合能力与Go持平后恢复下面的正式版 // return !(mHasGoInput && !mHasNativeProcessor); - // // return !(mHasGoInput && !mHasNativeProcessor && (mHasGoProcessor || (mHasGoFlusher && !mHasNativeFlusher))); + // // return !(mHasGoInput && !mHasNativeProcessor && (mHasGoProcessor || (mHasGoFlusher && + // !mHasNativeFlusher))); // } bool HasGoPlugin() const { return mHasGoFlusher || mHasGoProcessor || mHasGoInput; } diff --git a/core/config/watcher/PipelineConfigWatcher.cpp b/core/config/watcher/PipelineConfigWatcher.cpp index d68d7cedf3..fb914c4d55 100644 --- a/core/config/watcher/PipelineConfigWatcher.cpp +++ b/core/config/watcher/PipelineConfigWatcher.cpp @@ -41,11 +41,13 @@ pair PipelineConfigWatcher::CheckConfigDiff( PipelineConfigDiff pDiff; TaskConfigDiff tDiff; unordered_set configSet; + SingletonConfigCache singletonCache; // builtin pipeline configs - InsertBuiltInPipelines(pDiff, tDiff, configSet); - // file pipeline configs - InsertPipelines(pDiff, tDiff, configSet); + InsertBuiltInPipelines(pDiff, tDiff, configSet, singletonCache); + // file pipeline configs + InsertPipelines(pDiff, tDiff, configSet, singletonCache); + CheckSingletonInput(pDiff, singletonCache); for (const auto& name : mPipelineManager->GetAllConfigNames()) { if (configSet.find(name) == configSet.end()) { 
pDiff.mRemoved.push_back(name); @@ -88,8 +90,9 @@ pair PipelineConfigWatcher::CheckConfigDiff( } void PipelineConfigWatcher::InsertBuiltInPipelines(PipelineConfigDiff& pDiff, - TaskConfigDiff& tDiff, - unordered_set& configSet) { + TaskConfigDiff& tDiff, + unordered_set& configSet, + SingletonConfigCache& singletonCache) { #ifdef __ENTERPRISE__ const std::map& builtInPipelines = EnterpriseConfigProvider::GetInstance()->GetAllBuiltInPipelineConfigs(); @@ -120,7 +123,7 @@ void PipelineConfigWatcher::InsertBuiltInPipelines(PipelineConfigDiff& pDiff, LOG_INFO(sLogger, ("new config found and disabled", "skip current object")("config", pipelineName)); continue; } - if (!CheckAddedConfig(pipelineName, std::move(detail), pDiff, tDiff)) { + if (!CheckAddedConfig(pipelineName, std::move(detail), pDiff, tDiff, singletonCache)) { continue; } } else if (pipleineDetail != iter->second) { @@ -161,7 +164,7 @@ void PipelineConfigWatcher::InsertBuiltInPipelines(PipelineConfigDiff& pDiff, } continue; } - if (!CheckModifiedConfig(pipelineName, std::move(detail), pDiff, tDiff)) { + if (!CheckModifiedConfig(pipelineName, std::move(detail), pDiff, tDiff, singletonCache)) { continue; } } else { @@ -175,7 +178,8 @@ void PipelineConfigWatcher::InsertBuiltInPipelines(PipelineConfigDiff& pDiff, void PipelineConfigWatcher::InsertPipelines(PipelineConfigDiff& pDiff, TaskConfigDiff& tDiff, - std::unordered_set& configSet) { + std::unordered_set& configSet, + SingletonConfigCache& singletonCache) { for (const auto& dir : mSourceDir) { error_code ec; filesystem::file_status s = filesystem::status(dir, ec); @@ -231,7 +235,7 @@ void PipelineConfigWatcher::InsertPipelines(PipelineConfigDiff& pDiff, LOG_INFO(sLogger, ("new config found and disabled", "skip current object")("config", configName)); continue; } - if (!CheckAddedConfig(configName, std::move(detail), pDiff, tDiff)) { + if (!CheckAddedConfig(configName, std::move(detail), pDiff, tDiff, singletonCache)) { continue; } } else if 
(iter->second.first != size || iter->second.second != mTime) { @@ -270,11 +274,12 @@ void PipelineConfigWatcher::InsertPipelines(PipelineConfigDiff& pDiff, } continue; } - if (!CheckModifiedConfig(configName, std::move(detail), pDiff, tDiff)) { + if (!CheckModifiedConfig(configName, std::move(detail), pDiff, tDiff, singletonCache)) { continue; } } else { LOG_DEBUG(sLogger, ("existing config file unchanged", "skip current object")); + CheckUnchangedConfig(configName, path, pDiff, tDiff, singletonCache); } } } @@ -283,7 +288,8 @@ void PipelineConfigWatcher::InsertPipelines(PipelineConfigDiff& pDiff, bool PipelineConfigWatcher::CheckAddedConfig(const string& configName, unique_ptr&& configDetail, PipelineConfigDiff& pDiff, - TaskConfigDiff& tDiff) { + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache) { switch (GetConfigType(*configDetail)) { case ConfigType::Pipeline: { PipelineConfig config(configName, std::move(configDetail)); @@ -297,7 +303,7 @@ bool PipelineConfigWatcher::CheckAddedConfig(const string& configName, config.mRegion); return false; } - pDiff.mAdded.push_back(std::move(config)); + PushPipelineConfig(std::move(config), ConfigDiffEnum::Added, pDiff, singletonCache); LOG_INFO(sLogger, ("new config found and passed topology check", "prepare to build pipeline")("config", configName)); break; @@ -322,7 +328,8 @@ bool PipelineConfigWatcher::CheckAddedConfig(const string& configName, bool PipelineConfigWatcher::CheckModifiedConfig(const string& configName, unique_ptr&& configDetail, PipelineConfigDiff& pDiff, - TaskConfigDiff& tDiff) { + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache) { switch (GetConfigType(*configDetail)) { case ConfigType::Pipeline: { shared_ptr p = mPipelineManager->FindConfigByName(configName); @@ -341,10 +348,10 @@ bool PipelineConfigWatcher::CheckModifiedConfig(const string& configName, config.mRegion); return false; } - pDiff.mAdded.push_back(std::move(config)); LOG_INFO(sLogger, ("existing invalid config 
modified and passed topology check", "prepare to build pipeline")("config", configName)); + PushPipelineConfig(std::move(config), ConfigDiffEnum::Added, pDiff, singletonCache); } else if (*configDetail != p->GetConfig()) { PipelineConfig config(configName, std::move(configDetail)); if (!config.Parse()) { @@ -360,10 +367,10 @@ bool PipelineConfigWatcher::CheckModifiedConfig(const string& configName, config.mRegion); return false; } - pDiff.mModified.push_back(std::move(config)); LOG_INFO(sLogger, ("existing valid config modified and passed topology check", "prepare to rebuild pipeline")("config", configName)); + PushPipelineConfig(std::move(config), ConfigDiffEnum::Modified, pDiff, singletonCache); } else { LOG_DEBUG(sLogger, ("existing valid config file modified, but no change found", "skip current object")); } @@ -412,4 +419,132 @@ bool PipelineConfigWatcher::CheckModifiedConfig(const string& configName, return true; } +bool PipelineConfigWatcher::CheckUnchangedConfig(const std::string& configName, + const filesystem::path& path, + PipelineConfigDiff& pDiff, + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache) { + auto pipeline = mPipelineManager->FindConfigByName(configName); + auto task = mTaskPipelineManager->FindPipelineByName(configName).get(); + if (task) { + return true; + } else if (pipeline) { // running pipeline in last config update + std::unique_ptr configDetail = make_unique(); + PipelineConfig config(configName, std::move(configDetail)); + config.mCreateTime = pipeline->GetContext().GetCreateTime(); + config.mSingletonInput = pipeline->GetSingletonInput(); + PushPipelineConfig(std::move(config), ConfigDiffEnum::Unchanged, pDiff, singletonCache); + } else { // low priority singleton input in last config update, sort it again + unique_ptr detail = make_unique(); + if (!LoadConfigDetailFromFile(path, *detail)) { + return false; + } + if (!IsConfigEnabled(configName, *detail)) { + LOG_INFO(sLogger, ("unchanged config found and disabled", 
"skip current object")("config", configName)); + return false; + } + PipelineConfig config(configName, std::move(detail)); + if (!config.Parse()) { + LOG_ERROR(sLogger, ("new config found but invalid", "skip current object")("config", configName)); + AlarmManager::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM, + "new config found but invalid: skip current object, config: " + + configName, + config.mProject, + config.mLogstore, + config.mRegion); + return false; + } + if (config.mSingletonInput) { + singletonCache[config.mSingletonInput.value()].push_back( + make_shared(std::move(config), ConfigDiffEnum::Added)); + } + } + return true; +} + +void PipelineConfigWatcher::PushPipelineConfig(PipelineConfig&& config, + ConfigDiffEnum diffEnum, + PipelineConfigDiff& pDiff, + SingletonConfigCache& singletonCache) { + // singleton input + if (config.mSingletonInput) { + if (diffEnum == ConfigDiffEnum::Added || diffEnum == ConfigDiffEnum::Modified + || diffEnum == ConfigDiffEnum::Unchanged) { + singletonCache[config.mSingletonInput.value()].push_back( + make_shared(std::move(config), diffEnum)); + } else { + LOG_ERROR(sLogger, ("should not reach here", "invalid diff enum")("diff", diffEnum)); + } + return; + } + // no singleton input + switch (diffEnum) { + case ConfigDiffEnum::Added: + pDiff.mAdded.push_back(std::move(config)); + break; + case ConfigDiffEnum::Modified: + pDiff.mModified.push_back(std::move(config)); + break; + default: + break; + } +} + +void PipelineConfigWatcher::CheckSingletonInput(PipelineConfigDiff& pDiff, SingletonConfigCache& singletonCache) { + for (auto& [name, configs] : singletonCache) { + std::sort(configs.begin(), + configs.end(), + [](const std::shared_ptr& a, + const std::shared_ptr& b) -> bool { + if (a->config.mCreateTime == b->config.mCreateTime) { + return a->config.mName < b->config.mName; + } + return a->config.mCreateTime < b->config.mCreateTime; + }); + for (size_t i = 0; i < configs.size(); ++i) { + const auto& diffEnum = 
configs[i]->diffEnum; + const auto& configName = configs[i]->config.mName; + if (i == 0) { + switch (diffEnum) { + // greatest priority config + case ConfigDiffEnum::Added: + LOG_INFO(sLogger, + ("new config with singleton input found and passed topology check", + "prepare to build pipeline")("config", configName)); + pDiff.mAdded.push_back(std::move(configs[0]->config)); + break; + case ConfigDiffEnum::Modified: + LOG_INFO(sLogger, + ("existing config with singleton input modified and passed topology check", + "prepare to build pipeline")("config", configName)); + pDiff.mModified.push_back(std::move(configs[0]->config)); + break; + default: + break; + } + } else { + // other low priority configs + switch (diffEnum) { + case ConfigDiffEnum::Modified: + LOG_WARNING(sLogger, + ("global singleton plugin found, but another older config or smaller name config " + "already exists", + "skip current object")("config", configName)); + pDiff.mRemoved.push_back(configName); + break; + case ConfigDiffEnum::Unchanged: + LOG_WARNING(sLogger, + ("existing valid config with global singleton plugin, but another older config or " + "smaller name config found", + "prepare to stop current running pipeline")("config", configName)); + pDiff.mRemoved.push_back(configName); + break; + default: + break; + } + } + } + } +} + } // namespace logtail diff --git a/core/config/watcher/PipelineConfigWatcher.h b/core/config/watcher/PipelineConfigWatcher.h index 98c2677264..766b55c512 100644 --- a/core/config/watcher/PipelineConfigWatcher.h +++ b/core/config/watcher/PipelineConfigWatcher.h @@ -16,6 +16,9 @@ #pragma once +#include +#include +#include #include #include "config/ConfigDiff.h" @@ -26,6 +29,14 @@ namespace logtail { class PipelineManager; class TaskPipelineManager; +struct PipelineConfigWithDiffInfo { + PipelineConfig config; + ConfigDiffEnum diffEnum; + PipelineConfigWithDiffInfo(PipelineConfig&& config, ConfigDiffEnum diffEnum) + : config(std::move(config)), diffEnum(diffEnum) {} +}; 
+using SingletonConfigCache = std::unordered_map>>; + class PipelineConfigWatcher : public ConfigWatcher { public: PipelineConfigWatcher(const PipelineConfigWatcher&) = delete; @@ -46,19 +57,41 @@ class PipelineConfigWatcher : public ConfigWatcher { PipelineConfigWatcher(); ~PipelineConfigWatcher() = default; - void InsertBuiltInPipelines(PipelineConfigDiff& pDiff, TaskConfigDiff& tDiff, std::unordered_set& configSet); - void InsertPipelines(PipelineConfigDiff& pDiff, TaskConfigDiff& tDiff, std::unordered_set& configSet); + void InsertBuiltInPipelines(PipelineConfigDiff& pDiff, + TaskConfigDiff& tDiff, + std::unordered_set& configSet, + SingletonConfigCache& singletonCache); + void InsertPipelines(PipelineConfigDiff& pDiff, + TaskConfigDiff& tDiff, + std::unordered_set& configSet, + SingletonConfigCache& singletonCache); bool CheckAddedConfig(const std::string& configName, std::unique_ptr&& configDetail, PipelineConfigDiff& pDiff, - TaskConfigDiff& tDiff); + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache); bool CheckModifiedConfig(const std::string& configName, std::unique_ptr&& configDetail, PipelineConfigDiff& pDiff, - TaskConfigDiff& tDiff); + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache); + bool CheckUnchangedConfig(const std::string& configName, + const std::filesystem::path& path, + PipelineConfigDiff& pDiff, + TaskConfigDiff& tDiff, + SingletonConfigCache& singletonCache); + void PushPipelineConfig(PipelineConfig&& config, + ConfigDiffEnum diffEnum, + PipelineConfigDiff& pDiff, + SingletonConfigCache& singletonCache); + void CheckSingletonInput(PipelineConfigDiff& pDiff, SingletonConfigCache& singletonCache); const PipelineManager* mPipelineManager = nullptr; const TaskPipelineManager* mTaskPipelineManager = nullptr; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class PipelineConfigWatcherUnittest; +#endif }; } // namespace logtail diff --git a/core/pipeline/Pipeline.cpp b/core/pipeline/Pipeline.cpp index 1f8e4d5868..3ed21f7d2b 
100644 --- a/core/pipeline/Pipeline.cpp +++ b/core/pipeline/Pipeline.cpp @@ -70,6 +70,7 @@ void AddExtendedGlobalParamToGoPipeline(const Json::Value& extendedParams, Json: bool Pipeline::Init(PipelineConfig&& config) { mName = config.mName; mConfig = std::move(config.mDetail); + mSingletonInput = config.mSingletonInput; mContext.SetConfigName(mName); mContext.SetCreateTime(config.mCreateTime); mContext.SetPipeline(*this); diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index d6a55911c7..29666c68c1 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -61,6 +61,7 @@ class Pipeline { const std::string& Name() const { return mName; } PipelineContext& GetContext() const { return mContext; } const Json::Value& GetConfig() const { return *mConfig; } + const std::optional& GetSingletonInput() const { return mSingletonInput; } const std::vector>& GetFlushers() const { return mFlushers; } bool IsFlushingThroughGoPipeline() const { return !mGoPipelineWithoutInput.isNull(); } const std::unordered_map>& GetPluginStatistics() const { @@ -100,6 +101,7 @@ class Pipeline { mutable PipelineContext mContext; std::unordered_map> mPluginCntMap; std::unique_ptr mConfig; + std::optional mSingletonInput; std::atomic_uint16_t mPluginID; std::atomic_int16_t mInProcessCnt; diff --git a/core/pipeline/plugin/PluginRegistry.cpp b/core/pipeline/plugin/PluginRegistry.cpp index 469b2c06b1..6a0061c68a 100644 --- a/core/pipeline/plugin/PluginRegistry.cpp +++ b/core/pipeline/plugin/PluginRegistry.cpp @@ -130,13 +130,13 @@ bool PluginRegistry::IsValidNativeFlusherPlugin(const string& name) const { void PluginRegistry::LoadStaticPlugins() { RegisterInputCreator(new StaticInputCreator()); RegisterInputCreator(new StaticInputCreator()); - RegisterInputCreator(new StaticInputCreator()); + RegisterInputCreator(new StaticInputCreator(), true); #if defined(__linux__) && !defined(__ANDROID__) RegisterInputCreator(new StaticInputCreator()); - RegisterInputCreator(new 
StaticInputCreator()); - RegisterInputCreator(new StaticInputCreator()); - RegisterInputCreator(new StaticInputCreator()); - RegisterInputCreator(new StaticInputCreator()); + RegisterInputCreator(new StaticInputCreator(), true); + RegisterInputCreator(new StaticInputCreator(), true); + RegisterInputCreator(new StaticInputCreator(), true); + RegisterInputCreator(new StaticInputCreator(), true); #endif RegisterProcessorCreator(new StaticProcessorCreator()); @@ -189,16 +189,16 @@ void PluginRegistry::LoadDynamicPlugins(const set& plugins) { } } -void PluginRegistry::RegisterInputCreator(PluginCreator* creator) { - RegisterCreator(INPUT_PLUGIN, creator); +void PluginRegistry::RegisterInputCreator(PluginCreator* creator, bool isSingleton) { + RegisterCreator(INPUT_PLUGIN, creator, isSingleton); } void PluginRegistry::RegisterProcessorCreator(PluginCreator* creator) { - RegisterCreator(PROCESSOR_PLUGIN, creator); + RegisterCreator(PROCESSOR_PLUGIN, creator, false); } -void PluginRegistry::RegisterFlusherCreator(PluginCreator* creator) { - RegisterCreator(FLUSHER_PLUGIN, creator); +void PluginRegistry::RegisterFlusherCreator(PluginCreator* creator, bool isSingleton) { + RegisterCreator(FLUSHER_PLUGIN, creator, isSingleton); } PluginCreator* PluginRegistry::LoadProcessorPlugin(DynamicLibLoader& loader, const string pluginType) { @@ -223,11 +223,12 @@ PluginCreator* PluginRegistry::LoadProcessorPlugin(DynamicLibLoader& loader, con return new DynamicCProcessorCreator(plugin, loader.Release()); } -void PluginRegistry::RegisterCreator(PluginCat cat, PluginCreator* creator) { +void PluginRegistry::RegisterCreator(PluginCat cat, PluginCreator* creator, bool isSingleton) { if (!creator) { return; } - mPluginDict.emplace(PluginKey(cat, creator->Name()), shared_ptr(creator)); + mPluginDict.emplace(PluginKey(cat, creator->Name()), + PluginCreatorWithInfo(shared_ptr(creator), isSingleton)); } unique_ptr @@ -235,9 +236,21 @@ PluginRegistry::Create(PluginCat cat, const string& name, 
const PluginInstance:: unique_ptr ins; auto creatorEntry = mPluginDict.find(PluginKey(cat, name)); if (creatorEntry != mPluginDict.end()) { - ins = creatorEntry->second->Create(pluginMeta); + ins = creatorEntry->second.first->Create(pluginMeta); } return ins; } -} // namespace logtail \ No newline at end of file +bool PluginRegistry::IsGlobalSingletonInputPlugin(const string& name) const { + return IsGlobalSingleton(INPUT_PLUGIN, name); +} + +bool PluginRegistry::IsGlobalSingleton(PluginCat cat, const string& name) const { + auto creatorEntry = mPluginDict.find(PluginKey(cat, name)); + if (creatorEntry != mPluginDict.end()) { + return creatorEntry->second.second; + } + return false; +} + +} // namespace logtail diff --git a/core/pipeline/plugin/PluginRegistry.h b/core/pipeline/plugin/PluginRegistry.h index 22213d6c39..1c552da6dc 100644 --- a/core/pipeline/plugin/PluginRegistry.h +++ b/core/pipeline/plugin/PluginRegistry.h @@ -46,12 +46,15 @@ class PluginRegistry { void LoadPlugins(); void UnloadPlugins(); std::unique_ptr CreateInput(const std::string& name, const PluginInstance::PluginMeta& pluginMeta); - std::unique_ptr CreateProcessor(const std::string& name, const PluginInstance::PluginMeta& pluginMeta); - std::unique_ptr CreateFlusher(const std::string& name, const PluginInstance::PluginMeta& pluginMeta); + std::unique_ptr CreateProcessor(const std::string& name, + const PluginInstance::PluginMeta& pluginMeta); + std::unique_ptr CreateFlusher(const std::string& name, + const PluginInstance::PluginMeta& pluginMeta); bool IsValidGoPlugin(const std::string& name) const; bool IsValidNativeInputPlugin(const std::string& name) const; bool IsValidNativeProcessorPlugin(const std::string& name) const; bool IsValidNativeFlusherPlugin(const std::string& name) const; + bool IsGlobalSingletonInputPlugin(const std::string& name) const; private: enum PluginCat { INPUT_PLUGIN, PROCESSOR_PLUGIN, FLUSHER_PLUGIN }; @@ -69,19 +72,23 @@ class PluginRegistry { } }; + using 
PluginCreatorWithInfo = std::pair, bool>; + PluginRegistry() {} ~PluginRegistry() = default; void LoadStaticPlugins(); void LoadDynamicPlugins(const std::set& plugins); - void RegisterInputCreator(PluginCreator* creator); + void RegisterInputCreator(PluginCreator* creator, bool isSingleton = false); void RegisterProcessorCreator(PluginCreator* creator); - void RegisterFlusherCreator(PluginCreator* creator); + void RegisterFlusherCreator(PluginCreator* creator, bool isSingleton = false); PluginCreator* LoadProcessorPlugin(DynamicLibLoader& loader, const std::string pluginType); - void RegisterCreator(PluginCat cat, PluginCreator* creator); - std::unique_ptr Create(PluginCat cat, const std::string& name, const PluginInstance::PluginMeta& pluginMeta); + void RegisterCreator(PluginCat cat, PluginCreator* creator, bool isSingleton); + std::unique_ptr + Create(PluginCat cat, const std::string& name, const PluginInstance::PluginMeta& pluginMeta); + bool IsGlobalSingleton(PluginCat cat, const std::string& name) const; - std::unordered_map, PluginKeyHash> mPluginDict; + std::unordered_map mPluginDict; #ifdef APSARA_UNIT_TEST_MAIN friend class PluginRegistryUnittest; diff --git a/core/unittest/config/CMakeLists.txt b/core/unittest/config/CMakeLists.txt index 560d47d393..7efb993180 100644 --- a/core/unittest/config/CMakeLists.txt +++ b/core/unittest/config/CMakeLists.txt @@ -44,6 +44,9 @@ target_link_libraries(config_feedbackable_unittest ${UT_BASE_TARGET}) add_executable(common_config_provider_unittest CommonConfigProviderUnittest.cpp) target_link_libraries(common_config_provider_unittest ${UT_BASE_TARGET}) +add_executable(pipeline_config_watcher_unittest PipelineConfigWatcherUnittest.cpp) +target_link_libraries(pipeline_config_watcher_unittest ${UT_BASE_TARGET}) + include(GoogleTest) gtest_discover_tests(pipeline_config_unittest) gtest_discover_tests(task_config_unittest) @@ -54,3 +57,4 @@ if (ENABLE_ENTERPRISE) endif () gtest_discover_tests(config_feedbackable_unittest) 
gtest_discover_tests(common_config_provider_unittest) +gtest_discover_tests(pipeline_config_watcher_unittest) diff --git a/core/unittest/config/ConfigUpdateUnittest.cpp b/core/unittest/config/ConfigUpdateUnittest.cpp index 73e910d2e8..fddd53804a 100644 --- a/core/unittest/config/ConfigUpdateUnittest.cpp +++ b/core/unittest/config/ConfigUpdateUnittest.cpp @@ -29,48 +29,13 @@ #include "pipeline/plugin/PluginRegistry.h" #include "task_pipeline/TaskPipelineManager.h" #include "unittest/Unittest.h" +#include "unittest/config/PipelineManagerMock.h" #include "unittest/plugin/PluginMock.h" using namespace std; namespace logtail { -class PipelineMock : public Pipeline { -public: - bool Init(PipelineConfig&& config) { - mConfig = std::move(config.mDetail); - WriteMetrics::GetInstance()->PrepareMetricsRecordRef( - mMetricsRecordRef, - MetricCategory::METRIC_CATEGORY_PIPELINE, - {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mName}}); - mStartTime = mMetricsRecordRef.CreateIntGauge(METRIC_PIPELINE_START_TIME); - return (*mConfig)["valid"].asBool(); - } -}; - -class PipelineManagerMock : public PipelineManager { -public: - static PipelineManagerMock* GetInstance() { - static PipelineManagerMock instance; - return &instance; - } - - void ClearEnvironment() { - mPipelineNameEntityMap.clear(); - mPluginCntMap.clear(); - } - -private: - shared_ptr BuildPipeline(PipelineConfig&& config) override { - // this should be synchronized with PipelineManager::BuildPipeline, except for the pointer type. 
- shared_ptr p = make_shared(); - if (!p->Init(std::move(config))) { - return nullptr; - } - return p; - } -}; - class ConfigUpdateUnittest : public testing::Test { public: void OnStartUp() const; diff --git a/core/unittest/config/PipelineConfigWatcherUnittest.cpp b/core/unittest/config/PipelineConfigWatcherUnittest.cpp new file mode 100644 index 0000000000..56c8c3fdd4 --- /dev/null +++ b/core/unittest/config/PipelineConfigWatcherUnittest.cpp @@ -0,0 +1,1454 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include + +#include +#include + +#include "common/JsonUtil.h" +#include "config/watcher/PipelineConfigWatcher.h" +#include "plugin/PluginRegistry.h" +#include "unittest/Unittest.h" +#include "unittest/config/PipelineManagerMock.h" + +using namespace std; + +namespace logtail { + +class PipelineConfigWatcherUnittest : public testing::Test { +public: + void TestLoadAddedSingletonConfig(); + void TestLoadModifiedSingletonConfig(); + void TestLoadRemovedSingletonConfig(); + void TestLoadUnchangedSingletonConfig(); + +protected: + static void SetUpTestCase() { + PluginRegistry::GetInstance()->LoadPlugins(); + PipelineConfigWatcher::GetInstance()->SetPipelineManager(PipelineManagerMock::GetInstance()); + } + static void TearDownTestCase() { PluginRegistry::GetInstance()->UnloadPlugins(); } + +private: + void PrepareConfig() { + filesystem::create_directories(configDir1); + PipelineConfigWatcher::GetInstance()->AddSource(configDir1.string()); + filesystem::create_directories(configDir2); + PipelineConfigWatcher::GetInstance()->AddSource(configDir2.string()); + } + + void ClearConfig() { + PipelineManagerMock::GetInstance()->ClearEnvironment(); + PipelineConfigWatcher::GetInstance()->ClearEnvironment(); + filesystem::remove_all(configDir1); + filesystem::remove_all(configDir2); + } + + filesystem::path configDir1 = "./continuous_pipeline_config1"; + filesystem::path configDir2 = "./continuous_pipeline_config2"; + + const std::string greaterPriorityConfig = R"( + { + "createTime": 1, + "valid": true, + "inputs": [ + { + "Type": "input_network_observer" + } + ], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; + + const std::string lessPriorityConfig = R"( + { + "createTime": 2, + "valid": true, + "inputs": [ + { + "Type": "input_network_observer" + } + ], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; + + const std::string modifiedGreaterPriorityConfig = R"( + { + "createTime": 1, + "valid": true, + "inputs": [ + { + "Type": 
"input_network_observer" + } + ], + "processors": [], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; + + const std::string modifiedLessPriorityConfig = R"( + { + "createTime": 2, + "valid": true, + "inputs": [ + { + "Type": "input_network_observer" + } + ], + "processors": [], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; + + const std::string otherConfig = R"( + { + "createTime": 3, + "valid": true, + "inputs": [ + { + "Type": "input_process_security" + } + ], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; + + const std::string modifiedOtherConfig = R"( + { + "createTime": 3, + "valid": true, + "inputs": [ + { + "Type": "input_process_security" + } + ], + "processors": [], + "flushers": [ + { + "Type": "flusher_sls" + } + ] + } + )"; +}; + +// there are 4 kinds of a config: added, modified, removed, unchanged +// there are 4 kinds of priority relationship: first > second, first < second, +// first > second -> first < second, first < second -> first > second +// total case: 4 (first kind) * 4(second kind) * 4(priority) = 64 +void PipelineConfigWatcherUnittest::TestLoadAddedSingletonConfig() { + { // case: added -> added, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), 
allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> added, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> added, first > second -> first < second + // should not happen + } + { // case: added -> added, first < second -> first > second + // should not happen + } + { // case: added -> modified, first > second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + 
fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> modified, first < second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + 
PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> modified, first > second -> first < second + // should not happen + } + { // case: added -> modified, first < second -> first > second + // should not happen + } + { // case: added -> removed, first > second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: added -> removed, first < second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / 
"test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: added -> removed, first > second -> first < second + // should not happen + } + { // case: added -> removed, first < second -> first > second + // should not happen + } + { // case: added -> unchanged, first > second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + 
APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> unchanged, first < second + PrepareConfig(); + ofstream fout(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: added -> unchanged, first > second -> first < second + // should not happen + } + { // case: added -> unchanged, first < second -> first > second + // should not happen + } +} + +void PipelineConfigWatcherUnittest::TestLoadModifiedSingletonConfig() { + { // case: modified -> added, first > second + 
PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> added, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << 
greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> added, first > second -> first < second + // should not happen + } + { // case: modified -> added, first < second -> first > second + // should not happen + } + { // case: modified -> modified, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = 
PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> modified, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + 
APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> modified, first > second -> first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> modified, first < second -> first > second + PrepareConfig(); + ofstream fout(configDir1 / 
"test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> removed, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + 
PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: modified -> removed, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, 
diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: modified -> removed, first > second -> first < second + // should not happen + } + { // case: modified -> removed, first < second -> first > second + // should not happen + } + { // case: modified -> unchanged, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> unchanged, 
first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + filesystem::remove(configDir2 / "test2.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> unchanged, first > second -> first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, 
PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: modified -> unchanged, first < second -> first > second + PrepareConfig(); + ofstream fout(configDir1 / "test3.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + + fout.open(configDir1 / "test3.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = 
PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test3", allConfigNames[1]); + ClearConfig(); + } +} + +void PipelineConfigWatcherUnittest::TestLoadRemovedSingletonConfig() { + { // case: removed -> added, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> added, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, 
PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> added, first > second -> first < second + // should not happen + } + { // case: removed -> added, first < second -> first > second + // should not happen + } + { // case: removed -> modified, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + this_thread::sleep_for(chrono::milliseconds(1)); + + filesystem::remove(configDir1 / "test1.json"); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout 
<< modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> modified, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + this_thread::sleep_for(chrono::milliseconds(1)); + + filesystem::remove(configDir1 / "test1.json"); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + 
APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> modified, first > second -> first < second + // should not happen + } + { // case: removed -> modified, first < second -> first > second + // should not happen + } + { // case: removed -> removed, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(0U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + ClearConfig(); + } + { // case: removed -> removed, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = 
PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(0U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + ClearConfig(); + } + { // case: removed -> removed, first > second -> first < second + // should not happen + } + { // case: removed -> removed, first < second -> first > second + // should not happen + } + { // case: removed -> unchanged, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = 
PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> unchanged, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir1 / "test1.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: removed -> unchanged, first > second -> first < second + // should not happen + } + { // case: removed -> unchanged, first < second -> first > second + // should not happen + } +} + +void PipelineConfigWatcherUnittest::TestLoadUnchangedSingletonConfig() { + { // case: unchanged -> added, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << 
greaterPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> added, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + 
PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> added, first > second -> first < second + // should not happen + } + { // case: unchanged -> added, first < second -> first > second + // should not happen + } + { // case: unchanged -> modified, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", 
allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> modified, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedGreaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> modified, first > second -> first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + 
PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> modified, first < second -> first > second + PrepareConfig(); + ofstream fout(configDir1 / "test3.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + fout.open(configDir2 / "test2.json", ios::trunc); + fout << modifiedLessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << modifiedOtherConfig; + fout.close(); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(1, 
diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test3", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> removed, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: unchanged -> removed, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + 
fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + filesystem::remove(configDir2 / "test2.json"); + filesystem::remove(configDir2 / "test-other.json"); + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(1, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(2, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(1U, allConfigNames.size()); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[0]); + ClearConfig(); + } + { // case: unchanged -> removed, first > second -> first < second + // should not happen + } + { // case: unchanged -> removed, first < second -> first > second + // should not happen + } + { // case: unchanged -> unchanged, first > second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + 
PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test1", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> unchanged, first < second + PrepareConfig(); + ofstream fout(configDir1 / "test1.json", ios::trunc); + fout << lessPriorityConfig; + fout.close(); + fout.open(configDir2 / "test2.json", ios::trunc); + fout << greaterPriorityConfig; + fout.close(); + fout.open(configDir2 / "test-other.json", ios::trunc); + fout << otherConfig; + fout.close(); + auto diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + APSARA_TEST_EQUAL_FATAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); + + diff = PipelineConfigWatcher::GetInstance()->CheckConfigDiff(); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mAdded.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mModified.size()); + APSARA_TEST_EQUAL_FATAL(0, diff.first.mRemoved.size()); + + PipelineManagerMock::GetInstance()->UpdatePipelines(diff.first); + auto allConfigNames = PipelineManagerMock::GetInstance()->GetAllConfigNames(); + APSARA_TEST_EQUAL_FATAL(2U, allConfigNames.size()); + sort(allConfigNames.begin(), allConfigNames.end()); + APSARA_TEST_EQUAL_FATAL("test-other", allConfigNames[0]); + APSARA_TEST_EQUAL_FATAL("test2", allConfigNames[1]); + ClearConfig(); + } + { // case: unchanged -> unchanged, first > second -> first < second + // should not happen + } + { // case: unchanged -> unchanged, first < second -> first > second + // should not happen + } +} + +UNIT_TEST_CASE(PipelineConfigWatcherUnittest, TestLoadAddedSingletonConfig) +UNIT_TEST_CASE(PipelineConfigWatcherUnittest, TestLoadModifiedSingletonConfig) 
+UNIT_TEST_CASE(PipelineConfigWatcherUnittest, TestLoadRemovedSingletonConfig) +UNIT_TEST_CASE(PipelineConfigWatcherUnittest, TestLoadUnchangedSingletonConfig) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/config/PipelineManagerMock.h b/core/unittest/config/PipelineManagerMock.h new file mode 100644 index 0000000000..b25e4f05b9 --- /dev/null +++ b/core/unittest/config/PipelineManagerMock.h @@ -0,0 +1,58 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "pipeline/PipelineManager.h" + +using namespace std; + +namespace logtail { + +class PipelineMock : public Pipeline { +public: + bool Init(PipelineConfig&& config) { + mConfig = std::move(config.mDetail); + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + mMetricsRecordRef, + MetricCategory::METRIC_CATEGORY_PIPELINE, + {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mName}}); + mStartTime = mMetricsRecordRef.CreateIntGauge(METRIC_PIPELINE_START_TIME); + mSingletonInput = config.mSingletonInput; + mContext.SetCreateTime(config.mCreateTime); + return (*mConfig)["valid"].asBool(); + } +}; + +class PipelineManagerMock : public PipelineManager { +public: + static PipelineManagerMock* GetInstance() { + static PipelineManagerMock instance; + return &instance; + } + + void ClearEnvironment() { + mPipelineNameEntityMap.clear(); + mPluginCntMap.clear(); + } + +private: + shared_ptr BuildPipeline(PipelineConfig&& config) override { + // this should be synchronized with PipelineManager::BuildPipeline, except for the pointer type. 
+ shared_ptr p = make_shared(); + if (!p->Init(std::move(config))) { + return nullptr; + } + return p; + } +}; +} // namespace logtail \ No newline at end of file From 13ab38a6202e645b1dc888b1d4ae68f84a0a0feb Mon Sep 17 00:00:00 2001 From: Tom Yu Date: Thu, 12 Dec 2024 22:23:25 +0800 Subject: [PATCH 5/8] fix enterprise code in ObserveHandler (#1961) --- core/ebpf/handler/ObserveHandler.cpp | 161 +++++++++++++++------------ core/ebpf/handler/ObserveHandler.h | 14 +-- 2 files changed, 96 insertions(+), 79 deletions(-) diff --git a/core/ebpf/handler/ObserveHandler.cpp b/core/ebpf/handler/ObserveHandler.cpp index e3e47bc01c..695568f873 100644 --- a/core/ebpf/handler/ObserveHandler.cpp +++ b/core/ebpf/handler/ObserveHandler.cpp @@ -31,69 +31,80 @@ namespace logtail { namespace ebpf { #define ADD_STATUS_METRICS(METRIC_NAME, FIELD_NAME, VALUE) \ - {if (!inner->FIELD_NAME) return; \ - auto event = group.AddMetricEvent(); \ - for (const auto& tag : measure->tags_) { \ - event->SetTag(tag.first, tag.second); \ - } \ - event->SetTag(std::string("status_code"), std::string(VALUE)); \ - event->SetName(METRIC_NAME); \ - event->SetTimestamp(ts); \ - event->SetValue(UntypedSingleValue{(double)inner->FIELD_NAME});} \ + { \ + if (!inner->FIELD_NAME) { \ + return; \ + } \ + auto* event = group.AddMetricEvent(); \ + for (const auto& tag : measure->tags_) { \ + event->SetTag(tag.first, tag.second); \ + } \ + event->SetTag(std::string("status_code"), std::string(VALUE)); \ + event->SetName(METRIC_NAME); \ + event->SetTimestamp(ts); \ + event->SetValue(UntypedSingleValue{(double)inner->FIELD_NAME}); \ + } #define GENERATE_METRICS(FUNC_NAME, MEASURE_TYPE, INNER_TYPE, METRIC_NAME, FIELD_NAME) \ -void FUNC_NAME(PipelineEventGroup& group, std::unique_ptr& measure, uint64_t ts) { \ - if (measure->type_ != MEASURE_TYPE) return; \ - auto inner = static_cast(measure->inner_measure_.get()); \ - if (!inner->FIELD_NAME) return; \ - auto event = group.AddMetricEvent(); \ - for (const auto& tag : 
measure->tags_) { \ - event->SetTag(tag.first, tag.second); \ - } \ - event->SetName(METRIC_NAME); \ - event->SetTimestamp(ts); \ - event->SetValue(UntypedSingleValue{(double)inner->FIELD_NAME}); \ -} - -void OtelMeterHandler::handle(std::vector>& measures, uint64_t timestamp) { - if (measures.empty()) return; + void FUNC_NAME(PipelineEventGroup& group, const std::unique_ptr& measure, uint64_t ts) { \ + if (measure->type_ != (MEASURE_TYPE)) { \ + return; \ + } \ + const auto* inner = static_cast(measure->inner_measure_.get()); \ + if (!inner->FIELD_NAME) { \ + return; \ + } \ + auto* event = group.AddMetricEvent(); \ + for (const auto& tag : measure->tags_) { \ + event->SetTag(tag.first, tag.second); \ + } \ + event->SetName(METRIC_NAME); \ + event->SetTimestamp(ts); \ + event->SetValue(UntypedSingleValue{(double)inner->FIELD_NAME}); \ + } - for (const auto& appBatchMeasures : measures) { - PipelineEventGroup eventGroup(std::make_shared()); - for (const auto& measure : appBatchMeasures->measures_) { - auto type = measure->type_; - if (type == MeasureType::MEASURE_TYPE_APP) { - auto inner = static_cast(measure->inner_measure_.get()); - auto event = eventGroup.AddMetricEvent(); - for (const auto& tag : measure->tags_) { - event->SetTag(tag.first, tag.second); + void OtelMeterHandler::handle(const std::vector>& measures, + uint64_t timestamp) { + if (measures.empty()) { + return; + } + for (const auto& appBatchMeasures : measures) { + PipelineEventGroup eventGroup(std::make_shared()); + for (const auto& measure : appBatchMeasures->measures_) { + auto type = measure->type_; + if (type == MeasureType::MEASURE_TYPE_APP) { + auto* inner = static_cast(measure->inner_measure_.get()); + auto* event = eventGroup.AddMetricEvent(); + for (const auto& tag : measure->tags_) { + event->SetTag(tag.first, tag.second); + } + event->SetName("service_requests_total"); + event->SetTimestamp(timestamp); + event->SetValue(UntypedSingleValue{(double)inner->request_total_}); } - 
event->SetName("service_requests_total"); - event->SetTimestamp(timestamp); - event->SetValue(UntypedSingleValue{(double)inner->request_total_}); + mProcessTotalCnt++; } - mProcessTotalCnt++; - } #ifdef APSARA_UNIT_TEST_MAIN continue; #endif std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { - LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Otel Metrics] push queue failed!", "")); + LOG_WARNING(sLogger, + ("configName", mCtx->GetConfigName())("pluginIdx", + mPluginIdx)("[Otel Metrics] push queue failed!", "")); } - } - return; -} - -void OtelSpanHandler::handle(std::vector>& spans) { - if (spans.empty()) return; + } +void OtelSpanHandler::handle(const std::vector>& spans) { + if (spans.empty()) { + return; + } for (const auto& span : spans) { std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); for (const auto& x : span->single_spans_) { - auto spanEvent = eventGroup.AddSpanEvent(); + auto* spanEvent = eventGroup.AddSpanEvent(); for (const auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } @@ -110,30 +121,35 @@ void OtelSpanHandler::handle(std::vector>& #endif std::unique_ptr item = std::make_unique(std::move(eventGroup), mPluginIdx); if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item))) { - LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Span] push queue failed!", "")); + LOG_WARNING( + sLogger, + ("configName", mCtx->GetConfigName())("pluginIdx", mPluginIdx)("[Span] push queue failed!", "")); } - } - - return; } -void EventHandler::handle(std::vector>& events) { - if (events.empty()) return; - +void EventHandler::handle(const std::vector>& events) { + if (events.empty()) { + return; + } for (const auto& appEvents : events) { - if (!appEvents || appEvents->events_.empty()) continue; + if (!appEvents || 
appEvents->events_.empty()) { + continue; + } std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); for (const auto& event : appEvents->events_) { - if (!event || event->GetAllTags().empty()) continue; - auto logEvent = eventGroup.AddLogEvent(); + if (!event || event->GetAllTags().empty()) { + continue; + } + auto* logEvent = eventGroup.AddLogEvent(); for (const auto& tag : event->GetAllTags()) { logEvent->SetContent(tag.first, tag.second); - auto seconds = std::chrono::duration_cast(std::chrono::nanoseconds(event->GetTimestamp())); + auto seconds + = std::chrono::duration_cast(std::chrono::nanoseconds(event->GetTimestamp())); logEvent->SetTimestamp(seconds.count(), event->GetTimestamp() - seconds.count() * 1e9); } - mProcessTotalCnt ++; + mProcessTotalCnt++; } for (const auto& tag : appEvents->tags_) { eventGroup.SetTag(tag.first, tag.second); @@ -169,9 +185,11 @@ GENERATE_METRICS(GenerateRequestsSlowMetrics, MeasureType::MEASURE_TYPE_APP, App GENERATE_METRICS(GenerateRequestsErrorMetrics, MeasureType::MEASURE_TYPE_APP, AppSingleMeasure, rpc_request_err_count, error_total_) GENERATE_METRICS(GenerateRequestsDurationSumMetrics, MeasureType::MEASURE_TYPE_APP, AppSingleMeasure, rpc_request_status_count, duration_ms_sum_) -void GenerateRequestsStatusMetrics(PipelineEventGroup& group, std::unique_ptr& measure, uint64_t ts) { - if (measure->type_ != MeasureType::MEASURE_TYPE_APP) return; - auto inner = static_cast(measure->inner_measure_.get()); +void GenerateRequestsStatusMetrics(PipelineEventGroup& group, const std::unique_ptr& measure, uint64_t ts) { + if (measure->type_ != MeasureType::MEASURE_TYPE_APP) { + return; + } + const auto* inner = static_cast(measure->inner_measure_.get()); ADD_STATUS_METRICS(rpc_request_status_count, status_2xx_count_, status_2xx_key); ADD_STATUS_METRICS(rpc_request_status_count, status_3xx_count_, status_3xx_key); ADD_STATUS_METRICS(rpc_request_status_count, status_4xx_count_, status_4xx_key); @@ 
-195,15 +213,16 @@ GENERATE_METRICS(GenerateTcpRecvBytesTotalMetrics, MeasureType::MEASURE_TYPE_NET GENERATE_METRICS(GenerateTcpSendPktsTotalMetrics, MeasureType::MEASURE_TYPE_NET, NetSingleMeasure, npm_send_pkt_total, send_pkt_total_) GENERATE_METRICS(GenerateTcpSendBytesTotalMetrics, MeasureType::MEASURE_TYPE_NET, NetSingleMeasure, npm_send_byte_total, send_byte_total_) -void ArmsSpanHandler::handle(std::vector>& spans) { - if (spans.empty()) return; - +void ArmsSpanHandler::handle(const std::vector>& spans) { + if (spans.empty()) { + return; + } for (const auto& span : spans) { std::shared_ptr sourceBuffer = std::make_shared(); PipelineEventGroup eventGroup(sourceBuffer); eventGroup.SetTag(app_id_key, span->app_id_); for (const auto& x : span->single_spans_) { - auto spanEvent = eventGroup.AddSpanEvent(); + auto* spanEvent = eventGroup.AddSpanEvent(); for (const auto& tag : x->tags_) { spanEvent->SetTag(tag.first, tag.second); } @@ -223,13 +242,13 @@ void ArmsSpanHandler::handle(std::vector>& LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Span] push queue failed!", "")); } } - - return; } -void ArmsMeterHandler::handle(std::vector>& measures, uint64_t timestamp) { - if (measures.empty()) return; - +void ArmsMeterHandler::handle(const std::vector>& measures, + uint64_t timestamp) { + if (measures.empty()) { + return; + } for (const auto& appBatchMeasures : measures) { std::shared_ptr sourceBuffer = std::make_shared();; PipelineEventGroup eventGroup(sourceBuffer); @@ -264,9 +283,7 @@ void ArmsMeterHandler::handle(std::vectorPushQueue(mQueueKey, std::move(item))) { LOG_WARNING(sLogger, ("configName", mCtx->GetConfigName())("pluginIdx",mPluginIdx)("[Metrics] push queue failed!", "")); } - } - return; } #endif diff --git a/core/ebpf/handler/ObserveHandler.h b/core/ebpf/handler/ObserveHandler.h index d70cf33470..a1cc53f711 100644 --- a/core/ebpf/handler/ObserveHandler.h +++ b/core/ebpf/handler/ObserveHandler.h @@ -26,31 +26,31 @@ 
class MeterHandler : public AbstractHandler { public: MeterHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - virtual void handle(std::vector>&, uint64_t) = 0; + virtual void handle(const std::vector>&, uint64_t) = 0; }; class OtelMeterHandler : public MeterHandler { public: OtelMeterHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : MeterHandler(ctx, key, idx) {} - void handle(std::vector>& measures, uint64_t timestamp) override; + void handle(const std::vector>& measures, uint64_t timestamp) override; }; class SpanHandler : public AbstractHandler { public: SpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - virtual void handle(std::vector>&) = 0; + virtual void handle(const std::vector>&) = 0; }; class OtelSpanHandler : public SpanHandler { public: OtelSpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : SpanHandler(ctx, key, idx) {} - void handle(std::vector>&) override; + void handle(const std::vector>&) override; }; class EventHandler : public AbstractHandler { public: EventHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : AbstractHandler(ctx, key, idx) {} - void handle(std::vector>&); + void handle(const std::vector>&); }; #ifdef __ENTERPRISE__ @@ -58,13 +58,13 @@ class EventHandler : public AbstractHandler { class ArmsMeterHandler : public MeterHandler { public: ArmsMeterHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : MeterHandler(ctx, key, idx) {} - void handle(std::vector>& measures, uint64_t timestamp) override; + void handle(const std::vector>& measures, uint64_t timestamp) override; }; class ArmsSpanHandler : public SpanHandler { public: ArmsSpanHandler(const logtail::PipelineContext* ctx, QueueKey key, uint32_t idx) : SpanHandler(ctx, key, idx) {} - void handle(std::vector>&) override; + void handle(const std::vector>&) override; 
}; #endif From 156dd824588178c5db1c0332df2f088e04844de2 Mon Sep 17 00:00:00 2001 From: linrunqi08 <90741255+linrunqi08@users.noreply.github.com> Date: Fri, 13 Dec 2024 08:47:41 +0800 Subject: [PATCH 6/8] Revert "Fix the issue of missing container information caused by the event sequence when docker compose is repeatedly up. (#1875)" (#1962) This reverts commit 9005b10aa10bfeda76520e670c1d26706373309d. --- pkg/helper/docker_center.go | 55 ++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/pkg/helper/docker_center.go b/pkg/helper/docker_center.go index 9d481160c8..95798919e5 100644 --- a/pkg/helper/docker_center.go +++ b/pkg/helper/docker_center.go @@ -16,7 +16,6 @@ package helper import ( "context" - "errors" "hash/fnv" "path" "regexp" @@ -1023,35 +1022,6 @@ func (dc *DockerCenter) updateContainer(id string, container *DockerInfoDetail) dc.refreshLastUpdateMapTime() } -func (dc *DockerCenter) inspectOneContainer(containerID string) (types.ContainerJSON, error) { - var err error - var containerDetail types.ContainerJSON - for idx := 0; idx < 3; idx++ { - if containerDetail, err = dc.client.ContainerInspect(context.Background(), containerID); err == nil { - break - } - time.Sleep(time.Second * 5) - } - if err != nil { - dc.setLastError(err, "inspect container error "+containerID) - return types.ContainerJSON{}, err - } - if !ContainerProcessAlive(containerDetail.State.Pid) { - containerDetail.State.Status = ContainerStatusExited - finishedAt := containerDetail.State.FinishedAt - finishedAtTime, _ := time.Parse(time.RFC3339, finishedAt) - now := time.Now() - duration := now.Sub(finishedAtTime) - if duration >= ContainerInfoDeletedTimeout { - errMsg := "inspect time out container " + containerID - err = errors.New(errMsg) - dc.setLastError(err, errMsg) - return types.ContainerJSON{}, err - } - } - return containerDetail, nil -} - func (dc *DockerCenter) fetchAll() error { dc.containerStateLock.Lock() defer 
dc.containerStateLock.Unlock() @@ -1065,9 +1035,26 @@ func (dc *DockerCenter) fetchAll() error { for _, container := range containers { var containerDetail types.ContainerJSON - containerDetail, err = dc.inspectOneContainer(container.ID) + for idx := 0; idx < 3; idx++ { + if containerDetail, err = dc.client.ContainerInspect(context.Background(), container.ID); err == nil { + break + } + time.Sleep(time.Second * 5) + } if err == nil { + if !ContainerProcessAlive(containerDetail.State.Pid) { + containerDetail.State.Status = ContainerStatusExited + finishedAt := containerDetail.State.FinishedAt + finishedAtTime, _ := time.Parse(time.RFC3339, finishedAt) + now := time.Now() + duration := now.Sub(finishedAtTime) + if duration >= ContainerInfoDeletedTimeout { + continue + } + } containerMap[container.ID] = dc.CreateInfoDetail(containerDetail, envConfigPrefix, false) + } else { + dc.setLastError(err, "inspect container error "+container.ID) } } dc.updateContainers(containerMap) @@ -1078,10 +1065,14 @@ func (dc *DockerCenter) fetchAll() error { func (dc *DockerCenter) fetchOne(containerID string, tryFindSandbox bool) error { dc.containerStateLock.Lock() defer dc.containerStateLock.Unlock() - containerDetail, err := dc.inspectOneContainer(containerID) + containerDetail, err := dc.client.ContainerInspect(context.Background(), containerID) if err != nil { + dc.setLastError(err, "inspect container error "+containerID) return err } + if containerDetail.State.Status == ContainerStatusRunning && !ContainerProcessAlive(containerDetail.State.Pid) { + containerDetail.State.Status = ContainerStatusExited + } // docker 场景下 // tryFindSandbox如果是false, 那么fetchOne的地方应该会调用两次,一次是sandbox的id,一次是业务容器的id // tryFindSandbox如果是true, 调用的地方只会有一个业务容器的id,然后依赖fetchOne内部把sandbox信息补全 From 8d42203dd43146b0e8bc286a0a78c3f7e79d7599 Mon Sep 17 00:00:00 2001 From: Takuka0311 <1914426213@qq.com> Date: Fri, 13 Dec 2024 15:05:47 +0800 Subject: [PATCH 7/8] change file name (#1963) --- docs/cn/SUMMARY.md | 36 
+++++++++---------- ...md => awesome-ilogtail-developer-guide.md} | 0 ...md => awesome-ilogtail-getting-started.md} | 0 ...cases.md => awesome-ilogtail-use-cases.md} | 0 .../{ilogtail.md => awesome-ilogtail.md} | 6 +++- ...de.md => config-server-developer-guide.md} | 0 .../{quick-start.md => usage-instructions.md} | 0 docs/cn/contributing/CONTRIBUTING.md | 26 +++++++------- .../log-protocol/How-to-add-new-protocol.md | 2 +- .../{README.md => log-protocol.md} | 2 +- .../{custom_single.md => custom-single.md} | 0 ...ummer-ospp-2024-projects-config-server.md} | 0 ... summer-ospp-2024-projects-ilogtail-io.md} | 0 ...README.md => summer-ospp-2024-projects.md} | 4 +-- .../{README.md => summer-ospp-2024.md} | 0 .../release-notes/release-notes.md | 2 +- .../aggregator/{README.md => aggregators.md} | 2 +- .../extension/{README.md => extensions.md} | 0 ...lusher-kafka_v2.md => flusher-kafka-v2.md} | 0 .../flusher/extended/flusher-pulsar.md | 2 +- .../flusher/{README.md => flushers.md} | 0 .../cn/plugins/input/{README.md => inputs.md} | 2 +- docs/cn/plugins/overview.md | 2 +- .../processor/{README.md => processors.md} | 2 +- 24 files changed, 46 insertions(+), 42 deletions(-) rename docs/cn/awesome-ilogtail/{developer-guide.md => awesome-ilogtail-developer-guide.md} (100%) rename docs/cn/awesome-ilogtail/{getting-started.md => awesome-ilogtail-getting-started.md} (100%) rename docs/cn/awesome-ilogtail/{use-cases.md => awesome-ilogtail-use-cases.md} (100%) rename docs/cn/awesome-ilogtail/{ilogtail.md => awesome-ilogtail.md} (99%) rename docs/cn/config-server/{developer-guide.md => config-server-developer-guide.md} (100%) rename docs/cn/config-server/{quick-start.md => usage-instructions.md} (100%) rename docs/cn/developer-guide/log-protocol/{README.md => log-protocol.md} (95%) rename docs/cn/developer-guide/log-protocol/protocol-spec/{custom_single.md => custom-single.md} (100%) rename docs/cn/events/summer-ospp-2024/projects/{config-server.md => 
summer-ospp-2024-projects-config-server.md} (100%) rename docs/cn/events/summer-ospp-2024/projects/{ilogtail-io.md => summer-ospp-2024-projects-ilogtail-io.md} (100%) rename docs/cn/events/summer-ospp-2024/projects/{README.md => summer-ospp-2024-projects.md} (56%) rename docs/cn/events/summer-ospp-2024/{README.md => summer-ospp-2024.md} (100%) rename docs/cn/plugins/aggregator/{README.md => aggregators.md} (85%) rename docs/cn/plugins/extension/{README.md => extensions.md} (100%) rename docs/cn/plugins/flusher/extended/{flusher-kafka_v2.md => flusher-kafka-v2.md} (100%) rename docs/cn/plugins/flusher/{README.md => flushers.md} (100%) rename docs/cn/plugins/input/{README.md => inputs.md} (99%) rename docs/cn/plugins/processor/{README.md => processors.md} (97%) diff --git a/docs/cn/SUMMARY.md b/docs/cn/SUMMARY.md index c56199a93f..936e4fe779 100644 --- a/docs/cn/SUMMARY.md +++ b/docs/cn/SUMMARY.md @@ -38,7 +38,7 @@ * [概览](plugins/overview.md) * [版本管理](plugins/stability-level.md) * 输入 - * [什么是输入插件](plugins/input/README.md) + * [什么是输入插件](plugins/input/inputs.md) * 原生插件 * [文本日志](plugins/input/native/input-file.md) * [容器标准输出](plugins/input/native/input-container-stdio.md) @@ -69,7 +69,7 @@ * [【Debug】Mock数据-Service](plugins/input/extended/service-mock.md) * [【Debug】文本日志](plugins/input/extended/metric-debug-file.md) * 处理 - * [什么是处理插件](plugins/processor/README.md) + * [什么是处理插件](plugins/processor/processors.md) * SPL 处理 * [SPL 处理](plugins/processor/spl/processor-spl-native.md) * 原生插件 @@ -102,13 +102,13 @@ * [多行切分](plugins/processor/extended/processor-split-log-regex.md) * [字符串替换](plugins/processor/extended/processor-string-replace.md) * 聚合 - * [什么是聚合插件](plugins/aggregator/README.md) + * [什么是聚合插件](plugins/aggregator/aggregators.md) * [基础聚合](plugins/aggregator/aggregator-base.md) * [按上下文分组](plugins/aggregator/aggregator-context.md) * [按Key分组](plugins/aggregator/aggregator-content-value-group.md) * [按GroupMetadata分组](plugins/aggregator/aggregator-metadata-group.md) * 输出 - * 
[什么是输出插件](plugins/flusher/README.md) + * [什么是输出插件](plugins/flusher/flushers.md) * 原生插件 * [SLS](plugins/flusher/native/flusher-sls.md) * [【Debug】Blackhole](plugins/flusher/native/flusher-blackhole.md) @@ -117,7 +117,7 @@ * [ClickHouse](plugins/flusher/extended/flusher-clickhouse.md) * [ElasticSearch](plugins/flusher/extended/flusher-elasticsearch.md) * [HTTP](plugins/flusher/extended/flusher-http.md) - * [kafkaV2](plugins/flusher/extended/flusher-kafka_v2.md) + * [kafkaV2](plugins/flusher/extended/flusher-kafka-v2.md) * [Kafka(Deprecated)](plugins/flusher/extended/flusher-kafka.md) * [OTLP日志](plugins/flusher/extended/flusher-otlp.md) * [Prometheus](plugins/flusher/extended/flusher-prometheus.md) @@ -125,7 +125,7 @@ * [标准输出/文件](plugins/flusher/extended/flusher-stdout.md) * [Loki](plugins/flusher/extended/loki.md) * 扩展 - * [什么是扩展插件](plugins/extension/README.md) + * [什么是扩展插件](plugins/extension/extensions.md) * [BasicAuth鉴权](plugins/extension/ext-basicauth.md) * [协议解码/反序列化](plugins/extension/ext-default-decoder.md) * [协议编码/序列化](plugins/extension/ext-default-encoder.md) @@ -142,12 +142,12 @@ * [代码风格](developer-guide/codestyle.md) * [数据模型](developer-guide/data-model.md) * 日志协议 - * [什么是日志协议](developer-guide/log-protocol/README.md) + * [什么是日志协议](developer-guide/log-protocol/log-protocol.md) * [协议转换](developer-guide/log-protocol/converter.md) * [增加新的日志协议](developer-guide/log-protocol/How-to-add-new-protocol.md) * 协议 * [sls协议](developer-guide/log-protocol/protocol-spec/sls.md) - * [单条协议](developer-guide/log-protocol/protocol-spec/custom_single.md) + * [单条协议](developer-guide/log-protocol/protocol-spec/custom-single.md) * [raw协议](developer-guide/log-protocol/protocol-spec/raw.md) * 插件开发 * [开源插件开发引导](developer-guide/plugin-development/plugin-development-guide.md) @@ -193,22 +193,22 @@ ## 管控工具 -* [使用介绍](config-server/quick-start.md) +* [使用介绍](config-server/usage-instructions.md) * [通信协议](config-server/communication-protocol.md) -* [开发指南](config-server/developer-guide.md) +* 
[开发指南](config-server/config-server-developer-guide.md) ## 社区活动 * 开源之夏 2024 - * [开源之夏 2024 活动介绍](events/summer-ospp-2024/README.md) + * [开源之夏 2024 活动介绍](events/summer-ospp-2024/summer-ospp-2024.md) * 项目 - * [iLogtail 社区项目介绍](events/summer-ospp-2024/projects/README.md) - * [iLogtail 数据吞吐性能优化](events/summer-ospp-2024/projects/ilogtail-io.md) - * [ConfigServer 能力升级 + 体验优化(全栈)](events/summer-ospp-2024/projects/config-server.md) + * [iLogtail 社区项目介绍](events/summer-ospp-2024/projects/summer-ospp-2024-projects.md) + * [iLogtail 数据吞吐性能优化](events/summer-ospp-2024/projects/summer-ospp-2024-projects-ilogtail-io.md) + * [ConfigServer 能力升级 + 体验优化(全栈)](events/summer-ospp-2024/projects/summer-ospp-2024-projects-config-server.md) ## Awesome iLogtail -* [走近iLogtail社区版](awesome-ilogtail/ilogtail.md) -* [iLogtail社区版使用入门](awesome-ilogtail/getting-started.md) -* [iLogtail社区版开发者指南](awesome-ilogtail/developer-guide.md) -* [iLogtail社区版使用案例](awesome-ilogtail/use-cases.md) +* [走近iLogtail社区版](awesome-ilogtail/awesome-ilogtail.md) +* [iLogtail社区版使用入门](awesome-ilogtail/awesome-ilogtail-getting-started.md) +* [iLogtail社区版开发者指南](awesome-ilogtail/awesome-ilogtail-developer-guide.md) +* [iLogtail社区版使用案例](awesome-ilogtail/awesome-ilogtail-use-cases.md) diff --git a/docs/cn/awesome-ilogtail/developer-guide.md b/docs/cn/awesome-ilogtail/awesome-ilogtail-developer-guide.md similarity index 100% rename from docs/cn/awesome-ilogtail/developer-guide.md rename to docs/cn/awesome-ilogtail/awesome-ilogtail-developer-guide.md diff --git a/docs/cn/awesome-ilogtail/getting-started.md b/docs/cn/awesome-ilogtail/awesome-ilogtail-getting-started.md similarity index 100% rename from docs/cn/awesome-ilogtail/getting-started.md rename to docs/cn/awesome-ilogtail/awesome-ilogtail-getting-started.md diff --git a/docs/cn/awesome-ilogtail/use-cases.md b/docs/cn/awesome-ilogtail/awesome-ilogtail-use-cases.md similarity index 100% rename from docs/cn/awesome-ilogtail/use-cases.md rename to 
docs/cn/awesome-ilogtail/awesome-ilogtail-use-cases.md diff --git a/docs/cn/awesome-ilogtail/ilogtail.md b/docs/cn/awesome-ilogtail/awesome-ilogtail.md similarity index 99% rename from docs/cn/awesome-ilogtail/ilogtail.md rename to docs/cn/awesome-ilogtail/awesome-ilogtail.md index aed421a46f..b63abd09a6 100644 --- a/docs/cn/awesome-ilogtail/ilogtail.md +++ b/docs/cn/awesome-ilogtail/awesome-ilogtail.md @@ -1,5 +1,7 @@ # 走近iLogtail社区版 + ## 开源历程 + * 2024-08 [iLogtail 开源两周年:感恩遇见,畅想未来](https://mp.weixin.qq.com/s/RoFjoYlPLG1yOzDGc7vqIQ) * 2023-02 [鲲鹏展翅凌云志:iLogtail社区2022年度开源报告](https://mp.weixin.qq.com/s/6luD7VUFd_0aaeyUBAShkw) * 2024-02 [你好,iLogtail 2.0!](https://developer.aliyun.com/article/1441630) @@ -7,6 +9,7 @@ * 2022-08 [iLogtail开源之路](https://mp.weixin.qq.com/s/5j5KJe9BmpZ1tdb-KCx_CQ) ## 技术分享 + * 2024-08 [软件测试之道 -- 做一个有匠心的程序员!](https://mp.weixin.qq.com/s/ktEMOcXBopFiX9NIN3chHg) 看iLogtail如何做测试设计 * 2024-08 [代码整洁之道--告别码农,做一个有思想的程序员!](https://mp.weixin.qq.com/s/tK0ZyRxKBGpCqIw16SPSxg) 看iLogtail如何追求代码整洁 * 2024-04 [破浪前行:iLogtail十年老架构如何浴火重生](https://developer.aliyun.com/article/1484844) @@ -18,8 +21,9 @@ * 2022-12 [阿里十年技术沉淀|深度解析百PB级数据总线技术](https://mp.weixin.qq.com/s/NKoTCM5o-Rs_83Wakk9yCw) ## 最佳实践 + * 2022-09 [零信任策略下K8s安全监控最佳实践](https://mp.weixin.qq.com/s/wYUNsGaWEnQZ0BVxsQORbA) ## 其他 -* 2021-09 [您有一份来自iLogtail社区的礼物待查收](https://mp.weixin.qq.com/s/fyWwnKR1I4jgNiX30Wu-Vg) +* 2021-09 [您有一份来自iLogtail社区的礼物待查收](https://mp.weixin.qq.com/s/fyWwnKR1I4jgNiX30Wu-Vg) diff --git a/docs/cn/config-server/developer-guide.md b/docs/cn/config-server/config-server-developer-guide.md similarity index 100% rename from docs/cn/config-server/developer-guide.md rename to docs/cn/config-server/config-server-developer-guide.md diff --git a/docs/cn/config-server/quick-start.md b/docs/cn/config-server/usage-instructions.md similarity index 100% rename from docs/cn/config-server/quick-start.md rename to docs/cn/config-server/usage-instructions.md diff --git a/docs/cn/contributing/CONTRIBUTING.md 
b/docs/cn/contributing/CONTRIBUTING.md index 37625f323a..c4e1f0b7df 100644 --- a/docs/cn/contributing/CONTRIBUTING.md +++ b/docs/cn/contributing/CONTRIBUTING.md @@ -1,12 +1,12 @@ # 贡献指南 -欢迎来到 iLogtail 的社区!感谢您为iLogtail贡献代码、文档及案例! +欢迎来到 LoongCollector 的社区!感谢您为 LoongCollector 贡献代码、文档及案例! -iLogtail 自从开源以来,受到了很多社区同学的关注。社区的每一个 Issue、每一个 pull request (PR),都是在为 iLogtail 的发展添砖加瓦。衷心地希望越来越多的社区同学能参与到 iLogtail 项目中来,跟我们一起把 iLogtail 做好。 +LoongCollector 自从开源以来,受到了很多社区同学的关注。社区的每一个 Issue、每一个 pull request (PR),都是在为 LoongCollector 的发展添砖加瓦。衷心地希望越来越多的社区同学能参与到 LoongCollector 项目中来,跟我们一起把 LoongCollector 做好。 ## 行为准则 -参与 iLogtail 社区贡献,请阅读并同意遵守[阿里巴巴开源行为准则](https://github.com/alibaba/community/blob/master/CODE_OF_CONDUCT_zh.md),共同营造一个开放透明且友好的开源社区环境。 +参与 LoongCollector 社区贡献,请阅读并同意遵守[阿里巴巴开源行为准则](https://github.com/alibaba/community/blob/master/CODE_OF_CONDUCT_zh.md),共同营造一个开放透明且友好的开源社区环境。 ## 贡献流程 @@ -38,14 +38,14 @@ iLogtail 自从开源以来,受到了很多社区同学的关注。社区的 设计定稿后,即可进行开发流程。下面是开源贡献者常用的工作流(workflow): -1. 将 [iLogtail](https://github.com/alibaba/loongcollector) 仓库 fork 到个人 GitHub 下。 +1. 将 [LoongCollector](https://github.com/alibaba/loongcollector) 仓库 fork 到个人 GitHub 下。 2. 基于个人 fork 分支进行开发、测试工作。详细流程: - 1. 保持个人 main 分支跟 iLogtail 主仓库 main 分支及时同步。 + 1. 保持个人 main 分支跟 LoongCollector 主仓库 main 分支及时同步。 2. 将 fork 后的个人仓库 clone 到本地。 3. 创建新的开发分支,并进行开发。**请确保对应的变更都有 UnitTest 或 E2E 测试**。 4. 在本地提交变更。**注意 commit log 保持简练、规范,提交的 email 需要和 GitHub 的 email 保持一致。** 5. 将变更 push 到远程个人分支。 -3. 向 iLogtail main 分支创建一个 [pull request (PR)](https://github.com/alibaba/loongcollector/pulls),在进行较大的变更的时候请确保 PR 有一个对应的 Issue,并进行关联。 +3. 向 LoongCollector main 分支创建一个 [pull request (PR)](https://github.com/alibaba/loongcollector/pulls),在进行较大的变更的时候请确保 PR 有一个对应的 Issue,并进行关联。 1. 发起 PR 前请进行如下规范性检查:[代码/文档风格](../developer-guide/codestyle.md)、[编码规范](../developer-guide/code-check/check-codestyle.md)、[依赖包许可证](../developer-guide/code-check/check-dependency-license.md)、[文件许可证](../developer-guide/code-check/check-license.md)。 2. 
为了更好的进行版本管理,对于一些独立的特性或者关键BUG修复,请提交[Changelog](https://github.com/alibaba/loongcollector/blob/main/CHANGELOG.md). @@ -93,22 +93,22 @@ iLogtail 自从开源以来,受到了很多社区同学的关注。社区的 ### 案例分享 -我们也欢迎您分享任何关于 iLogtail 的使用案例。我们在知乎建立了专栏 [iLogtail社区](https://www.zhihu.com/column/c_1533139823409270785),欢迎大家投稿,分享 iLogtail 的使用案例。 +我们也欢迎您分享任何关于 LoongCollector 的使用案例。我们在知乎建立了专栏 [iLogtail社区](https://www.zhihu.com/column/c_1533139823409270785),欢迎大家投稿,分享 LoongCollector 的使用案例。 1. 在知乎写文章,例如[一文搞懂 SAE 日志采集架构](https://zhuanlan.zhihu.com/p/557591446)。 2. 推荐自己的文章到“iLogtail社区”专栏。 -3. GitHub上修改[use-cases.md](https://github.com/alibaba/loongcollector/blob/main/docs/cn/awesome-ilogtail/use-cases.md)并发起PR,Label选awesome ilogtail。 +3. GitHub上修改[use-cases.md](https://github.com/alibaba/loongcollector/blob/main/docs/cn/awesome-ilogtail/awesome-ilogtail-use-cases.md)并发起PR,Label选awesome ilogtail。 ### 参与社区讨论 -如果您在使用 iLogtail 中遇到任何问题,欢迎到 [Discussions](https://github.com/alibaba/loongcollector/discussions) 进行交流互动。也欢迎在这里帮助其他使用者解答一些使用中的问题。 +如果您在使用 LoongCollector 中遇到任何问题,欢迎到 [Discussions](https://github.com/alibaba/loongcollector/discussions) 进行交流互动。也欢迎在这里帮助其他使用者解答一些使用中的问题。 Discussion 分类: -* Announcements:iLogtail官方公告。 -* Help:使用 iLogtail 中遇到问题,想在社区寻求帮助。 -* Ideas:关于 iLogtail 的一些想法,欢迎随时交流。 -* Show and tell:可以在这里展示任何跟 iLogtail 相关的工作,例如一些小工具等。 +* Announcements:LoongCollector 官方公告。 +* Help:使用 LoongCollector 中遇到问题,想在社区寻求帮助。 +* Ideas:关于 LoongCollector 的一些想法,欢迎随时交流。 +* Show and tell:可以在这里展示任何跟 LoongCollector 相关的工作,例如一些小工具等。 ## 联系我们 diff --git a/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md b/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md index 7517792d15..138bc2ebd6 100644 --- a/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md +++ b/docs/cn/developer-guide/log-protocol/How-to-add-new-protocol.md @@ -52,4 +52,4 @@ - 在`c.DoWithSelectedFields`方法的`switch`语句中新增一个`case`子句,`case`名为协议名,子句内容为`return c.ConvertToXXXProtocolLogs(logGroup, targetFields)`,其中涉及的函数即为第2步中编写的函数 - 
在`c.ToByteStreamWithSelectedFields`方法的`switch`语句中新增一个`case`子句,`case`名为协议名,子句内容为`return c.ConvertToXXXProtocolStream(logGroup, targetFields)`,其中涉及的函数即为第2步中编写的函数 -4. 在`./doc/cn/developer-guide/log-protocol/converter.md`的附录、`README.md`中增加协议相关内容,并在`./doc/cn/developer-guide/log-protocol/protocol-spec`文件夹下新增`.md`文件描述具体的协议形式。 +4. 在`./doc/cn/developer-guide/log-protocol/converter.md`的附录、`log-protocol.md`中增加协议相关内容,并在`./doc/cn/developer-guide/log-protocol/protocol-spec`文件夹下新增`.md`文件描述具体的协议形式。 diff --git a/docs/cn/developer-guide/log-protocol/README.md b/docs/cn/developer-guide/log-protocol/log-protocol.md similarity index 95% rename from docs/cn/developer-guide/log-protocol/README.md rename to docs/cn/developer-guide/log-protocol/log-protocol.md index 0db26822db..f0de10536d 100644 --- a/docs/cn/developer-guide/log-protocol/README.md +++ b/docs/cn/developer-guide/log-protocol/log-protocol.md @@ -7,6 +7,6 @@ LoongCollector 的日志数据默认以sls自定义协议的形式与外部进 | 协议类型 | 协议名称 | 支持的编码方式 | |-------|--------------------------------------------------------------------------------------------------|---------------| | 标准协议 | [sls协议](protocol-spec/sls.md) | json、protobuf | -| 自定义协议 | [单条协议](protocol-spec/custom_single.md) | json | +| 自定义协议 | [单条协议](protocol-spec/custom-single.md) | json | | 标准协议 | [Influxdb协议](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/) | custom | | 字节流协议 | [raw协议](protocol-spec/raw.md) | custom | diff --git a/docs/cn/developer-guide/log-protocol/protocol-spec/custom_single.md b/docs/cn/developer-guide/log-protocol/protocol-spec/custom-single.md similarity index 100% rename from docs/cn/developer-guide/log-protocol/protocol-spec/custom_single.md rename to docs/cn/developer-guide/log-protocol/protocol-spec/custom-single.md diff --git a/docs/cn/events/summer-ospp-2024/projects/config-server.md b/docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects-config-server.md similarity index 100% rename from 
docs/cn/events/summer-ospp-2024/projects/config-server.md rename to docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects-config-server.md diff --git a/docs/cn/events/summer-ospp-2024/projects/ilogtail-io.md b/docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects-ilogtail-io.md similarity index 100% rename from docs/cn/events/summer-ospp-2024/projects/ilogtail-io.md rename to docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects-ilogtail-io.md diff --git a/docs/cn/events/summer-ospp-2024/projects/README.md b/docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects.md similarity index 56% rename from docs/cn/events/summer-ospp-2024/projects/README.md rename to docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects.md index 245bea106a..43de08c703 100644 --- a/docs/cn/events/summer-ospp-2024/projects/README.md +++ b/docs/cn/events/summer-ospp-2024/projects/summer-ospp-2024-projects.md @@ -2,5 +2,5 @@ iLogtail 为本次开源之夏贡献了两个项目。宣传视频:[iLogtail 社区开源之夏宣讲介绍](https://www.bilibili.com/video/BV1JH4y1V7FS/?vd_source=b01dd5670462ce34eca76313ec727f4d) -* [iLogtail 数据吞吐性能优化](ilogtail-io.md) -* [ConfigServer 能力升级 + 体验优化(全栈)](config-server.md) +* [iLogtail 数据吞吐性能优化](summer-ospp-2024-projects-ilogtail-io.md) +* [ConfigServer 能力升级 + 体验优化(全栈)](summer-ospp-2024-projects-config-server.md) diff --git a/docs/cn/events/summer-ospp-2024/README.md b/docs/cn/events/summer-ospp-2024/summer-ospp-2024.md similarity index 100% rename from docs/cn/events/summer-ospp-2024/README.md rename to docs/cn/events/summer-ospp-2024/summer-ospp-2024.md diff --git a/docs/cn/installation/release-notes/release-notes.md b/docs/cn/installation/release-notes/release-notes.md index 110b65ecfd..b9c31fcd79 100644 --- a/docs/cn/installation/release-notes/release-notes.md +++ b/docs/cn/installation/release-notes/release-notes.md @@ -38,7 +38,7 @@ LoongCollector 是一款集卓越性能、超强稳定性和灵活可编程性 3. 
更灵活的编程管道 * C++ 全面插件化(已发布):同时提供了充足的组件可供插件自由组合,极大地方便社区新增高性能的输入和输出能力,C++原生插件开发指南详见[如何开发原生Input插件](../../developer-guide/plugin-development/native-plugins/how-to-write-native-input-plugins.md)和[如何开发原生Flusher插件](../../developer-guide/plugin-development/native-plugins/how-to-write-native-flusher-plugins.md)。 - * C++ Input 可使用原生 Processor(已发布):C++ Input插件能够与原生及扩展的Processor插件配合使用,并支持SPL插件。这意味着C++ Input插件不仅可以利用原生Processor提供的高性能来解析日志,还能通过丰富的扩展Processor功能进一步处理日志,具体详情请参阅文档[什么是处理插件](../../plugins/processor/README.md)和[什么是输入插件](../../plugins/input/README.md)。 + * C++ Input 可使用原生 Processor(已发布):C++ Input插件能够与原生及扩展的Processor插件配合使用,并支持SPL插件。这意味着C++ Input插件不仅可以利用原生Processor提供的高性能来解析日志,还能通过丰富的扩展Processor功能进一步处理日志,具体详情请参阅文档[什么是处理插件](../../plugins/processor/processors.md)和[什么是输入插件](../../plugins/input/inputs.md)。 * Golang Input 可使用原生 Processor (开发中):Go Input 支持多种灵活的数据源输入,而原生处理插件提供了高性能的数据处理。结合两者的优势,可以构建出既高效又能适应多种应用场景的数据处理流水线。详见 [Issue](https://github.com/alibaba/loongcollector/issues/1917)。 * SPL 处理模式(已发布):SPL 处理模式支持用户通过 SPL 语句实现对数据的处理。无需编写代码开发插件,极大地拓展了 LoongCollector 可应用的场景。详见文档 [SPL 处理](../../plugins/processor/spl/processor-spl-native.md)。 diff --git a/docs/cn/plugins/aggregator/README.md b/docs/cn/plugins/aggregator/aggregators.md similarity index 85% rename from docs/cn/plugins/aggregator/README.md rename to docs/cn/plugins/aggregator/aggregators.md index b77198d540..f455328e52 100644 --- a/docs/cn/plugins/aggregator/README.md +++ b/docs/cn/plugins/aggregator/aggregators.md @@ -4,6 +4,6 @@ * 使用了除[SLS输出插件](../flusher/native/flusher-sls.md) -* 使用了[SLS输出插件](../flusher/native/flusher-sls.md),且同时使用了[扩展处理插件](../processor/README.md) +* 使用了[SLS输出插件](../flusher/native/flusher-sls.md),且同时使用了[扩展处理插件](../processor/processors.md) 对于上述场景,如果用户的采集配置中未指定聚合插件,则`iLogtail`会使用默认聚合插件,即[上下文聚合插件](aggregator-context.md)。 diff --git a/docs/cn/plugins/extension/README.md b/docs/cn/plugins/extension/extensions.md similarity index 100% rename from docs/cn/plugins/extension/README.md rename to 
docs/cn/plugins/extension/extensions.md diff --git a/docs/cn/plugins/flusher/extended/flusher-kafka_v2.md b/docs/cn/plugins/flusher/extended/flusher-kafka-v2.md similarity index 100% rename from docs/cn/plugins/flusher/extended/flusher-kafka_v2.md rename to docs/cn/plugins/flusher/extended/flusher-kafka-v2.md diff --git a/docs/cn/plugins/flusher/extended/flusher-pulsar.md b/docs/cn/plugins/flusher/extended/flusher-pulsar.md index a60b2e45aa..b7ef0c1d85 100644 --- a/docs/cn/plugins/flusher/extended/flusher-pulsar.md +++ b/docs/cn/plugins/flusher/extended/flusher-pulsar.md @@ -123,7 +123,7 @@ Topic: test_%{content.application} - `%{content.fieldname}`。`content`代表从`contents`中取指定字段值 - `%{tag.fieldname}`,`tag`表示从`tags`中取指定字段值,例如:`%{tag.k8s.namespace.name}` -- `${env_name}`, 读取系统变量绑定到动态`topic`上,`ilogtail 1.5.0`开始支持。可以参考`flusher-kafka_v2`中的使用。 +- `${env_name}`, 读取系统变量绑定到动态`topic`上,`ilogtail 1.5.0`开始支持。可以参考`flusher-kafka-v2`中的使用。 - 其它方式暂不支持 ### TagFieldsRename diff --git a/docs/cn/plugins/flusher/README.md b/docs/cn/plugins/flusher/flushers.md similarity index 100% rename from docs/cn/plugins/flusher/README.md rename to docs/cn/plugins/flusher/flushers.md diff --git a/docs/cn/plugins/input/README.md b/docs/cn/plugins/input/inputs.md similarity index 99% rename from docs/cn/plugins/input/README.md rename to docs/cn/plugins/input/inputs.md index 238d3aa1a1..636684270d 100644 --- a/docs/cn/plugins/input/README.md +++ b/docs/cn/plugins/input/inputs.md @@ -84,4 +84,4 @@ - 插件组合规则: - 原生Input插件: 可配合原生/扩展Processor插件使用,支持SPL插件 - 扩展Input插件: 仅支持扩展Processor插件 - - 详细说明请参考[处理插件文档](../processor/README.md) + - 详细说明请参考[处理插件文档](../processor/processors.md) diff --git a/docs/cn/plugins/overview.md b/docs/cn/plugins/overview.md index 741c9d26af..a9a1f1838d 100644 --- a/docs/cn/plugins/overview.md +++ b/docs/cn/plugins/overview.md @@ -104,7 +104,7 @@ | 名称 | 提供方 | 简介 | | --- | --- | --- | | `flusher_kafka`
[Kafka](flusher/extended/flusher-kafka.md) | 社区 | 将采集到的数据输出到Kafka。推荐使用下面的flusher_kafka_v2 | -| `flusher_kafka_v2`
[Kafka V2](flusher/extended/flusher-kafka_v2.md) | 社区
[shalousun](https://github.com/shalousun) | 将采集到的数据输出到Kafka。 | +| `flusher_kafka_v2`
[Kafka V2](flusher/extended/flusher-kafka-v2.md) | 社区
[shalousun](https://github.com/shalousun) | 将采集到的数据输出到Kafka。 | | `flusher_stdout`
[标准输出/文件](flusher/extended/flusher-stdout.md) | SLS官方 | 将采集到的数据输出到标准输出或文件。 | | `flusher_otlp_log`
[OTLP日志](flusher/extended/flusher-otlp.md) | 社区
[liuhaoyang](https://github.com/liuhaoyang) | 将采集到的数据支持`Opentelemetry log protocol`的后端。 | | `flusher_http`
[HTTP](flusher/extended/flusher-http.md) | 社区
[snakorse](https://github.com/snakorse) | 将采集到的数据以http方式输出到指定的后端。 | diff --git a/docs/cn/plugins/processor/README.md b/docs/cn/plugins/processor/processors.md similarity index 97% rename from docs/cn/plugins/processor/README.md rename to docs/cn/plugins/processor/processors.md index fefefbcd6d..d92c50e4b7 100644 --- a/docs/cn/plugins/processor/README.md +++ b/docs/cn/plugins/processor/processors.md @@ -58,7 +58,7 @@ | [`input_network_security`](../input/native/input-network-security.md) | 网络安全监控插件 | | [`input_process_security`](../input/native/input-process-security.md) | 进程安全监控插件 | -更多输入插件说明请参考[输入插件文档](../input/README.md)。 +更多输入插件说明请参考[输入插件文档](../input/inputs.md)。 ### 插件组合规则 From 7695ae8e4780ef21f9675629c17182e2eae78d53 Mon Sep 17 00:00:00 2001 From: henryzhx8 Date: Fri, 13 Dec 2024 16:36:09 +0800 Subject: [PATCH 8/8] support project anonymous write (#1959) --- core/plugin/flusher/sls/FlusherSLS.cpp | 7 +++++ core/sdk/Client.cpp | 42 +++++++++++++++++++++----- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index 47977e47a6..67a87aa1ad 100644 --- a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -40,6 +40,9 @@ // TODO: temporarily used here #include "pipeline/PipelineManager.h" #include "plugin/flusher/sls/DiskBufferWriter.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif using namespace std; @@ -895,6 +898,10 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) } } SLSClientManager::GetInstance()->UpdateAccessKeyStatus(mAliuid, !hasAuthError); +#ifdef __ENTERPRISE__ + static auto manager = static_cast(SLSClientManager::GetInstance()); + manager->UpdateProjectAnonymousWriteStatus(mProject, !hasAuthError); +#endif } bool FlusherSLS::Send(string&& data, const string& shardHashKey, const string& logstore) { diff --git a/core/sdk/Client.cpp b/core/sdk/Client.cpp 
index c99d4dd71f..acfc49f95c 100644 --- a/core/sdk/Client.cpp +++ b/core/sdk/Client.cpp @@ -18,10 +18,14 @@ #include "CurlImp.h" #include "Exception.h" #include "Result.h" -#include "logger/Logger.h" -#include "plugin/flusher/sls/SLSClientManager.h" #include "app_config/AppConfig.h" +#include "common/Flags.h" +#include "logger/Logger.h" #include "monitor/Monitor.h" +#include "plugin/flusher/sls/SLSClientManager.h" +#ifdef __ENTERPRISE__ +#include "plugin/flusher/sls/EnterpriseSLSClientManager.h" +#endif namespace logtail { namespace sdk { @@ -214,7 +218,12 @@ namespace sdk { SLSClientManager::AuthType type; string accessKeyId, accessKeySecret; if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { - throw LOGException(LOGE_UNAUTHORIZED, ""); +#ifdef __ENTERPRISE__ + static auto* manager = static_cast(SLSClientManager::GetInstance()); + if (!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { + throw LOGException(LOGE_UNAUTHORIZED, ""); + } +#endif } if (type == SLSClientManager::AuthType::ANONYMOUS) { header[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; @@ -232,8 +241,17 @@ namespace sdk { if (mPort == 80 && mUsingHTTPS) { port = 443; } - mClient->Send( - httpMethod, host, port, url, queryString, header, body, mTimeout, httpMessage, AppConfig::GetInstance()->GetBindInterface(), mUsingHTTPS); + mClient->Send(httpMethod, + host, + port, + url, + queryString, + header, + body, + mTimeout, + httpMessage, + AppConfig::GetInstance()->GetBindInterface(), + mUsingHTTPS); if (httpMessage.statusCode != 200) { if (realIpPtr != NULL) { @@ -252,7 +270,12 @@ namespace sdk { SLSClientManager::AuthType type; string accessKeyId, accessKeySecret; if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { - return nullptr; +#ifdef __ENTERPRISE__ + static auto* manager = static_cast(SLSClientManager::GetInstance()); + if 
(!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { + return nullptr; + } +#endif } if (type == SLSClientManager::AuthType::ANONYMOUS) { httpHeader[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER; @@ -280,7 +303,12 @@ namespace sdk { SLSClientManager::AuthType type; string accessKeyId, accessKeySecret; if (!SLSClientManager::GetInstance()->GetAccessKey(mAliuid, type, accessKeyId, accessKeySecret)) { - return nullptr; +#ifdef __ENTERPRISE__ + static auto* manager = static_cast(SLSClientManager::GetInstance()); + if (!manager->GetAccessKeyIfProjectSupportsAnonymousWrite(project, type, accessKeyId, accessKeySecret)) { + return nullptr; + } +#endif } if (type == SLSClientManager::AuthType::ANONYMOUS) { httpHeader[X_LOG_KEYPROVIDER] = MD5_SHA1_SALT_KEYPROVIDER;