Skip to content

Commit

Permalink
Merge branch '8.17' into mergify/bp/8.17/pr-41780
Browse files Browse the repository at this point in the history
  • Loading branch information
rdner authored Dec 11, 2024
2 parents ad9d6c4 + 092f0ea commit 5218b67
Show file tree
Hide file tree
Showing 59 changed files with 1,661 additions and 661 deletions.
13 changes: 13 additions & 0 deletions .mergify.yml
Original file line number Diff line number Diff line change
Expand Up @@ -372,3 +372,16 @@ pull_request_rules:
labels:
- "backport"
title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
- name: backport patches to 8.17 branch
conditions:
- merged
- label=backport-8.17
actions:
backport:
assignees:
- "{{ author }}"
branches:
- "8.17"
labels:
- "backport"
title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
1 change: 1 addition & 0 deletions CHANGELOG.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -2894,6 +2894,7 @@ https://github.com/elastic/beats/compare/v7.17.0\...v8.0.0[View commits]
- Add `while_pattern` type to multiline reader. {pull}19662[19662]
- auditd dataset: Use `process.args` to store program arguments instead of `auditd.log.aNNN` fields. {pull}29601[29601]
- Remove deprecated old `awscloudwatch` input name. {pull}29844[29844]
- Remove `docker` input. Please use `filestream` input with `container` parser or `container` input. {pull}28817[28817]

*Metricbeat*

Expand Down
12 changes: 12 additions & 0 deletions CHANGELOG.next.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,14 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
- Ensure Elasticsearch output can always recover from network errors {pull}40794[40794]
- Add `translate_ldap_attribute` processor. {pull}41472[41472]
- Remove unnecessary debug logs during idle connection teardown {issue}40824[40824]
- Remove unnecessary reload for Elastic Agent managed beats when apm tracing config changes from nil to nil {pull}41794[41794]

*Auditbeat*

- auditd: Request status from a separate socket to avoid data congestion {pull}41207[41207]
- auditd: Use ECS `event.type: end` instead of `stop` for SERVICE_STOP, DAEMON_ABORT, and DAEMON_END messages. {pull}41558[41558]
- auditd: Update syscall names for Linux 6.11. {pull}41558[41558]
- hasher: General improvements and fixes. {pull}41863[41863]

*Filebeat*

Expand Down Expand Up @@ -241,6 +246,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
- Add linux capabilities to processes in the system/process. {pull}37453[37453]
- Add linux capabilities to processes in the system/process. {pull}37453[37453]
- Add process.entity_id, process.group.name and process.group.id in add_process_metadata processor. Make fim module with kprobes backend to always add an appropriately configured add_process_metadata processor to enrich file events {pull}38776[38776]
- Split module/system/process into common and provider bits. {pull}41868[41868]

*Auditbeat*

Expand Down Expand Up @@ -314,6 +320,11 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
- Add support for Journald in the System module. {pull}41555[41555]
- Add ability to remove request trace logs from http_endpoint input. {pull}40005[40005]
- Add ability to remove request trace logs from entityanalytics input. {pull}40004[40004]
- Improve S3 polling mode states registry when using list prefix option. {pull}41869[41869]
- Add support for SSL and Proxy configurations for websocket type in streaming input. {pull}41934[41934]
- AWS S3 input registry cleanup for untracked s3 objects. {pull}41694[41694]
- The environment variable `BEATS_AZURE_EVENTHUB_INPUT_TRACING_ENABLED: true` enables internal logs tracer for the azure-eventhub input. {issue}41931[41931] {pull}41932[41932]
- Refactor & cleanup with updates to default values and documentation. {pull}41834[41834]

*Auditbeat*

Expand Down Expand Up @@ -369,6 +380,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
- Bump aerospike-client-go to version v7.7.1 and add support for basic auth in Aerospike module {pull}41233[41233]
- Only watch metadata for ReplicaSets in metricbeat k8s module {pull}41289[41289]
- Add support for region/zone for Vertex AI service in GCP module {pull}41551[41551]
- Add support for location label as an optional configuration parameter in GCP metrics metricset. {issue}41550[41550] {pull}41626[41626]

*Metricbeat*

Expand Down
43 changes: 29 additions & 14 deletions auditbeat/helper/hasher/hasher.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ import (
"fmt"
"hash"
"io"
"os"
"strings"
"time"

Expand Down Expand Up @@ -124,7 +123,7 @@ type FileTooLargeError struct {

// Error returns the error message for FileTooLargeError.
func (e FileTooLargeError) Error() string {
return fmt.Sprintf("hasher: file size %d exceeds max file size", e.fileSize)
return fmt.Sprintf("size %d exceeds max file size", e.fileSize)
}

// Config contains the configuration of a FileHasher.
Expand Down Expand Up @@ -174,28 +173,46 @@ type FileHasher struct {

// NewFileHasher creates a new FileHasher.
func NewFileHasher(c Config, done <-chan struct{}) (*FileHasher, error) {
var limit rate.Limit

if c.ScanRateBytesPerSec == 0 {
limit = rate.Inf
} else {
limit = rate.Limit(c.ScanRateBytesPerSec)
}

return &FileHasher{
config: c,
limiter: rate.NewLimiter(
rate.Limit(c.ScanRateBytesPerSec), // Rate
int(c.MaxFileSizeBytes), // Burst
limit, // Rate
int(c.MaxFileSizeBytes), // Burst
),
done: done,
}, nil
}

// HashFile hashes the contents of a file.
func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) {
info, err := os.Stat(path)
f, err := file.ReadOpen(path)
if err != nil {
return nil, fmt.Errorf("failed to stat file %v: %w", path, err)
return nil, fmt.Errorf("open: %w", err)
}
defer f.Close()

info, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
if !info.Mode().IsRegular() {
return nil, fmt.Errorf("not a regular file")

}

// Throttle reading and hashing rate.
if len(hasher.config.HashTypes) > 0 {
err = hasher.throttle(info.Size())
if err != nil {
return nil, fmt.Errorf("failed to hash file %v: %w", path, err)
return nil, err
}
}

Expand All @@ -210,15 +227,9 @@ func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) {
}

if len(hashes) > 0 {
f, err := file.ReadOpen(path)
if err != nil {
return nil, fmt.Errorf("failed to open file for hashing: %w", err)
}
defer f.Close()

hashWriter := multiWriter(hashes)
if _, err := io.Copy(hashWriter, f); err != nil {
return nil, fmt.Errorf("failed to calculate file hashes: %w", err)
return nil, err
}

nameToHash := make(map[HashType]Digest, len(hashes))
Expand All @@ -233,6 +244,10 @@ func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) {
}

func (hasher *FileHasher) throttle(fileSize int64) error {
// Burst is ignored if limit is infinite, so check it manually
if hasher.limiter.Limit() == rate.Inf && int(fileSize) > hasher.limiter.Burst() {
return FileTooLargeError{fileSize}
}
reservation := hasher.limiter.ReserveN(time.Now(), int(fileSize))
if !reservation.OK() {
// File is bigger than the max file size
Expand Down
5 changes: 4 additions & 1 deletion filebeat/autodiscover/builder/hints/logs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
package hints

import (
"os"
"path/filepath"
"testing"

Expand All @@ -30,8 +31,10 @@ import (
"github.com/elastic/elastic-agent-libs/paths"
)

func TestMain(t *testing.M) {
func TestMain(m *testing.M) {
InitializeModule()

os.Exit(m.Run())
}

func TestGenerateHints(t *testing.T) {
Expand Down
Loading

0 comments on commit 5218b67

Please sign in to comment.