From 57ab5b461dfc7e784ee314a9ae34ad0115c8800f Mon Sep 17 00:00:00 2001 From: Sam DeHaan Date: Thu, 12 Dec 2024 16:06:23 -0500 Subject: [PATCH 1/3] Support postgres 17 table schema --- receiver/postgresqlreceiver/client.go | 151 +++- .../postgresqlreceiver/integration_test.go | 30 +- receiver/postgresqlreceiver/scraper.go | 8 +- receiver/postgresqlreceiver/scraper_test.go | 6 +- .../expected_single_db_post17.yaml | 703 ++++++++++++++++++ 5 files changed, 841 insertions(+), 57 deletions(-) create mode 100644 receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go index dc0029873a10..6f02ddc63e71 100644 --- a/receiver/postgresqlreceiver/client.go +++ b/receiver/postgresqlreceiver/client.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "net" + "strconv" "strings" "time" @@ -56,6 +57,7 @@ type client interface { getMaxConnections(ctx context.Context) (int64, error) getIndexStats(ctx context.Context, database string) (map[indexIdentifer]indexStat, error) listDatabases(ctx context.Context) ([]string, error) + getVersion(ctx context.Context) (string, error) } type postgreSQLClient struct { @@ -442,7 +444,6 @@ type bgStat struct { checkpointWriteTime float64 checkpointSyncTime float64 bgWrites int64 - backendWrites int64 bufferBackendWrites int64 bufferFsyncWrites int64 bufferCheckpoints int64 @@ -451,54 +452,105 @@ type bgStat struct { } func (c *postgreSQLClient) getBGWriterStats(ctx context.Context) (*bgStat, error) { - query := `SELECT - checkpoints_req AS checkpoint_req, - checkpoints_timed AS checkpoint_scheduled, - checkpoint_write_time AS checkpoint_duration_write, - checkpoint_sync_time AS checkpoint_duration_sync, - buffers_clean AS bg_writes, - buffers_backend AS backend_writes, - buffers_backend_fsync AS buffers_written_fsync, - buffers_checkpoint AS buffers_checkpoints, - buffers_alloc AS buffers_allocated, - maxwritten_clean AS maxwritten_count - FROM pg_stat_bgwriter;` + version, err := c.getVersion(ctx) + if err != nil { + return nil, err + } + + major, err := parseMajorVersion(version) + if err != nil { + return nil, err + } - row := c.client.QueryRowContext(ctx, query) var ( checkpointsReq, checkpointsScheduled int64 checkpointSyncTime, checkpointWriteTime float64 bgWrites, bufferCheckpoints, bufferAllocated int64 bufferBackendWrites, bufferFsyncWrites, maxWritten int64 ) - err := row.Scan( - &checkpointsReq, - &checkpointsScheduled, - &checkpointWriteTime, - &checkpointSyncTime, - &bgWrites, - &bufferBackendWrites, - &bufferFsyncWrites, - &bufferCheckpoints, - &bufferAllocated, - &maxWritten, - ) - if err != nil { - return nil, err + + if major < 17 { + query := `SELECT + checkpoints_req AS checkpoint_req, + checkpoints_timed AS checkpoint_scheduled, + checkpoint_write_time AS checkpoint_duration_write, + checkpoint_sync_time AS checkpoint_duration_sync, + buffers_clean AS bg_writes, + buffers_backend AS backend_writes, + buffers_backend_fsync AS buffers_written_fsync, + buffers_checkpoint AS buffers_checkpoints, + buffers_alloc AS buffers_allocated, + maxwritten_clean AS maxwritten_count + FROM pg_stat_bgwriter;` + + row := c.client.QueryRowContext(ctx, query) + + if err = row.Scan( + &checkpointsReq, + &checkpointsScheduled, + &checkpointWriteTime, + &checkpointSyncTime, + &bgWrites, + &bufferBackendWrites, + &bufferFsyncWrites, + &bufferCheckpoints, + &bufferAllocated, + &maxWritten, + ); err != nil { + return nil, err + } + return &bgStat{ + checkpointsReq: 
checkpointsReq, + checkpointsScheduled: checkpointsScheduled, + checkpointWriteTime: checkpointWriteTime, + checkpointSyncTime: checkpointSyncTime, + bgWrites: bgWrites, + bufferBackendWrites: bufferBackendWrites, + bufferFsyncWrites: bufferFsyncWrites, + bufferCheckpoints: bufferCheckpoints, + buffersAllocated: bufferAllocated, + maxWritten: maxWritten, + }, nil + } else { + query := `SELECT + cp.num_requested AS checkpoint_req, + cp.num_timed AS checkpoint_scheduled, + cp.write_time AS checkpoint_duration_write, + cp.sync_time AS checkpoint_duration_sync, + cp.buffers_written AS buffers_checkpoints, + bg.buffers_clean AS bg_writes, + bg.buffers_alloc AS buffers_allocated, + bg.maxwritten_clean AS maxwritten_count + FROM pg_stat_bgwriter bg, pg_stat_checkpointer cp;` + + row := c.client.QueryRowContext(ctx, query) + + if err = row.Scan( + &checkpointsReq, + &checkpointsScheduled, + &checkpointWriteTime, + &checkpointSyncTime, + &bufferCheckpoints, + &bgWrites, + &bufferAllocated, + &maxWritten, + ); err != nil { + return nil, err + } + + return &bgStat{ + checkpointsReq: checkpointsReq, + checkpointsScheduled: checkpointsScheduled, + checkpointWriteTime: checkpointWriteTime, + checkpointSyncTime: checkpointSyncTime, + bgWrites: bgWrites, + bufferBackendWrites: -1, // Not found in pg17+ tables + bufferFsyncWrites: -1, // Not found in pg17+ tables + bufferCheckpoints: bufferCheckpoints, + buffersAllocated: bufferAllocated, + maxWritten: maxWritten, + }, nil } - return &bgStat{ - checkpointsReq: checkpointsReq, - checkpointsScheduled: checkpointsScheduled, - checkpointWriteTime: checkpointWriteTime, - checkpointSyncTime: checkpointSyncTime, - bgWrites: bgWrites, - backendWrites: bufferBackendWrites, - bufferBackendWrites: bufferBackendWrites, - bufferFsyncWrites: bufferFsyncWrites, - bufferCheckpoints: bufferCheckpoints, - buffersAllocated: bufferAllocated, - maxWritten: maxWritten, - }, nil } func (c *postgreSQLClient) getMaxConnections(ctx context.Context) (int64, error) { @@ -641,6 +693,23 @@ func (c *postgreSQLClient) listDatabases(ctx context.Context) ([]string, error) return databases, nil } +func (c *postgreSQLClient) getVersion(ctx context.Context) (string, error) { + query := "SHOW server_version;" + row := c.client.QueryRowContext(ctx, query) + var version string + err := row.Scan(&version) + return version, err +} + +func parseMajorVersion(ver string) (int, error) { + parts := strings.Split(ver, ".") + if len(parts) < 2 { + return 0, fmt.Errorf("unexpected version string: %s", ver) + } + + return strconv.Atoi(parts[0]) +} + func filterQueryByDatabases(baseQuery string, databases []string, groupBy bool) string { if len(databases) > 0 { var queryDatabases []string diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go index fb290be1095c..d6423730d805 100644 --- a/receiver/postgresqlreceiver/integration_test.go +++ b/receiver/postgresqlreceiver/integration_test.go @@ -1,11 +1,10 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:build integration - package postgresqlreceiver import ( + "fmt" "net" "path/filepath" "testing" @@ -22,37 +21,42 @@ import ( const postgresqlPort = "5432" +const pre17TestVersion = "13.18" +const post17TestVersion = "17.2" + func TestIntegration(t *testing.T) { defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, false)() defer testutil.SetFeatureGateForTest(t, connectionPoolGate, false)() - t.Run("single_db", integrationTest("single_db", []string{"otel"})) 
- t.Run("multi_db", integrationTest("multi_db", []string{"otel", "otel2"})) - t.Run("all_db", integrationTest("all_db", []string{})) + t.Run("single_db", integrationTest("single_db", []string{"otel"}, pre17TestVersion)) + t.Run("multi_db", integrationTest("multi_db", []string{"otel", "otel2"}, pre17TestVersion)) + t.Run("all_db", integrationTest("all_db", []string{}, pre17TestVersion)) + + t.Run("single_db_post17", integrationTest("single_db_post17", []string{"otel"}, post17TestVersion)) } func TestIntegrationWithSeparateSchemaAttr(t *testing.T) { defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, true)() defer testutil.SetFeatureGateForTest(t, connectionPoolGate, false)() - t.Run("single_db_schemaattr", integrationTest("single_db_schemaattr", []string{"otel"})) - t.Run("multi_db_schemaattr", integrationTest("multi_db_schemaattr", []string{"otel", "otel2"})) - t.Run("all_db_schemaattr", integrationTest("all_db_schemaattr", []string{})) + t.Run("single_db_schemaattr", integrationTest("single_db_schemaattr", []string{"otel"}, pre17TestVersion)) + t.Run("multi_db_schemaattr", integrationTest("multi_db_schemaattr", []string{"otel", "otel2"}, pre17TestVersion)) + t.Run("all_db_schemaattr", integrationTest("all_db_schemaattr", []string{}, pre17TestVersion)) } func TestIntegrationWithConnectionPool(t *testing.T) { defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, false)() defer testutil.SetFeatureGateForTest(t, connectionPoolGate, true)() - t.Run("single_db_connpool", integrationTest("single_db_connpool", []string{"otel"})) - t.Run("multi_db_connpool", integrationTest("multi_db_connpool", []string{"otel", "otel2"})) - t.Run("all_db_connpool", integrationTest("all_db_connpool", []string{})) + t.Run("single_db_connpool", integrationTest("single_db_connpool", []string{"otel"}, pre17TestVersion)) + t.Run("multi_db_connpool", integrationTest("multi_db_connpool", []string{"otel", "otel2"}, pre17TestVersion)) + t.Run("all_db_connpool", integrationTest("all_db_connpool", []string{}, pre17TestVersion)) } -func integrationTest(name string, databases []string) func(*testing.T) { +func integrationTest(name string, databases []string, pgVersion string) func(*testing.T) { expectedFile := filepath.Join("testdata", "integration", "expected_"+name+".yaml") return scraperinttest.NewIntegrationTest( NewFactory(), scraperinttest.WithContainerRequest( testcontainers.ContainerRequest{ - Image: "postgres:13.18", + Image: fmt.Sprintf("postgres:%s", pgVersion), Env: map[string]string{ "POSTGRES_USER": "root", "POSTGRES_PASSWORD": "otel", diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 9cc13f638c44..871153108d8f 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -304,9 +304,13 @@ func (p *postgreSQLScraper) collectBGWriterStats( p.mb.RecordPostgresqlBgwriterBuffersAllocatedDataPoint(now, bgStats.buffersAllocated) p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bgWrites, metadata.AttributeBgBufferSourceBgwriter) - p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferBackendWrites, metadata.AttributeBgBufferSourceBackend) + if bgStats.bufferBackendWrites >= 0 { + p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferBackendWrites, metadata.AttributeBgBufferSourceBackend) + } p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferCheckpoints, metadata.AttributeBgBufferSourceCheckpoints) - 
p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferFsyncWrites, metadata.AttributeBgBufferSourceBackendFsync) + if bgStats.bufferFsyncWrites >= 0 { + p.mb.RecordPostgresqlBgwriterBuffersWritesDataPoint(now, bgStats.bufferFsyncWrites, metadata.AttributeBgBufferSourceBackendFsync) + } p.mb.RecordPostgresqlBgwriterCheckpointCountDataPoint(now, bgStats.checkpointsReq, metadata.AttributeBgCheckpointTypeRequested) p.mb.RecordPostgresqlBgwriterCheckpointCountDataPoint(now, bgStats.checkpointsScheduled, metadata.AttributeBgCheckpointTypeScheduled) diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index 86457712eb21..15165c2a01f4 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -452,6 +452,11 @@ func (m *mockClient) listDatabases(_ context.Context) ([]string, error) { return args.Get(0).([]string), args.Error(1) } +func (m *mockClient) getVersion(_ context.Context) (string, error) { + args := m.Called() + return args.String(0), args.Error(1) +} + func (m *mockClientFactory) getClient(database string) (client, error) { args := m.Called(database) return args.Get(0).(client), args.Error(1) @@ -511,7 +516,6 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin checkpointWriteTime: 3.12, checkpointSyncTime: 4.23, bgWrites: 5, - backendWrites: 6, bufferBackendWrites: 7, bufferFsyncWrites: 8, bufferCheckpoints: 9, diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml new file mode 100644 index 000000000000..85f81fc5f0e3 --- /dev/null +++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_post17.yaml @@ -0,0 +1,703 @@ +resourceMetrics: + - resource: {} + scopeMetrics: + - metrics: + - description: Number of buffers allocated. + name: postgresql.bgwriter.buffers.allocated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "289" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{buffers}' + - description: Number of buffers written. + name: postgresql.bgwriter.buffers.writes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: source + value: + stringValue: bgwriter + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "60" + attributes: + - key: source + value: + stringValue: checkpoints + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{buffers}' + - description: The number of checkpoints performed. + name: postgresql.bgwriter.checkpoint.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4" + attributes: + - key: type + value: + stringValue: requested + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: type + value: + stringValue: scheduled + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{checkpoints}' + - description: Total time spent writing and syncing files to disk by checkpoints. 
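+        # PostgreSQL 17 moved these timings from pg_stat_bgwriter to
+        # pg_stat_checkpointer (write_time and sync_time), per the query added
+        # in client.go; this expected file reflects the postgres:17.2 image
+        # used by the post17 integration test.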
+ name: postgresql.bgwriter.duration + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 14 + attributes: + - key: type + value: + stringValue: sync + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asDouble: 7 + attributes: + - key: type + value: + stringValue: write + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Number of times the background writer stopped a cleaning scan because it had written too many buffers. + name: postgresql.bgwriter.maxwritten + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Configured maximum number of client connections allowed + gauge: + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.max + unit: '{connections}' + - description: Number of user databases. + name: postgresql.database.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{databases}' + - description: The number of database locks. + gauge: + dataPoints: + - asInt: "1" + attributes: + - key: lock_type + value: + stringValue: relation + - key: mode + value: + stringValue: AccessShareLock + - key: relation + value: + stringValue: pg_class + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: lock_type + value: + stringValue: relation + - key: mode + value: + stringValue: AccessShareLock + - key: relation + value: + stringValue: pg_class_oid_index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: lock_type + value: + stringValue: relation + - key: mode + value: + stringValue: AccessShareLock + - key: relation + value: + stringValue: pg_class_relname_nsp_index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: lock_type + value: + stringValue: relation + - key: mode + value: + stringValue: AccessShareLock + - key: relation + value: + stringValue: pg_class_tblspc_relfilenode_index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: lock_type + value: + stringValue: relation + - key: mode + value: + stringValue: AccessShareLock + - key: relation + value: + stringValue: pg_locks + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.database.locks + unit: '{lock}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: otel + scopeMetrics: + - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' + - description: The number of commits. 
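+        # Commit count comes from pg_stat_database (xact_commit), which is
+        # unchanged by the PostgreSQL 17 bgwriter/checkpointer split.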
+ name: postgresql.commits + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The database disk usage. + name: postgresql.db_size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7184900" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of deadlocks. + name: postgresql.deadlocks + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{deadlock}' + - description: The number of rollbacks. + name: postgresql.rollbacks + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Number of user tables in a database. + name: postgresql.table.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{table}' + - description: The number of temp files. + name: postgresql.temp_files + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: otel + - key: postgresql.table.name + value: + stringValue: public.table1 + scopeMetrics: + - metrics: + - description: The number of blocks read. 
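+        # Per-table heap/index/TOAST block hit and read counters, as exposed
+        # by pg_statio_user_tables.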
+ name: postgresql.blocks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: source + value: + stringValue: heap_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: heap_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: idx_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: idx_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: tidx_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: tidx_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: toast_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: toast_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The number of db row operations. + name: postgresql.operations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: operation + value: + stringValue: del + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: hot_upd + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: ins + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: upd + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The number of rows in the database. + name: postgresql.rows + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: state + value: + stringValue: dead + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: live + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: The number of sequential scans. + name: postgresql.sequential_scans + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{sequential_scan}' + - description: Disk space used by a table. + name: postgresql.table.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of times a table has manually been vacuumed. + name: postgresql.table.vacuum.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{vacuums}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: otel + - key: postgresql.table.name + value: + stringValue: public.table2 + scopeMetrics: + - metrics: + - description: The number of blocks read. 
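+        # Same per-table block counters as public.table1 above.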
+ name: postgresql.blocks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: source + value: + stringValue: heap_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: heap_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: idx_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: idx_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: tidx_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: tidx_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: toast_hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: source + value: + stringValue: toast_read + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The number of db row operations. + name: postgresql.operations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: operation + value: + stringValue: del + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: hot_upd + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: ins + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: upd + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The number of rows in the database. + name: postgresql.rows + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: state + value: + stringValue: dead + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: live + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: The number of sequential scans. + name: postgresql.sequential_scans + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{sequential_scan}' + - description: Disk space used by a table. + name: postgresql.table.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of times a table has manually been vacuumed. + name: postgresql.table.vacuum.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{vacuums}' + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: otel + - key: postgresql.index.name + value: + stringValue: table1_pkey + - key: postgresql.table.name + value: + stringValue: table1 + scopeMetrics: + - metrics: + - description: The number of index scans on a table. 
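+        # Index scan count as exposed by pg_stat_user_indexes (idx_scan).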
+ name: postgresql.index.scans + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{scans}' + - description: The size of the index on disk. + gauge: + dataPoints: + - asInt: "8192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.index.size + unit: By + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: otel + - key: postgresql.index.name + value: + stringValue: table2_pkey + - key: postgresql.table.name + value: + stringValue: table2 + scopeMetrics: + - metrics: + - description: The number of index scans on a table. + name: postgresql.index.scans + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{scans}' + - description: The size of the index on disk. + gauge: + dataPoints: + - asInt: "8192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.index.size + unit: By + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver + version: latest From 89509d97aaf6d43a221f62bd49b1ce7116dc5fc0 Mon Sep 17 00:00:00 2001 From: Sam DeHaan Date: Thu, 12 Dec 2024 16:08:14 -0500 Subject: [PATCH 2/3] Update changelog --- .chloggen/postgresql-17-bgwriter.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .chloggen/postgresql-17-bgwriter.yaml diff --git a/.chloggen/postgresql-17-bgwriter.yaml b/.chloggen/postgresql-17-bgwriter.yaml new file mode 100644 index 000000000000..d4e69abd8a50 --- /dev/null +++ b/.chloggen/postgresql-17-bgwriter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: postgresqlreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Update the postgresqlreceiver to handle new table schema for the bgwriter metrics in pg17+" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36784] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] From e05979aa8e54fbef9b82962c8ed4f719396ea42b Mon Sep 17 00:00:00 2001 From: Sam DeHaan Date: Thu, 12 Dec 2024 16:27:43 -0500 Subject: [PATCH 3/3] Add build tag, fix fmt --- receiver/postgresqlreceiver/integration_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go index d6423730d805..a95199536142 100644 --- a/receiver/postgresqlreceiver/integration_test.go +++ b/receiver/postgresqlreceiver/integration_test.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +//go:build integration + package postgresqlreceiver import ( @@ -21,8 +23,10 @@ import ( const postgresqlPort = "5432" -const pre17TestVersion = "13.18" -const post17TestVersion = "17.2" +const ( + pre17TestVersion = "13.18" + post17TestVersion = "17.2" +) func TestIntegration(t *testing.T) { defer testutil.SetFeatureGateForTest(t, separateSchemaAttrGate, false)()