From 5e6ed9a1895f3b4dea8456d9c2c149a63318a495 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Thu, 14 Dec 2023 09:28:01 -0500 Subject: [PATCH] cleanup deprecated flag types in tabletenv (#14733) Signed-off-by: Andrew Mason --- go/flags/endtoend/vtcombo.txt | 16 +- go/flags/endtoend/vttablet.txt | 16 +- .../endtoend/reparent/prscomplex/main_test.go | 6 +- .../buffer/buffer_test_helpers.go | 2 +- go/test/endtoend/vault/vault_test.go | 2 +- go/test/endtoend/vreplication/cluster_test.go | 2 +- .../vreplication/vreplication_test.go | 2 +- go/test/endtoend/vtgate/godriver/main_test.go | 2 +- .../vtgate/queries/timeout/main_test.go | 4 +- .../vtgate/readafterwrite/raw_test.go | 2 +- .../endtoend/vtgate/reservedconn/main_test.go | 2 +- .../reservedconn/reconnect2/main_test.go | 2 +- .../endtoend/vtgate/unsharded/main_test.go | 2 +- go/test/fuzzing/tabletserver_schema_fuzzer.go | 5 +- .../endtoend/connkilling/main_test.go | 3 +- go/vt/vttablet/endtoend/framework/server.go | 8 +- .../streamtimeout/healthstream_test.go | 2 +- .../endtoend/streamtimeout/main_test.go | 2 +- go/vt/vttablet/onlineddl/executor.go | 4 +- go/vt/vttablet/tabletserver/connpool/pool.go | 6 +- .../tabletserver/connpool/pool_test.go | 13 +- go/vt/vttablet/tabletserver/debugenv.go | 4 +- go/vt/vttablet/tabletserver/gc/tablegc.go | 4 +- .../vttablet/tabletserver/health_streamer.go | 8 +- .../tabletserver/health_streamer_test.go | 4 +- .../tabletserver/query_engine_test.go | 6 +- .../tabletserver/repltracker/reader.go | 6 +- .../tabletserver/repltracker/reader_test.go | 2 +- .../tabletserver/repltracker/repltracker.go | 2 +- .../repltracker/repltracker_test.go | 2 +- .../tabletserver/repltracker/writer.go | 6 +- .../tabletserver/repltracker/writer_test.go | 2 +- go/vt/vttablet/tabletserver/schema/engine.go | 6 +- .../tabletserver/schema/engine_test.go | 8 +- .../tabletserver/schema/load_table_test.go | 4 +- go/vt/vttablet/tabletserver/state_manager.go | 10 +- go/vt/vttablet/tabletserver/status.go | 4 +- .../vttablet/tabletserver/tabletenv/config.go | 511 ++++++++++++------ .../tabletserver/tabletenv/config_test.go | 67 ++- go/vt/vttablet/tabletserver/tabletserver.go | 2 +- .../tabletserver/throttle/throttler.go | 4 +- go/vt/vttablet/tabletserver/tx_engine.go | 6 +- go/vt/vttablet/tabletserver/tx_engine_test.go | 8 +- go/vt/vttablet/tabletserver/tx_pool_test.go | 32 +- go/vt/vttest/vtprocess.go | 8 +- 45 files changed, 504 insertions(+), 315 deletions(-) diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index cc3b55ee9cd..7529bfe2f18 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -271,27 +271,27 @@ Flags: --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results - --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. 
(default 30m0s) + --queryserver-config-idle-timeout duration query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000) --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting - --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s) + --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) - --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) + --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. 
VTGates will need to have -schema_change_signal enabled for this to work (default true) - --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) - --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s) + --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection --queryserver-config-strict-table-acl only allow queries that pass table acl checks --queryserver-config-terse-errors prevent bind vars from escaping in client error messages --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. 
by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) - --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) @@ -319,7 +319,7 @@ Flags: --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) - --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. (default 0s) + --shutdown_grace_period duration how long to wait for queries and transactions to complete during graceful shutdown. --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index f3085f71d2f..59016b972ba 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -264,27 +264,27 @@ Flags: --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results - --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) + --queryserver-config-idle-timeout duration query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. 
(default 10000) --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting - --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s) + --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) - --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) + --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) - --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. 
(default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) - --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s) + --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection --queryserver-config-strict-table-acl only allow queries that pass table acl checks --queryserver-config-terse-errors prevent bind vars from escaping in client error messages --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) - --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) @@ -319,7 +319,7 @@ Flags: --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) - --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. (default 0s) + --shutdown_grace_period duration how long to wait for queries and transactions to complete during graceful shutdown. 
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88276012781..88e3d6c09fa 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -63,12 +63,12 @@ func TestMain(m *testing.M) { SchemaSQL: schemaSQL, } clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--queryserver-config-query-timeout=9000", + "--queryserver-config-query-timeout=9000s", "--queryserver-config-pool-size=3", "--queryserver-config-stream-pool-size=3", "--queryserver-config-transaction-cap=2", - "--queryserver-config-transaction-timeout=20", - "--shutdown_grace_period=3", + "--queryserver-config-transaction-timeout=20s", + "--shutdown_grace_period=3s", "--queryserver-config-schema-change-signal=false") err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false) if err != nil { diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index 5ba71f7ab6e..cb88ded9ac5 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -229,7 +229,7 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { } clusterInstance.VtTabletExtraArgs = []string{ "--health_check_interval", "1s", - "--queryserver-config-transaction-timeout", "20", + "--queryserver-config-transaction-timeout", "20s", } if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { return nil, 1 diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index 73b5a89b156..684a374707d 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -62,7 +62,7 @@ var ( "--watch_replication_stream", // Frequently reload schema, generating some tablet traffic, // so we can speed up token refresh - "--queryserver-config-schema-reload-time", "5", + "--queryserver-config-schema-reload-time", "5s", "--serving_state_grace_period", "1s"} vaultTabletArg = []string{ "--db-credentials-server", "vault", diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 89cebc7d0b1..baca65a25fa 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -450,7 +450,7 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace, tablet := &Tablet{} options := []string{ - "--queryserver-config-schema-reload-time", "5", + "--queryserver-config-schema-reload-time", "5s", "--heartbeat_on_demand_duration", "5s", "--heartbeat_interval", "250ms", } // FIXME: for multi-cell initial schema doesn't seem to load without "--queryserver-config-schema-reload-time" diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 2bc4df760ee..abbdfe7f4dd 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -242,7 +242,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { extraVTTabletArgs = []string{ // We rely on 
holding open transactions to generate innodb history so extend the timeout // to avoid flakiness when the CI is very slow. - fmt.Sprintf("--queryserver-config-transaction-timeout=%d", int64(defaultTimeout.Seconds())*3), + fmt.Sprintf("--queryserver-config-transaction-timeout=%s", (defaultTimeout * 3).String()), fmt.Sprintf("--vreplication_copy_phase_max_innodb_history_list_length=%d", maxSourceTrxHistory), parallelInsertWorkers, } diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 492a68662fc..587c189d2ea 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -105,7 +105,7 @@ func TestMain(m *testing.M) { VSchema: VSchema, } clusterInstance.VtTabletExtraArgs = []string{ - "--queryserver-config-transaction-timeout", "3", + "--queryserver-config-transaction-timeout", "3s", } if err := clusterInstance.StartKeyspace(*Keyspace, []string{"-80", "80-"}, 1, false); err != nil { log.Fatal(err.Error()) diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go index d71dc55ef46..c265e824e88 100644 --- a/go/test/endtoend/vtgate/queries/timeout/main_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -63,8 +63,8 @@ func TestMain(m *testing.M) { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-max-result-size", "1000000", - "--queryserver-config-query-timeout", "200", - "--queryserver-config-query-pool-timeout", "200") + "--queryserver-config-query-timeout", "200s", + "--queryserver-config-query-pool-timeout", "200s") // Start Unsharded keyspace ukeyspace := &cluster.Keyspace{ Name: uks, diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 56f9b3a44cb..0549a9b06b0 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -119,7 +119,7 @@ func TestMain(m *testing.M) { VSchema: vSchema, } clusterInstance.VtTabletExtraArgs = []string{ - "--queryserver-config-transaction-timeout", "5", + "--queryserver-config-transaction-timeout", "5s", } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1 diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index cc76e7a3b46..528182a82e2 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -133,7 +133,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: sqlSchema, VSchema: vSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5s"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index f97d96ef89a..b66bb15dbd5 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -96,7 +96,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: sqlSchema, VSchema: vSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5"} + 
clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5s"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index f772fabecc1..7405a7dd87f 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -179,7 +179,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: SchemaSQL, VSchema: VSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "3", "--queryserver-config-max-result-size", "30"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "3s", "--queryserver-config-max-result-size", "30"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } diff --git a/go/test/fuzzing/tabletserver_schema_fuzzer.go b/go/test/fuzzing/tabletserver_schema_fuzzer.go index 67bb36e52ed..39af22a1918 100644 --- a/go/test/fuzzing/tabletserver_schema_fuzzer.go +++ b/go/test/fuzzing/tabletserver_schema_fuzzer.go @@ -17,6 +17,7 @@ import ( "context" "sync" "testing" + "time" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" @@ -60,9 +61,9 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl appParams := db.ConnParams() dbaParams := db.ConnParams() cfg := tabletenv.ConnPoolConfig{ - Size: 2, + Size: 2, + IdleTimeout: 10 * time.Second, } - _ = cfg.IdleTimeoutSeconds.Set("10s") connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) diff --git a/go/vt/vttablet/endtoend/connkilling/main_test.go b/go/vt/vttablet/endtoend/connkilling/main_test.go index e7486c397eb..3d0ec344715 100644 --- a/go/vt/vttablet/endtoend/connkilling/main_test.go +++ b/go/vt/vttablet/endtoend/connkilling/main_test.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "testing" + "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/tableacl" @@ -81,7 +82,7 @@ func TestMain(m *testing.M) { connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() config := tabletenv.NewDefaultConfig() - _ = config.Oltp.TxTimeoutSeconds.Set("3s") + config.Oltp.TxTimeout = 3 * time.Second ctx, cancel := context.WithCancel(context.Background()) defer cancel() err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 9e78dc08a85..7fca349deff 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -118,11 +118,11 @@ func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnP config.TwoPCCoordinatorAddress = "fake" config.HotRowProtection.Mode = tabletenv.Enable config.TrackSchemaVersions = true - _ = config.GracePeriods.ShutdownSeconds.Set("2s") + config.GracePeriods.Shutdown = 2 * time.Second config.SignalWhenSchemaChange = true - _ = config.Healthcheck.IntervalSeconds.Set("100ms") - _ = config.Oltp.TxTimeoutSeconds.Set("5s") - _ = config.Olap.TxTimeoutSeconds.Set("5s") + config.Healthcheck.Interval = 100 * time.Millisecond + config.Oltp.TxTimeout = 5 * time.Second + config.Olap.TxTimeout = 5 * time.Second config.EnableViews = true 
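	// Review sketch (comment only, not applied by this diff): the duration
	// assignments above replace the deprecated string round-trips, e.g.
	//   _ = config.Oltp.TxTimeoutSeconds.Set("5s")   // old
	//   config.Oltp.TxTimeout = 5 * time.Second      // new
	// A plain time.Duration field cannot fail to parse, so the discarded
	// error (the leading "_ =") disappears along with the Set call.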
config.QueryCacheDoorkeeper = false gotBytes, _ := yaml2.Marshal(config) diff --git a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go index d69ce193ef9..9890efd427d 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go @@ -74,7 +74,7 @@ func TestSchemaChangeTimedout(t *testing.T) { // This is because the query timeout triggers the *DBConn.Kill() method, which in turn holds the mutex lock on the health_streamer. // Although not indefinitely, this can result in longer wait times. // It's worth noting that the behavior of *DBConn.Kill() is outside the scope of this test. - reloadInterval := config.SignalSchemaChangeReloadIntervalSeconds.Get() + reloadInterval := config.SignalSchemaChangeReloadInterval time.Sleep(reloadInterval) // pause simulating the mysql stall to allow the health_streamer to resume. diff --git a/go/vt/vttablet/endtoend/streamtimeout/main_test.go b/go/vt/vttablet/endtoend/streamtimeout/main_test.go index 68851bf901b..0b2f37a987c 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/main_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/main_test.go @@ -84,7 +84,7 @@ func TestMain(m *testing.M) { connParams := cluster.MySQLConnParams() connAppDebugParams := cluster.MySQLAppDebugConnParams() config = tabletenv.NewDefaultConfig() - _ = config.SchemaReloadIntervalSeconds.Set("2100ms") + config.SchemaReloadInterval = (2 * time.Second) + (100 * time.Millisecond) config.SchemaChangeReloadTimeout = 10 * time.Second config.SignalWhenSchemaChange = true diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index a5d642b0abf..bb8431b5199 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -265,8 +265,8 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top tabletAlias: tabletAlias.CloneVT(), pool: connpool.NewPool(env, "OnlineDDLExecutorPool", tabletenv.ConnPoolConfig{ - Size: databasePoolSize, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: databasePoolSize, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), tabletTypeFunc: tabletTypeFunc, ts: ts, diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 6f8b72870e0..a8eb2c52d83 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -68,14 +68,14 @@ type Pool struct { // to publish stats only. 
func NewPool(env tabletenv.Env, name string, cfg tabletenv.ConnPoolConfig) *Pool { cp := &Pool{ - timeout: cfg.TimeoutSeconds.Get(), + timeout: cfg.Timeout, env: env, } config := smartconnpool.Config[*Conn]{ Capacity: int64(cfg.Size), - IdleTimeout: cfg.IdleTimeoutSeconds.Get(), - MaxLifetime: cfg.MaxLifetimeSeconds.Get(), + IdleTimeout: cfg.IdleTimeout, + MaxLifetime: cfg.MaxLifetime, RefreshInterval: mysqlctl.PoolDynamicHostnameResolution, } diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index ecdd2df4465..ff43388d12c 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -54,8 +54,8 @@ func TestConnPoolTimeout(t *testing.T) { cfg := tabletenv.ConnPoolConfig{ Size: 1, } - _ = cfg.TimeoutSeconds.Set("1s") - _ = cfg.IdleTimeoutSeconds.Set("10s") + cfg.Timeout = time.Second + cfg.IdleTimeout = 10 * time.Second connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer connPool.Close() @@ -325,9 +325,8 @@ func newPool() *Pool { } func newPoolWithCapacity(capacity int) *Pool { - cfg := tabletenv.ConnPoolConfig{ - Size: capacity, - } - _ = cfg.IdleTimeoutSeconds.Set("10s") - return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) + return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", tabletenv.ConnPoolConfig{ + Size: capacity, + IdleTimeout: 10 * time.Second, + }) } diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go index e229c46cadd..c780a28ed90 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -125,7 +125,7 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) case "RowStreamerMaxMySQLReplLagSecs": setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxMySQLReplLagSecs = val }) case "UnhealthyThreshold": - setDurationVal(func(d time.Duration) { _ = tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Set(d.String()) }) + setDurationVal(func(d time.Duration) { tsv.Config().Healthcheck.UnhealthyThreshold = d }) setDurationVal(tsv.hs.SetUnhealthyThreshold) setDurationVal(tsv.sm.SetUnhealthyThreshold) case "ThrottleMetricThreshold": @@ -145,7 +145,7 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) vars = addVar(vars, "WarnResultSize", tsv.WarnResultSize) vars = addVar(vars, "RowStreamerMaxInnoDBTrxHistLen", func() int64 { return tsv.Config().RowStreamer.MaxInnoDBTrxHistLen }) vars = addVar(vars, "RowStreamerMaxMySQLReplLagSecs", func() int64 { return tsv.Config().RowStreamer.MaxMySQLReplLagSecs }) - vars = addVar(vars, "UnhealthyThreshold", tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Get) + vars = addVar(vars, "UnhealthyThreshold", func() time.Duration { return tsv.Config().Healthcheck.UnhealthyThreshold }) vars = addVar(vars, "ThrottleMetricThreshold", tsv.ThrottleMetricThreshold) vars = append(vars, envValue{ Name: "Consolidator", diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index d8d12611e43..928d102624f 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -145,8 +145,8 @@ func NewTableGC(env tabletenv.Env, ts *topo.Server, lagThrottler *throttle.Throt env: env, ts: ts, pool: connpool.NewPool(env, "TableGCPool", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: 
env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 2, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), purgingTables: map[string]bool{}, diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index 2e75ccf2a20..88304da4bec 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -98,13 +98,13 @@ func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine if env.Config().SignalWhenSchemaChange { // We need one connection for the reloader. pool = connpool.NewPool(env, "", tabletenv.ConnPoolConfig{ - Size: 1, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 1, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }) } hs := &healthStreamer{ stats: env.Stats(), - degradedThreshold: env.Config().Healthcheck.DegradedThresholdSeconds.Get(), + degradedThreshold: env.Config().Healthcheck.DegradedThreshold, clients: make(map[chan *querypb.StreamHealthResponse]struct{}), state: &querypb.StreamHealthResponse{ @@ -122,7 +122,7 @@ func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine viewsEnabled: env.Config().EnableViews, se: engine, } - hs.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) + hs.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThreshold.Nanoseconds()) return hs } diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index b2fbb2db1ea..3220bd3ffe7 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -216,7 +216,7 @@ func TestReloadSchema(t *testing.T) { defer db.Close() config := newConfig(db) config.SignalWhenSchemaChange = testcase.enableSchemaChange - _ = config.SchemaReloadIntervalSeconds.Set("100ms") + config.SchemaReloadInterval = 100 * time.Millisecond env := tabletenv.NewEnv(config, "ReplTrackerTest") alias := &topodatapb.TabletAlias{ @@ -333,7 +333,7 @@ func TestReloadView(t *testing.T) { defer db.Close() config := newConfig(db) config.SignalWhenSchemaChange = true - _ = config.SchemaReloadIntervalSeconds.Set("100ms") + config.SchemaReloadInterval = 100 * time.Millisecond config.EnableViews = true env := tabletenv.NewEnv(config, "TestReloadView") diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 7bfac4988f2..f20b5522140 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -352,9 +352,9 @@ func TestStatsURL(t *testing.T) { func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfigs.DBConfigs) *QueryEngine { config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + config.OltpReadPool.IdleTimeout = idleTimeout + config.OlapReadPool.IdleTimeout = idleTimeout + config.TxPool.IdleTimeout = idleTimeout env := tabletenv.NewEnv(config, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go index fe469bb2e31..694778d1119 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader.go +++ 
b/go/vt/vttablet/tabletserver/repltracker/reader.go @@ -71,7 +71,7 @@ func newHeartbeatReader(env tabletenv.Env) *heartbeatReader { return &heartbeatReader{} } - heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval := config.ReplicationTracker.HeartbeatInterval return &heartbeatReader{ env: env, enabled: true, @@ -80,8 +80,8 @@ func newHeartbeatReader(env tabletenv.Env) *heartbeatReader { ticks: timer.NewTimer(heartbeatInterval), errorLog: logutil.NewThrottledLogger("HeartbeatReporter", 60*time.Second), pool: connpool.NewPool(env, "HeartbeatReadPool", tabletenv.ConnPoolConfig{ - Size: 1, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 1, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), } } diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go index 54ece70fc1a..60321cb6164 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go @@ -139,7 +139,7 @@ func TestReaderReadHeartbeatError(t *testing.T) { func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + config.ReplicationTracker.HeartbeatInterval = time.Second params, _ := db.ConnParams().MysqlParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go index 5ab44eb774e..6f504b2a445 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go @@ -66,7 +66,7 @@ type ReplTracker struct { func NewReplTracker(env tabletenv.Env, alias *topodatapb.TabletAlias) *ReplTracker { return &ReplTracker{ mode: env.Config().ReplicationTracker.Mode, - forceHeartbeat: env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, + forceHeartbeat: env.Config().ReplicationTracker.HeartbeatOnDemand > 0, hw: newHeartbeatWriter(env, alias), hr: newHeartbeatReader(env), poller: &poller{}, diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go index 01912c3f689..ee74ed52ab5 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go @@ -37,7 +37,7 @@ func TestReplTracker(t *testing.T) { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + config.ReplicationTracker.HeartbeatInterval = time.Second params, _ := db.ConnParams().MysqlParams() cp := *params config.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go b/go/vt/vttablet/tabletserver/repltracker/writer.go index 801fcc8cd57..245ef24c41c 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -72,17 +72,17 @@ func newHeartbeatWriter(env tabletenv.Env, alias *topodatapb.TabletAlias) *heart config := env.Config() // config.EnableLagThrottler is a feature flag for the throttler; if throttler runs, then heartbeat must also run - if config.ReplicationTracker.Mode != tabletenv.Heartbeat && 
config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { + if config.ReplicationTracker.Mode != tabletenv.Heartbeat && config.ReplicationTracker.HeartbeatOnDemand == 0 { return &heartbeatWriter{} } - heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval := config.ReplicationTracker.HeartbeatInterval w := &heartbeatWriter{ env: env, enabled: true, tabletAlias: alias.CloneVT(), now: time.Now, interval: heartbeatInterval, - onDemandDuration: config.ReplicationTracker.HeartbeatOnDemandSeconds.Get(), + onDemandDuration: config.ReplicationTracker.HeartbeatOnDemand, ticks: timer.NewTimer(heartbeatInterval), errorLog: logutil.NewThrottledLogger("HeartbeatWriter", 60*time.Second), // We make this pool size 2; to prevent pool exhausted diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go index 5044586c0d2..664a0464b78 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go @@ -66,7 +66,7 @@ func TestWriteHeartbeatError(t *testing.T) { func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + config.ReplicationTracker.HeartbeatInterval = time.Second params, _ := db.ConnParams().MysqlParams() cp := *params diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index f328972ca2e..9f973324302 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -100,14 +100,14 @@ type Engine struct { // NewEngine creates a new Engine. func NewEngine(env tabletenv.Env) *Engine { - reloadTime := env.Config().SchemaReloadIntervalSeconds.Get() + reloadTime := env.Config().SchemaReloadInterval se := &Engine{ env: env, // We need three connections: one for the reloader, one for // the historian, and one for the tracker. 
conns: connpool.NewPool(env, "", tabletenv.ConnPoolConfig{ - Size: 3, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 3, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), ticks: timer.NewTimer(reloadTime), } diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 408d0a35841..2eb117cba36 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -576,10 +576,10 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { config := tabletenv.NewDefaultConfig() - _ = config.SchemaReloadIntervalSeconds.Set(reloadTime.String()) - _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + config.SchemaReloadInterval = reloadTime + config.OltpReadPool.IdleTimeout = idleTimeout + config.OlapReadPool.IdleTimeout = idleTimeout + config.TxPool.IdleTimeout = idleTimeout config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds se := NewEngine(tabletenv.NewEnv(config, "SchemaTest")) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index eeefb688e61..088afac3720 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -230,9 +230,9 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl appParams := db.ConnParams() dbaParams := db.ConnParams() cfg := tabletenv.ConnPoolConfig{ - Size: 2, + Size: 2, + IdleTimeout: 10 * time.Second, } - _ = cfg.IdleTimeoutSeconds.Set("10s") connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 98ed846600c..8aa7776957f 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -197,11 +197,11 @@ func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { sm.target = target.CloneVT() sm.transitioning = semaphore.NewWeighted(1) sm.checkMySQLThrottler = semaphore.NewWeighted(1) - sm.timebombDuration = env.Config().OltpReadPool.TimeoutSeconds.Get() * 10 - sm.hcticks = timer.NewTimer(env.Config().Healthcheck.IntervalSeconds.Get()) - sm.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) - sm.shutdownGracePeriod = env.Config().GracePeriods.ShutdownSeconds.Get() - sm.transitionGracePeriod = env.Config().GracePeriods.TransitionSeconds.Get() + sm.timebombDuration = env.Config().OltpReadPool.Timeout * 10 + sm.hcticks = timer.NewTimer(env.Config().Healthcheck.Interval) + sm.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThreshold.Nanoseconds()) + sm.shutdownGracePeriod = env.Config().GracePeriods.Shutdown + sm.transitionGracePeriod = env.Config().GracePeriods.Transition } // SetServingType changes the state to the specified settings. 
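Illustrative sketch for reviewers (not applied by this diff): the same mechanical pattern repeats across the hunks above and below. The deprecated flagutil seconds-typed fields (IdleTimeoutSeconds, TimeoutSeconds, MaxLifetimeSeconds, and friends) exposed Set(string)/Get() accessors, while their replacements are plain time.Duration struct fields that are assigned and read directly. A minimal example of the post-patch style, using only the tabletenv.ConnPoolConfig fields visible in these hunks and assuming it compiles inside the vitess module:

package example

import (
	"time"

	"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
)

// newTestPoolConfig builds a pool config in the new direct-assignment style.
// Previously this required an error-returning string round-trip:
//   cfg := tabletenv.ConnPoolConfig{Size: 2}
//   _ = cfg.IdleTimeoutSeconds.Set("10s")
func newTestPoolConfig() tabletenv.ConnPoolConfig {
	return tabletenv.ConnPoolConfig{
		Size:        2,
		IdleTimeout: 10 * time.Second,
	}
}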
diff --git a/go/vt/vttablet/tabletserver/status.go b/go/vt/vttablet/tabletserver/status.go index f91cc4ad566..b1ebb24bc57 100644 --- a/go/vt/vttablet/tabletserver/status.go +++ b/go/vt/vttablet/tabletserver/status.go @@ -229,8 +229,8 @@ func (tsv *TabletServer) AddStatusHeader() { // AddStatusPart registers the status part for the status page. func (tsv *TabletServer) AddStatusPart() { // Save the threshold values for reporting. - degradedThreshold.Store(tsv.config.Healthcheck.DegradedThresholdSeconds.Get().Nanoseconds()) - unhealthyThreshold.Store(tsv.config.Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) + degradedThreshold.Store(tsv.config.Healthcheck.DegradedThreshold.Nanoseconds()) + unhealthyThreshold.Store(tsv.config.Healthcheck.UnhealthyThreshold.Nanoseconds()) tsv.exporter.AddStatusPart("Health", queryserviceStatusTemplate, func() any { status := queryserviceStatus{ diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index ac2629709b9..25352aba91b 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -122,10 +122,8 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.IntVar(¤tConfig.OlapReadPool.Size, "queryserver-config-stream-pool-size", defaultConfig.OlapReadPool.Size, "query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion") fs.IntVar(¤tConfig.TxPool.Size, "queryserver-config-transaction-cap", defaultConfig.TxPool.Size, "query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout)") fs.IntVar(¤tConfig.MessagePostponeParallelism, "queryserver-config-message-postpone-cap", defaultConfig.MessagePostponeParallelism, "query server message postpone cap is the maximum number of messages that can be postponed at any given time. 
Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem.") - currentConfig.Oltp.TxTimeoutSeconds = defaultConfig.Oltp.TxTimeoutSeconds.Clone() - fs.Var(¤tConfig.Oltp.TxTimeoutSeconds, currentConfig.Oltp.TxTimeoutSeconds.Name(), "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") - currentConfig.GracePeriods.ShutdownSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.GracePeriods.ShutdownSeconds.Name(), defaultConfig.GracePeriods.TransitionSeconds.Get()) - fs.Var(¤tConfig.GracePeriods.ShutdownSeconds, currentConfig.GracePeriods.ShutdownSeconds.Name(), "how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.") + fs.DurationVar(¤tConfig.Oltp.TxTimeout, "queryserver-config-transaction-timeout", defaultConfig.Oltp.TxTimeout, "query server transaction timeout, a transaction will be killed if it takes longer than this value") + fs.DurationVar(¤tConfig.GracePeriods.Shutdown, "shutdown_grace_period", defaultConfig.GracePeriods.Shutdown, "how long to wait for queries and transactions to complete during graceful shutdown.") fs.IntVar(¤tConfig.Oltp.MaxRows, "queryserver-config-max-result-size", defaultConfig.Oltp.MaxRows, "query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries.") fs.IntVar(¤tConfig.Oltp.WarnRows, "queryserver-config-warn-result-size", defaultConfig.Oltp.WarnRows, "query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this") fs.BoolVar(¤tConfig.PassthroughDML, "queryserver-config-passthrough-dmls", defaultConfig.PassthroughDML, "query server pass through all dml statements without rewriting") @@ -134,24 +132,16 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - currentConfig.SchemaReloadIntervalSeconds = defaultConfig.SchemaReloadIntervalSeconds.Clone() - fs.Var(¤tConfig.SchemaReloadIntervalSeconds, currentConfig.SchemaReloadIntervalSeconds.Name(), "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.") + fs.DurationVar(¤tConfig.SchemaReloadInterval, "queryserver-config-schema-reload-time", defaultConfig.SchemaReloadInterval, "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. 
This config controls the reload time.") fs.DurationVar(¤tConfig.SchemaChangeReloadTimeout, "schema-change-reload-timeout", defaultConfig.SchemaChangeReloadTimeout, "query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up") fs.BoolVar(¤tConfig.SignalWhenSchemaChange, "queryserver-config-schema-change-signal", defaultConfig.SignalWhenSchemaChange, "query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work") - currentConfig.Olap.TxTimeoutSeconds = defaultConfig.Olap.TxTimeoutSeconds.Clone() - fs.Var(¤tConfig.Olap.TxTimeoutSeconds, defaultConfig.Olap.TxTimeoutSeconds.Name(), "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed") - currentConfig.Oltp.QueryTimeoutSeconds = defaultConfig.Oltp.QueryTimeoutSeconds.Clone() - fs.Var(¤tConfig.Oltp.QueryTimeoutSeconds, currentConfig.Oltp.QueryTimeoutSeconds.Name(), "query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.") - currentConfig.OltpReadPool.TimeoutSeconds = defaultConfig.OltpReadPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.TimeoutSeconds, currentConfig.OltpReadPool.TimeoutSeconds.Name(), "query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.") - currentConfig.OlapReadPool.TimeoutSeconds = defaultConfig.OlapReadPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.OlapReadPool.TimeoutSeconds, currentConfig.OlapReadPool.TimeoutSeconds.Name(), "query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.") - currentConfig.TxPool.TimeoutSeconds = defaultConfig.TxPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.TxPool.TimeoutSeconds, currentConfig.TxPool.TimeoutSeconds.Name(), "query server transaction pool timeout, it is how long vttablet waits if tx pool is full") - currentConfig.OltpReadPool.IdleTimeoutSeconds = defaultConfig.OltpReadPool.IdleTimeoutSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.IdleTimeoutSeconds, currentConfig.OltpReadPool.IdleTimeoutSeconds.Name(), "query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.") - currentConfig.OltpReadPool.MaxLifetimeSeconds = defaultConfig.OltpReadPool.MaxLifetimeSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.MaxLifetimeSeconds, currentConfig.OltpReadPool.MaxLifetimeSeconds.Name(), "query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. 
This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.") + fs.DurationVar(¤tConfig.Olap.TxTimeout, "queryserver-config-olap-transaction-timeout", defaultConfig.Olap.TxTimeout, "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed") + fs.DurationVar(¤tConfig.Oltp.QueryTimeout, "queryserver-config-query-timeout", defaultConfig.Oltp.QueryTimeout, "query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.") + fs.DurationVar(¤tConfig.OltpReadPool.Timeout, "queryserver-config-query-pool-timeout", defaultConfig.OltpReadPool.Timeout, "query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.") + fs.DurationVar(¤tConfig.OlapReadPool.Timeout, "queryserver-config-stream-pool-timeout", defaultConfig.OlapReadPool.Timeout, "query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.") + fs.DurationVar(¤tConfig.TxPool.Timeout, "queryserver-config-txpool-timeout", defaultConfig.TxPool.Timeout, "query server transaction pool timeout, it is how long vttablet waits if tx pool is full") + fs.DurationVar(¤tConfig.OltpReadPool.IdleTimeout, "queryserver-config-idle-timeout", defaultConfig.OltpReadPool.IdleTimeout, "query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.") + fs.DurationVar(¤tConfig.OltpReadPool.MaxLifetime, "queryserver-config-pool-conn-max-lifetime", defaultConfig.OltpReadPool.MaxLifetime, "query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.") fs.IntVar(¤tConfig.OltpReadPool.MaxWaiters, "queryserver-config-query-pool-waiter-cap", defaultConfig.OltpReadPool.MaxWaiters, "query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection") fs.IntVar(¤tConfig.OlapReadPool.MaxWaiters, "queryserver-config-stream-pool-waiter-cap", defaultConfig.OlapReadPool.MaxWaiters, "query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection") fs.IntVar(¤tConfig.TxPool.MaxWaiters, "queryserver-config-txpool-waiter-cap", defaultConfig.TxPool.MaxWaiters, "query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection") @@ -201,13 +191,9 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.Int64Var(¤tConfig.ConsolidatorStreamQuerySize, "consolidator-stream-query-size", defaultConfig.ConsolidatorStreamQuerySize, "Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator.") fs.Int64Var(¤tConfig.ConsolidatorStreamTotalSize, "consolidator-stream-total-size", defaultConfig.ConsolidatorStreamTotalSize, "Configure the stream consolidator total size in bytes. 
Setting to 0 disables the stream consolidator.") - currentConfig.Healthcheck.IntervalSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.IntervalSeconds.Name(), defaultConfig.Healthcheck.IntervalSeconds.Get()) - currentConfig.Healthcheck.DegradedThresholdSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.DegradedThresholdSeconds.Name(), defaultConfig.Healthcheck.DegradedThresholdSeconds.Get()) - currentConfig.Healthcheck.UnhealthyThresholdSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.UnhealthyThresholdSeconds.Name(), defaultConfig.Healthcheck.UnhealthyThresholdSeconds.Get()) - - fs.DurationVar(&healthCheckInterval, currentConfig.Healthcheck.IntervalSeconds.Name(), currentConfig.Healthcheck.IntervalSeconds.Get(), "Interval between health checks") - fs.DurationVar(°radedThreshold, currentConfig.Healthcheck.DegradedThresholdSeconds.Name(), currentConfig.Healthcheck.DegradedThresholdSeconds.Get(), "replication lag after which a replica is considered degraded") - fs.DurationVar(&unhealthyThreshold, currentConfig.Healthcheck.UnhealthyThresholdSeconds.Name(), currentConfig.Healthcheck.UnhealthyThresholdSeconds.Get(), "replication lag after which a replica is considered unhealthy") + fs.DurationVar(&healthCheckInterval, "health_check_interval", defaultConfig.Healthcheck.Interval, "Interval between health checks") + fs.DurationVar(°radedThreshold, "degraded_threshold", defaultConfig.Healthcheck.DegradedThreshold, "replication lag after which a replica is considered degraded") + fs.DurationVar(&unhealthyThreshold, "unhealthy_threshold", defaultConfig.Healthcheck.UnhealthyThreshold, "replication lag after which a replica is considered unhealthy") fs.DurationVar(&transitionGracePeriod, "serving_state_grace_period", 0, "how long to pause after broadcasting health to vtgate, before enforcing a new serving state") fs.BoolVar(&enableReplicationReporter, "enable_replication_reporter", false, "Use polling to track replication lag.") @@ -232,10 +218,10 @@ var ( func Init() { // IdleTimeout is only initialized for OltpReadPool , but the other pools need to inherit the value. // TODO(sougou): Make a decision on whether this should be global or per-pool. 
- _ = currentConfig.OlapReadPool.IdleTimeoutSeconds.Set(currentConfig.OltpReadPool.IdleTimeoutSeconds.Get().String()) - _ = currentConfig.TxPool.IdleTimeoutSeconds.Set(currentConfig.OltpReadPool.IdleTimeoutSeconds.Get().String()) - _ = currentConfig.OlapReadPool.MaxLifetimeSeconds.Set(currentConfig.OltpReadPool.MaxLifetimeSeconds.Get().String()) - _ = currentConfig.TxPool.MaxLifetimeSeconds.Set(currentConfig.OltpReadPool.MaxLifetimeSeconds.Get().String()) + currentConfig.OlapReadPool.IdleTimeout = currentConfig.OltpReadPool.IdleTimeout + currentConfig.TxPool.IdleTimeout = currentConfig.OltpReadPool.IdleTimeout + currentConfig.OlapReadPool.MaxLifetime = currentConfig.OltpReadPool.MaxLifetime + currentConfig.TxPool.MaxLifetime = currentConfig.OltpReadPool.MaxLifetime if enableHotRowProtection { if enableHotRowProtectionDryRun { @@ -257,7 +243,7 @@ func Init() { } if heartbeatInterval == 0 { - heartbeatInterval = defaultConfig.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval = defaultConfig.ReplicationTracker.HeartbeatInterval } if heartbeatInterval > time.Second { heartbeatInterval = time.Second @@ -265,8 +251,8 @@ func Init() { if heartbeatOnDemandDuration < 0 { heartbeatOnDemandDuration = 0 } - _ = currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set(heartbeatInterval.String()) - _ = currentConfig.ReplicationTracker.HeartbeatOnDemandSeconds.Set(heartbeatOnDemandDuration.String()) + currentConfig.ReplicationTracker.HeartbeatInterval = heartbeatInterval + currentConfig.ReplicationTracker.HeartbeatOnDemand = heartbeatOnDemandDuration switch { case enableHeartbeat: @@ -277,10 +263,10 @@ func Init() { currentConfig.ReplicationTracker.Mode = Disable } - _ = currentConfig.Healthcheck.IntervalSeconds.Set(healthCheckInterval.String()) - _ = currentConfig.Healthcheck.DegradedThresholdSeconds.Set(degradedThreshold.String()) - _ = currentConfig.Healthcheck.UnhealthyThresholdSeconds.Set(unhealthyThreshold.String()) - _ = currentConfig.GracePeriods.TransitionSeconds.Set(transitionGracePeriod.String()) + currentConfig.Healthcheck.Interval = healthCheckInterval + currentConfig.Healthcheck.DegradedThreshold = degradedThreshold + currentConfig.Healthcheck.UnhealthyThreshold = unhealthyThreshold + currentConfig.GracePeriods.Transition = transitionGracePeriod switch streamlog.GetQueryLogFormat() { case streamlog.QueryLogFormatText: @@ -320,24 +306,24 @@ type TabletConfig struct { ReplicationTracker ReplicationTrackerConfig `json:"replicationTracker,omitempty"` // Consolidator can be enable, disable, or notOnPrimary. Default is enable. 
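Stepping back to the Init() changes above: with ReplicationTracker.HeartbeatInterval now a plain time.Duration, the normalization reads as ordinary comparisons and assignments instead of Set(...String()) round-trips. A distilled, standalone sketch of that logic, using the 250ms default and one-second cap shown in the hunk (the TabletConfig field hunk continues below):

package main

import (
	"fmt"
	"time"
)

// normalizeHeartbeat mirrors the clamping Init applies: fall back to
// the 250ms default, cap the interval at one second, and treat a
// negative on-demand duration as disabled.
func normalizeHeartbeat(interval, onDemand time.Duration) (time.Duration, time.Duration) {
	if interval == 0 {
		interval = 250 * time.Millisecond
	}
	if interval > time.Second {
		interval = time.Second
	}
	if onDemand < 0 {
		onDemand = 0
	}
	return interval, onDemand
}

func main() {
	fmt.Println(normalizeHeartbeat(0, -time.Second))            // 250ms 0s
	fmt.Println(normalizeHeartbeat(5*time.Second, time.Minute)) // 1s 1m0s
}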
- Consolidator string `json:"consolidator,omitempty"` - PassthroughDML bool `json:"passthroughDML,omitempty"` - StreamBufferSize int `json:"streamBufferSize,omitempty"` - ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` - ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` - QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` - QueryCacheDoorkeeper bool `json:"queryCacheDoorkeeper,omitempty"` - SchemaReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"schemaReloadIntervalSeconds,omitempty"` - SignalSchemaChangeReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` - SchemaChangeReloadTimeout time.Duration `json:"schemaChangeReloadTimeout,omitempty"` - WatchReplication bool `json:"watchReplication,omitempty"` - TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` - SchemaVersionMaxAgeSeconds int64 `json:"schemaVersionMaxAgeSeconds,omitempty"` - TerseErrors bool `json:"terseErrors,omitempty"` - TruncateErrorLen int `json:"truncateErrorLen,omitempty"` - AnnotateQueries bool `json:"annotateQueries,omitempty"` - MessagePostponeParallelism int `json:"messagePostponeParallelism,omitempty"` - SignalWhenSchemaChange bool `json:"signalWhenSchemaChange,omitempty"` + Consolidator string `json:"consolidator,omitempty"` + PassthroughDML bool `json:"passthroughDML,omitempty"` + StreamBufferSize int `json:"streamBufferSize,omitempty"` + ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` + ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` + QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` + QueryCacheDoorkeeper bool `json:"queryCacheDoorkeeper,omitempty"` + SchemaReloadInterval time.Duration `json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval time.Duration `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + SchemaChangeReloadTimeout time.Duration `json:"schemaChangeReloadTimeout,omitempty"` + WatchReplication bool `json:"watchReplication,omitempty"` + TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` + SchemaVersionMaxAgeSeconds int64 `json:"schemaVersionMaxAgeSeconds,omitempty"` + TerseErrors bool `json:"terseErrors,omitempty"` + TruncateErrorLen int `json:"truncateErrorLen,omitempty"` + AnnotateQueries bool `json:"annotateQueries,omitempty"` + MessagePostponeParallelism int `json:"messagePostponeParallelism,omitempty"` + SignalWhenSchemaChange bool `json:"signalWhenSchemaChange,omitempty"` ExternalConnections map[string]*dbconfigs.DBConfigs `json:"externalConnections,omitempty"` @@ -377,15 +363,19 @@ func (cfg *TabletConfig) MarshalJSON() ([]byte, error) { tmp := struct { TCProxy - SchemaReloadIntervalSeconds string `json:"schemaReloadIntervalSeconds,omitempty"` - SignalSchemaChangeReloadIntervalSeconds string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` - SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` + SchemaReloadInterval string `json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` }{ TCProxy: TCProxy(*cfg), } - if d := cfg.SchemaReloadIntervalSeconds.Get(); d != 0 { - tmp.SchemaReloadIntervalSeconds = d.String() + if d := cfg.SchemaReloadInterval; d != 0 { + tmp.SchemaReloadInterval = d.String() + } + + if d := 
cfg.SignalSchemaChangeReloadInterval; d != 0 { + tmp.SignalSchemaChangeReloadInterval = d.String() } if d := cfg.SchemaChangeReloadTimeout; d != 0 { @@ -395,14 +385,62 @@ func (cfg *TabletConfig) MarshalJSON() ([]byte, error) { return json.Marshal(&tmp) } +func (cfg *TabletConfig) UnmarshalJSON(data []byte) (err error) { + type TCProxy TabletConfig + + var tmp struct { + TCProxy + SchemaReloadInterval string `json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` + } + + tmp.TCProxy = TCProxy(*cfg) + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + *cfg = TabletConfig(tmp.TCProxy) + + if tmp.SchemaReloadInterval != "" { + cfg.SchemaReloadInterval, err = time.ParseDuration(tmp.SchemaReloadInterval) + if err != nil { + return err + } + } else { + cfg.SchemaReloadInterval = 0 + } + + if tmp.SignalSchemaChangeReloadInterval != "" { + cfg.SignalSchemaChangeReloadInterval, err = time.ParseDuration(tmp.SignalSchemaChangeReloadInterval) + if err != nil { + return err + } + } else { + cfg.SignalSchemaChangeReloadInterval = 0 + } + + if tmp.SchemaChangeReloadTimeout != "" { + cfg.SchemaChangeReloadTimeout, err = time.ParseDuration(tmp.SchemaChangeReloadTimeout) + if err != nil { + return err + } + } else { + cfg.SchemaChangeReloadTimeout = 0 + } + + return nil +} + // ConnPoolConfig contains the config for a conn pool. type ConnPoolConfig struct { - Size int `json:"size,omitempty"` - TimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"timeoutSeconds,omitempty"` - IdleTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"idleTimeoutSeconds,omitempty"` - MaxLifetimeSeconds flagutil.DeprecatedFloat64Seconds `json:"maxLifetimeSeconds,omitempty"` - PrefillParallelism int `json:"prefillParallelism,omitempty"` - MaxWaiters int `json:"maxWaiters,omitempty"` + Size int `json:"size,omitempty"` + Timeout time.Duration `json:"timeoutSeconds,omitempty"` + IdleTimeout time.Duration `json:"idleTimeoutSeconds,omitempty"` + MaxLifetime time.Duration `json:"maxLifetimeSeconds,omitempty"` + PrefillParallelism int `json:"prefillParallelism,omitempty"` + MaxWaiters int `json:"maxWaiters,omitempty"` } func (cfg *ConnPoolConfig) MarshalJSON() ([]byte, error) { @@ -410,31 +448,73 @@ func (cfg *ConnPoolConfig) MarshalJSON() ([]byte, error) { tmp := struct { Proxy - TimeoutSeconds string `json:"timeoutSeconds,omitempty"` - IdleTimeoutSeconds string `json:"idleTimeoutSeconds,omitempty"` - MaxLifetimeSeconds string `json:"maxLifetimeSeconds,omitempty"` + Timeout string `json:"timeoutSeconds,omitempty"` + IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` + MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` }{ Proxy: Proxy(*cfg), } - if d := cfg.TimeoutSeconds.Get(); d != 0 { - tmp.TimeoutSeconds = d.String() + if d := cfg.Timeout; d != 0 { + tmp.Timeout = d.String() } - if d := cfg.IdleTimeoutSeconds.Get(); d != 0 { - tmp.IdleTimeoutSeconds = d.String() + if d := cfg.IdleTimeout; d != 0 { + tmp.IdleTimeout = d.String() } - if d := cfg.MaxLifetimeSeconds.Get(); d != 0 { - tmp.MaxLifetimeSeconds = d.String() + if d := cfg.MaxLifetime; d != 0 { + tmp.MaxLifetime = d.String() } return json.Marshal(&tmp) } +func (cfg *ConnPoolConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Size int `json:"size,omitempty"` + Timeout string `json:"timeoutSeconds,omitempty"` + IdleTimeout string 
`json:"idleTimeoutSeconds,omitempty"` + MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` + PrefillParallelism int `json:"prefillParallelism,omitempty"` + MaxWaiters int `json:"maxWaiters,omitempty"` + } + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Timeout != "" { + cfg.Timeout, err = time.ParseDuration(tmp.Timeout) + if err != nil { + return err + } + } + + if tmp.IdleTimeout != "" { + cfg.IdleTimeout, err = time.ParseDuration(tmp.IdleTimeout) + if err != nil { + return err + } + } + + if tmp.MaxLifetime != "" { + cfg.MaxLifetime, err = time.ParseDuration(tmp.MaxLifetime) + if err != nil { + return err + } + } + + cfg.Size = tmp.Size + cfg.PrefillParallelism = tmp.PrefillParallelism + cfg.MaxWaiters = tmp.MaxWaiters + + return nil +} + // OlapConfig contains the config for olap settings. type OlapConfig struct { - TxTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"txTimeoutSeconds,omitempty"` + TxTimeout time.Duration `json:"txTimeoutSeconds,omitempty"` } func (cfg *OlapConfig) MarshalJSON() ([]byte, error) { @@ -447,19 +527,38 @@ func (cfg *OlapConfig) MarshalJSON() ([]byte, error) { Proxy: Proxy(*cfg), } - if d := cfg.TxTimeoutSeconds.Get(); d != 0 { + if d := cfg.TxTimeout; d != 0 { tmp.TxTimeoutSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *OlapConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + TxTimeout string `json:"txTimeoutSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.TxTimeout != "" { + cfg.TxTimeout, err = time.ParseDuration(tmp.TxTimeout) + if err != nil { + return err + } + } + + return nil +} + // OltpConfig contains the config for oltp settings. type OltpConfig struct { - QueryTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"queryTimeoutSeconds,omitempty"` - TxTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"txTimeoutSeconds,omitempty"` - MaxRows int `json:"maxRows,omitempty"` - WarnRows int `json:"warnRows,omitempty"` + QueryTimeout time.Duration `json:"queryTimeoutSeconds,omitempty"` + TxTimeout time.Duration `json:"txTimeoutSeconds,omitempty"` + MaxRows int `json:"maxRows,omitempty"` + WarnRows int `json:"warnRows,omitempty"` } func (cfg *OltpConfig) MarshalJSON() ([]byte, error) { @@ -467,23 +566,51 @@ func (cfg *OltpConfig) MarshalJSON() ([]byte, error) { tmp := struct { Proxy - QueryTimeoutSeconds string `json:"queryTimeoutSeconds,omitempty"` - TxTimeoutSeconds string `json:"txTimeoutSeconds,omitempty"` + QueryTimeout string `json:"queryTimeoutSeconds,omitempty"` + TxTimeout string `json:"txTimeoutSeconds,omitempty"` }{ Proxy: Proxy(*cfg), } - if d := cfg.QueryTimeoutSeconds.Get(); d != 0 { - tmp.QueryTimeoutSeconds = d.String() + if d := cfg.QueryTimeout; d != 0 { + tmp.QueryTimeout = d.String() } - if d := cfg.TxTimeoutSeconds.Get(); d != 0 { - tmp.TxTimeoutSeconds = d.String() + if d := cfg.TxTimeout; d != 0 { + tmp.TxTimeout = d.String() } return json.Marshal(&tmp) } +func (cfg *OltpConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + OltpConfig + QueryTimeout string `json:"queryTimeoutSeconds,omitempty"` + TxTimeout string `json:"txTimeoutSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.QueryTimeout != "" { + cfg.QueryTimeout, err = time.ParseDuration(tmp.QueryTimeout) + if err != nil { + return err + } + } + + if tmp.TxTimeout != "" { + cfg.TxTimeout, err = time.ParseDuration(tmp.TxTimeout) + if err != nil { + return err + } + } + + 
return nil +} + // HotRowProtectionConfig contains the config for hot row protection. type HotRowProtectionConfig struct { // Mode can be disable, dryRun or enable. Default is disable. @@ -495,97 +622,177 @@ type HotRowProtectionConfig struct { // HealthcheckConfig contains the config for healthcheck. type HealthcheckConfig struct { - IntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"intervalSeconds,omitempty"` - DegradedThresholdSeconds flagutil.DeprecatedFloat64Seconds `json:"degradedThresholdSeconds,omitempty"` - UnhealthyThresholdSeconds flagutil.DeprecatedFloat64Seconds `json:"unhealthyThresholdSeconds,omitempty"` + Interval time.Duration + DegradedThreshold time.Duration + UnhealthyThreshold time.Duration } func (cfg *HealthcheckConfig) MarshalJSON() ([]byte, error) { - type Proxy HealthcheckConfig - - tmp := struct { - Proxy + var tmp struct { IntervalSeconds string `json:"intervalSeconds,omitempty"` DegradedThresholdSeconds string `json:"degradedThresholdSeconds,omitempty"` UnhealthyThresholdSeconds string `json:"unhealthyThresholdSeconds,omitempty"` - }{ - Proxy: Proxy(*cfg), } - if d := cfg.IntervalSeconds.Get(); d != 0 { + if d := cfg.Interval; d != 0 { tmp.IntervalSeconds = d.String() } - if d := cfg.DegradedThresholdSeconds.Get(); d != 0 { + if d := cfg.DegradedThreshold; d != 0 { tmp.DegradedThresholdSeconds = d.String() } - if d := cfg.UnhealthyThresholdSeconds.Get(); d != 0 { + if d := cfg.UnhealthyThreshold; d != 0 { tmp.UnhealthyThresholdSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *HealthcheckConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Interval string `json:"intervalSeconds,omitempty"` + DegradedThreshold string `json:"degradedThresholdSeconds,omitempty"` + UnhealthyThreshold string `json:"unhealthyThresholdSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Interval != "" { + cfg.Interval, err = time.ParseDuration(tmp.Interval) + if err != nil { + return err + } + } + + if tmp.DegradedThreshold != "" { + cfg.DegradedThreshold, err = time.ParseDuration(tmp.DegradedThreshold) + if err != nil { + return err + } + } + + if tmp.UnhealthyThreshold != "" { + cfg.UnhealthyThreshold, err = time.ParseDuration(tmp.UnhealthyThreshold) + if err != nil { + return err + } + } + + return nil +} + // GracePeriodsConfig contains various grace periods. // TODO(sougou): move lameduck here? 
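All of these MarshalJSON/UnmarshalJSON pairs, including the HealthcheckConfig methods just above, follow one shape: the JSON documents keep their legacy "...Seconds" keys for backward compatibility, but the values are Go duration strings produced by Duration.String and read back with time.ParseDuration. A self-contained sketch of the pattern, using a hypothetical one-field config rather than any real tabletenv type (the GracePeriodsConfig hunk continues below):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Cfg stands in for the config structs in this patch: the Go field is
// a time.Duration, while the JSON key keeps its historical name.
type Cfg struct {
	Interval time.Duration
}

func (c Cfg) MarshalJSON() ([]byte, error) {
	var tmp struct {
		IntervalSeconds string `json:"intervalSeconds,omitempty"`
	}
	if c.Interval != 0 {
		tmp.IntervalSeconds = c.Interval.String()
	}
	return json.Marshal(&tmp)
}

func (c *Cfg) UnmarshalJSON(data []byte) error {
	var tmp struct {
		IntervalSeconds string `json:"intervalSeconds,omitempty"`
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	if tmp.IntervalSeconds != "" {
		d, err := time.ParseDuration(tmp.IntervalSeconds)
		if err != nil {
			return err
		}
		c.Interval = d
	}
	return nil
}

func main() {
	out, _ := json.Marshal(Cfg{Interval: 20 * time.Second})
	fmt.Println(string(out)) // {"intervalSeconds":"20s"}

	var c Cfg
	_ = json.Unmarshal(out, &c)
	fmt.Println(c.Interval) // 20s
}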
type GracePeriodsConfig struct { - ShutdownSeconds flagutil.DeprecatedFloat64Seconds `json:"shutdownSeconds,omitempty"` - TransitionSeconds flagutil.DeprecatedFloat64Seconds `json:"transitionSeconds,omitempty"` + Shutdown time.Duration + Transition time.Duration } func (cfg *GracePeriodsConfig) MarshalJSON() ([]byte, error) { - type Proxy GracePeriodsConfig - - tmp := struct { - Proxy + var tmp struct { ShutdownSeconds string `json:"shutdownSeconds,omitempty"` TransitionSeconds string `json:"transitionSeconds,omitempty"` - }{ - Proxy: Proxy(*cfg), } - if d := cfg.ShutdownSeconds.Get(); d != 0 { + if d := cfg.Shutdown; d != 0 { tmp.ShutdownSeconds = d.String() } - if d := cfg.TransitionSeconds.Get(); d != 0 { + if d := cfg.Transition; d != 0 { tmp.TransitionSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *GracePeriodsConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Shutdown string `json:"shutdownSeconds,omitempty"` + Transition string `json:"transitionSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Shutdown != "" { + cfg.Shutdown, err = time.ParseDuration(tmp.Shutdown) + if err != nil { + return err + } + } + + if tmp.Transition != "" { + cfg.Transition, err = time.ParseDuration(tmp.Transition) + if err != nil { + return err + } + } + + return nil +} + // ReplicationTrackerConfig contains the config for the replication tracker. type ReplicationTrackerConfig struct { // Mode can be disable, polling or heartbeat. Default is disable. - Mode string `json:"mode,omitempty"` - HeartbeatIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"heartbeatIntervalSeconds,omitempty"` - HeartbeatOnDemandSeconds flagutil.DeprecatedFloat64Seconds `json:"heartbeatOnDemandSeconds,omitempty"` + Mode string `json:"mode,omitempty"` + HeartbeatInterval time.Duration + HeartbeatOnDemand time.Duration } func (cfg *ReplicationTrackerConfig) MarshalJSON() ([]byte, error) { - type Proxy ReplicationTrackerConfig - tmp := struct { - Proxy + Mode string `json:"mode,omitempty"` HeartbeatIntervalSeconds string `json:"heartbeatIntervalSeconds,omitempty"` HeartbeatOnDemandSeconds string `json:"heartbeatOnDemandSeconds,omitempty"` }{ - Proxy: Proxy(*cfg), + Mode: cfg.Mode, } - if d := cfg.HeartbeatIntervalSeconds.Get(); d != 0 { + if d := cfg.HeartbeatInterval; d != 0 { tmp.HeartbeatIntervalSeconds = d.String() } - if d := cfg.HeartbeatOnDemandSeconds.Get(); d != 0 { + if d := cfg.HeartbeatOnDemand; d != 0 { tmp.HeartbeatOnDemandSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *ReplicationTrackerConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Mode string `json:"mode,omitempty"` + HeartbeatInterval string `json:"heartbeatIntervalSeconds,omitempty"` + HeartbeatOnDemand string `json:"heartbeatOnDemandSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.HeartbeatInterval != "" { + cfg.HeartbeatInterval, err = time.ParseDuration(tmp.HeartbeatInterval) + if err != nil { + return err + } + } + + if tmp.HeartbeatOnDemand != "" { + cfg.HeartbeatOnDemand, err = time.ParseDuration(tmp.HeartbeatOnDemand) + if err != nil { + return err + } + } + + cfg.Mode = tmp.Mode + + return nil +} + // TransactionLimitConfig captures configuration of transaction pool slots // limiter configuration. 
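One detail worth noting about these unmarshalers: the scratch struct must never embed the config type itself. Embedding a type whose pointer defines UnmarshalJSON promotes that method onto the scratch struct, so the inner json.Unmarshal would call it again and recurse forever; this is why TabletConfig and OltpConfig convert through method-less alias types (TCProxy and Proxy). A minimal standalone sketch of the safe pattern, with hypothetical names (the TransactionLimitConfig definition continues below):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type Conf struct {
	MaxRows int           `json:"maxRows,omitempty"`
	Timeout time.Duration `json:"timeoutSeconds,omitempty"`
}

func (c *Conf) UnmarshalJSON(data []byte) error {
	// Proxy has Conf's fields but none of its methods, so the inner
	// json.Unmarshal uses plain struct decoding instead of recursing
	// back into Conf.UnmarshalJSON.
	type Proxy Conf
	var tmp struct {
		Proxy
		Timeout string `json:"timeoutSeconds,omitempty"` // shadows Proxy.Timeout
	}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	*c = Conf(tmp.Proxy)
	if tmp.Timeout != "" {
		d, err := time.ParseDuration(tmp.Timeout)
		if err != nil {
			return err
		}
		c.Timeout = d
	}
	return nil
}

func main() {
	var c Conf
	_ = json.Unmarshal([]byte(`{"maxRows":100,"timeoutSeconds":"1m30s"}`), &c)
	fmt.Println(c.MaxRows, c.Timeout) // 100 1m30s
}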
type TransactionLimitConfig struct { @@ -628,9 +835,9 @@ func (c *TabletConfig) Clone() *TabletConfig { func (c *TabletConfig) SetTxTimeoutForWorkload(val time.Duration, workload querypb.ExecuteOptions_Workload) { switch workload { case querypb.ExecuteOptions_OLAP: - _ = c.Olap.TxTimeoutSeconds.Set(val.String()) + c.Olap.TxTimeout = val case querypb.ExecuteOptions_OLTP: - _ = c.Oltp.TxTimeoutSeconds.Set(val.String()) + c.Oltp.TxTimeout = val default: panic(fmt.Sprintf("unsupported workload type: %v", workload)) } @@ -643,9 +850,9 @@ func (c *TabletConfig) TxTimeoutForWorkload(workload querypb.ExecuteOptions_Work case querypb.ExecuteOptions_DBA: return 0 case querypb.ExecuteOptions_OLAP: - return c.Olap.TxTimeoutSeconds.Get() + return c.Olap.TxTimeout default: - return c.Oltp.TxTimeoutSeconds.Get() + return c.Oltp.TxTimeout } } @@ -736,54 +943,36 @@ func (c *TabletConfig) verifyTxThrottlerConfig() error { // They actually get overwritten during Init. var defaultConfig = TabletConfig{ OltpReadPool: ConnPoolConfig{ - Size: 16, - // TODO (ajm188): remove the zero-value ones after these are durations. - // See the comment below in GracePeriodsConfig as to why they are needed - // for now. - TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-query-pool-timeout", 0), - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-idle-timeout", 30*time.Minute), - MaxLifetimeSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-pool-conn-max-lifetime", 0), - MaxWaiters: 5000, + Size: 16, + IdleTimeout: 30 * time.Minute, + MaxWaiters: 5000, }, OlapReadPool: ConnPoolConfig{ - Size: 200, - // TODO (ajm188): remove the zero-value ones after these are durations. - // See the comment below in GracePeriodsConfig as to why they are needed - // for now. - TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-stream-pool-timeout", 0), - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-stream-pool-idle-timeout", 30*time.Minute), + Size: 200, + IdleTimeout: 30 * time.Minute, }, TxPool: ConnPoolConfig{ - Size: 20, - TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-txpool-timeout", time.Second), - // No actual flag for this one, but has non-zero value - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-txpool-idle-timeout", 30*time.Minute), - MaxWaiters: 5000, + Size: 20, + Timeout: time.Second, + IdleTimeout: 30 * time.Minute, + MaxWaiters: 5000, }, Olap: OlapConfig{ - TxTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-olap-transaction-timeout", 30*time.Second), + TxTimeout: 30 * time.Second, }, Oltp: OltpConfig{ - QueryTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-query-timeout", 30*time.Second), - TxTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-transaction-timeout", 30*time.Second), - MaxRows: 10000, + QueryTimeout: 30 * time.Second, + TxTimeout: 30 * time.Second, + MaxRows: 10000, }, Healthcheck: HealthcheckConfig{ - IntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("health_check_interval", 20*time.Second), - DegradedThresholdSeconds: flagutil.NewDeprecatedFloat64Seconds("degraded_threshold", 30*time.Second), - UnhealthyThresholdSeconds: flagutil.NewDeprecatedFloat64Seconds("unhealthy_threshold", 2*time.Hour), - }, - GracePeriods: GracePeriodsConfig{ - // TODO (ajm188) remove after these are durations. 
it's not necessary - // for production code because it's the zero value, but it's required - // for tests to pass (which require the name field to be present for - // deep equality). - ShutdownSeconds: flagutil.NewDeprecatedFloat64Seconds("shutdown_grace_period", 0), + Interval: 20 * time.Second, + DegradedThreshold: 30 * time.Second, + UnhealthyThreshold: 2 * time.Hour, }, ReplicationTracker: ReplicationTrackerConfig{ - Mode: Disable, - HeartbeatIntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("heartbeat_interval", 250*time.Millisecond), - HeartbeatOnDemandSeconds: flagutil.NewDeprecatedFloat64Seconds("heartbeat_on_demand_duration", 0), + Mode: Disable, + HeartbeatInterval: 250 * time.Millisecond, }, HotRowProtection: HotRowProtectionConfig{ Mode: Disable, @@ -807,8 +996,8 @@ var defaultConfig = TabletConfig{ QueryCacheMemory: 32 * 1024 * 1024, // 32 mb for our query cache // The doorkeeper for the plan cache is disabled by default in endtoend tests to ensure // results are consistent between runs. - QueryCacheDoorkeeper: !servenv.TestingEndtoend, - SchemaReloadIntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-schema-reload-time", 30*time.Minute), + QueryCacheDoorkeeper: !servenv.TestingEndtoend, + SchemaReloadInterval: 30 * time.Minute, // SchemaChangeReloadTimeout is used for the signal reload operation where we have to query mysqld. // The queries during the signal reload operation are typically expected to have low load, // but in busy systems with many tables, some queries may take longer than anticipated. diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go index e472cbb4789..1cf1559ba6e 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go @@ -47,8 +47,11 @@ func TestConfigParse(t *testing.T) { }, }, OltpReadPool: ConnPoolConfig{ - Size: 16, - MaxWaiters: 40, + Size: 16, + MaxWaiters: 40, + Timeout: 10 * time.Second, + IdleTimeout: 20 * time.Second, + MaxLifetime: 50 * time.Second, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000, @@ -56,10 +59,6 @@ func TestConfigParse(t *testing.T) { }, } - _ = cfg.OltpReadPool.TimeoutSeconds.Set("10s") - _ = cfg.OltpReadPool.IdleTimeoutSeconds.Set("20s") - _ = cfg.OltpReadPool.MaxLifetimeSeconds.Set("50s") - gotBytes, err := yaml2.Marshal(&cfg) require.NoError(t, err) wantBytes := `db: @@ -109,9 +108,9 @@ txPool: {} user: c oltpReadPool: size: 16 - idleTimeoutSeconds: 20 + idleTimeoutSeconds: 20s maxWaiters: 40 - maxLifetimeSeconds: 50 + maxLifetimeSeconds: 50s `) gotCfg := cfg gotCfg.DB = cfg.DB.Clone() @@ -178,17 +177,17 @@ func TestClone(t *testing.T) { cfg1 := &TabletConfig{ OltpReadPool: ConnPoolConfig{ - Size: 16, - MaxWaiters: 40, + Size: 16, + MaxWaiters: 40, + Timeout: 10 * time.Second, + IdleTimeout: 20 * time.Second, + MaxLifetime: 50 * time.Second, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000000, MaxMySQLReplLagSecs: 43200, }, } - _ = cfg1.OltpReadPool.TimeoutSeconds.Set("10s") - _ = cfg1.OltpReadPool.IdleTimeoutSeconds.Set("20s") - _ = cfg1.OltpReadPool.MaxLifetimeSeconds.Set("50s") cfg2 := cfg1.Clone() assert.Equal(t, cfg1, cfg2) @@ -206,14 +205,14 @@ func TestFlags(t *testing.T) { // Simple Init. 
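A compatibility consequence is visible in the test expectations above: serialized configs now emit "20s" where they previously emitted the bare number 20, and on the way back in, time.ParseDuration rejects unit-less values, so existing config files written with bare seconds need a unit suffix. A quick standalone illustration (TestFlags resumes below with a plain Init() call):

package main

import (
	"fmt"
	"time"
)

func main() {
	_, err := time.ParseDuration("20")
	fmt.Println(err) // time: missing unit in duration "20"

	d, _ := time.ParseDuration("20s")
	fmt.Println(d) // 20s
}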
Init() - _ = want.OlapReadPool.IdleTimeoutSeconds.Set("30m") - _ = want.TxPool.IdleTimeoutSeconds.Set("30m") + want.OlapReadPool.IdleTimeout = 30 * time.Minute + want.TxPool.IdleTimeout = 30 * time.Minute want.HotRowProtection.Mode = Disable want.Consolidator = Enable - _ = want.Healthcheck.IntervalSeconds.Set("20s") - _ = want.Healthcheck.DegradedThresholdSeconds.Set("30s") - _ = want.Healthcheck.UnhealthyThresholdSeconds.Set("2h") - _ = want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.Healthcheck.Interval = 20 * time.Second + want.Healthcheck.DegradedThreshold = 30 * time.Second + want.Healthcheck.UnhealthyThreshold = 2 * time.Hour + want.ReplicationTracker.HeartbeatInterval = time.Second want.ReplicationTracker.Mode = Disable assert.Equal(t, want.DB, currentConfig.DB) assert.Equal(t, want, currentConfig) @@ -269,52 +268,52 @@ func TestFlags(t *testing.T) { enableHeartbeat = true heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = Heartbeat - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) enableHeartbeat = false heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = Disable - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) enableReplicationReporter = true heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = Polling - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) - healthCheckInterval = 1 * time.Second - currentConfig.Healthcheck.IntervalSeconds.Set("0s") + healthCheckInterval = time.Second + currentConfig.Healthcheck.Interval = 0 Init() - want.Healthcheck.IntervalSeconds.Set("1s") + want.Healthcheck.Interval = time.Second assert.Equal(t, want, currentConfig) degradedThreshold = 2 * time.Second - currentConfig.Healthcheck.DegradedThresholdSeconds.Set("0s") + currentConfig.Healthcheck.DegradedThreshold = 0 Init() - want.Healthcheck.DegradedThresholdSeconds.Set("2s") + want.Healthcheck.DegradedThreshold = 2 * time.Second assert.Equal(t, want, currentConfig) unhealthyThreshold = 3 * time.Second - currentConfig.Healthcheck.UnhealthyThresholdSeconds.Set("0s") + currentConfig.Healthcheck.UnhealthyThreshold = 0 Init() - want.Healthcheck.UnhealthyThresholdSeconds.Set("3s") + want.Healthcheck.UnhealthyThreshold = 3 * time.Second assert.Equal(t, want, currentConfig) transitionGracePeriod = 4 * time.Second - currentConfig.GracePeriods.TransitionSeconds.Set("0s") + currentConfig.GracePeriods.Transition = 0 Init() - want.GracePeriods.TransitionSeconds.Set("4s") + want.GracePeriods.Transition = 4 * time.Second assert.Equal(t, want, currentConfig) currentConfig.SanitizeLogMessages = false diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 
7f4d3a09513..308573eb82b 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -161,7 +161,7 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC topoServer: topoServer, alias: alias.CloneVT(), } - tsv.QueryTimeout.Store(config.Oltp.QueryTimeoutSeconds.Get().Nanoseconds()) + tsv.QueryTimeout.Store(config.Oltp.QueryTimeout.Nanoseconds()) tsOnce.Do(func() { srvTopoServer = srvtopo.NewResilientServer(ctx, topoServer, "TabletSrvTopo") }) diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 5c5050d10ab..6eccb17b592 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -233,8 +233,8 @@ func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Serv ts: ts, heartbeatWriter: heartbeatWriter, pool: connpool.NewPool(env, "ThrottlerPool", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 2, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), } diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index fe8f1aa0b6e..7e8ecc06a75 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -97,7 +97,7 @@ func NewTxEngine(env tabletenv.Env) *TxEngine { config := env.Config() te := &TxEngine{ env: env, - shutdownGracePeriod: config.GracePeriods.ShutdownSeconds.Get(), + shutdownGracePeriod: config.GracePeriods.Shutdown, reservedConnStats: env.Exporter().NewTimings("ReservedConnections", "Reserved connections stats", "operation"), } limiter := txlimiter.New(env) @@ -124,8 +124,8 @@ func NewTxEngine(env tabletenv.Env) *TxEngine { // the TxPreparedPool. te.preparedPool = NewTxPreparedPool(config.TxPool.Size - 2) readPool := connpool.NewPool(env, "TxReadPool", tabletenv.ConnPoolConfig{ - Size: 3, - IdleTimeoutSeconds: env.Config().TxPool.IdleTimeoutSeconds, + Size: 3, + IdleTimeout: env.Config().TxPool.IdleTimeout, }) te.twoPC = NewTwoPC(readPool) te.state = NotServing diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 6ddf2f5a9d3..8b190d675f8 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -46,8 +46,8 @@ func TestTxEngineClose(t *testing.T) { config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) config.TxPool.Size = 10 - _ = config.Oltp.TxTimeoutSeconds.Set("100ms") - _ = config.GracePeriods.ShutdownSeconds.Set("0s") + config.Oltp.TxTimeout = 100 * time.Millisecond + config.GracePeriods.Shutdown = 0 te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) // Normal close. 
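These test changes also remove a class of silent failure: the old Set(string) API returned an error that call sites routinely discarded (note the `_ =` on the removed lines, and the setupTxEngine call that dropped the error entirely), so a malformed literal went unnoticed until runtime. A typed assignment is checked by the compiler instead. A trivial standalone sketch with a hypothetical struct:

package main

import (
	"fmt"
	"time"
)

// Oltp stands in for tabletenv's OltpConfig.
type Oltp struct {
	TxTimeout time.Duration
}

func main() {
	var cfg Oltp
	// Malformed values are now compile errors, not ignored parse errors.
	cfg.TxTimeout = 100 * time.Millisecond
	fmt.Println(cfg.TxTimeout) // 100ms
}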
@@ -533,8 +533,8 @@ func setupTxEngine(db *fakesqldb.DB) *TxEngine { config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) config.TxPool.Size = 10 - config.Oltp.TxTimeoutSeconds.Set("100ms") - _ = config.GracePeriods.ShutdownSeconds.Set("0s") + config.Oltp.TxTimeout = 100 * time.Millisecond + config.GracePeriods.Shutdown = 0 te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) return te } diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 3515310c481..71bba1e128d 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -303,7 +303,7 @@ func TestTxPoolWaitTimeoutError(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().TxPool.TimeoutSeconds.Set("1s") + env.Config().TxPool.Timeout = time.Second // given db, txPool, _, closer := setupWithEnv(t, env) defer closer() @@ -425,7 +425,7 @@ func TestTxTimeoutKillsTransactions(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") + env.Config().Oltp.TxTimeout = time.Second _, txPool, limiter, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -474,7 +474,7 @@ func TestTxTimeoutDoesNotKillShortLivedTransactions(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") + env.Config().Oltp.TxTimeout = time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -507,8 +507,8 @@ func TestTxTimeoutKillsOlapTransactions(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -545,8 +545,8 @@ func TestTxTimeoutNotEnforcedForZeroLengthTimeouts(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 2 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("0s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("0s") + env.Config().Oltp.TxTimeout = 0 + env.Config().Olap.TxTimeout = 0 _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -588,8 +588,8 @@ func TestTxTimeoutReservedConn(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingRcKills := txPool.env.Stats().KillCounters.Counts()["ReservedConnection"] @@ -631,8 +631,8 @@ func TestTxTimeoutReusedReservedConn(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = 
env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingRcKills := txPool.env.Stats().KillCounters.Counts()["ReservedConnection"] @@ -814,12 +814,12 @@ func newTxPoolWithEnv(env tabletenv.Env) (*TxPool, *fakeLimiter) { func newEnv(exporterName string) tabletenv.Env { config := tabletenv.NewDefaultConfig() config.TxPool.Size = 300 - _ = config.Oltp.TxTimeoutSeconds.Set("30s") - _ = config.TxPool.TimeoutSeconds.Set("40s") + config.Oltp.TxTimeout = 30 * time.Second + config.TxPool.Timeout = 40 * time.Second config.TxPool.MaxWaiters = 500000 - _ = config.OltpReadPool.IdleTimeoutSeconds.Set("30s") - _ = config.OlapReadPool.IdleTimeoutSeconds.Set("30s") - _ = config.TxPool.IdleTimeoutSeconds.Set("30s") + config.OltpReadPool.IdleTimeout = 30 * time.Second + config.OlapReadPool.IdleTimeout = 30 * time.Second + config.TxPool.IdleTimeout = 30 * time.Second env := tabletenv.NewEnv(config, exporterName) return env } diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index c17d66dedec..28cecbaf001 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -185,12 +185,12 @@ const ( // QueryServerArgs are the default arguments passed to all Vitess query servers var QueryServerArgs = []string{ "--queryserver-config-pool-size", "4", - "--queryserver-config-query-timeout", "300", - "--queryserver-config-schema-reload-time", "60", + "--queryserver-config-query-timeout", "300s", + "--queryserver-config-schema-reload-time", "60s", "--queryserver-config-stream-pool-size", "4", "--queryserver-config-transaction-cap", "4", - "--queryserver-config-transaction-timeout", "300", - "--queryserver-config-txpool-timeout", "300", + "--queryserver-config-transaction-timeout", "300s", + "--queryserver-config-txpool-timeout", "300s", } // VtcomboProcess returns a VtProcess handle for a local `vtcombo` service,