From 0ab8d03e2f5bdd7d7eb7cd3fb5e68880426f2a2f Mon Sep 17 00:00:00 2001
From: Ying
Date: Mon, 25 Nov 2024 14:39:06 -0500
Subject: [PATCH 01/11] running the test a lot

---
 .../managed_configuration.test.ts             | 414 ------------------
 .../integration_tests/removed_types.test.ts   | 158 -------
 .../server/integration_tests/run.sh           |  12 +
 .../integration_tests/task_cost_check.test.ts |  63 ---
 ...sk_manager_capacity_based_claiming.test.ts |   5 +-
 ...er_capacity_based_claiming.test_01.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_02.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_03.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_04.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_05.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_06.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_07.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_08.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_09.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_10.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_11.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_12.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_13.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_14.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_15.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_16.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_17.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_18.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_19.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_20.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_21.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_22.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_23.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_24.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_25.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_26.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_27.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_28.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_29.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_30.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_31.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_32.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_33.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_34.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_35.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_36.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_37.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_38.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_39.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_40.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_41.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_42.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_43.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_44.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_45.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_46.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_47.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_48.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_49.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_50.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_51.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_52.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_53.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_54.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_55.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_56.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_57.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_58.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_59.test.ts | 330 ++++++++++++++
 ...er_capacity_based_claiming.test_60.test.ts | 330 ++++++++++++++
 .../task_manager_switch_task_claimers.test.ts | 369 ----------------
 .../task_priority_check.test.ts               |  60 ---
 .../task_state_validation.test.ts             | 340 --------
 .../server/task_claimers/strategy_mget.ts     |   2 +
 69 files changed, 19817 insertions(+), 1406 deletions(-)
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/removed_types.test.ts
 create mode 100755 x-pack/plugins/task_manager/server/integration_tests/run.sh
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_priority_check.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_state_validation.test.ts
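Note: the generated suites can be executed with Kibana's integration-test runner. A minimal
sketch, assuming the standard `node scripts/jest_integration` entry point (the flag choice
here is illustrative, not part of this patch):

    # From the Kibana repo root: run every suite in the directory serially,
    # so the 60 copies do not compete for the shared test ES instance.
    node scripts/jest_integration x-pack/plugins/task_manager/server/integration_tests --runInBand
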
diff --git a/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts b/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
deleted file mode 100644
index ab1d1bc0498fd..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import sinon from 'sinon';
-import { Client } from '@elastic/elasticsearch';
-import { elasticsearchServiceMock, savedObjectsRepositoryMock } from '@kbn/core/server/mocks';
-import { SavedObjectsErrorHelpers, Logger } from '@kbn/core/server';
-import { ADJUST_THROUGHPUT_INTERVAL } from '../lib/create_managed_configuration';
-import { TaskManagerPlugin, TaskManagerStartContract } from '../plugin';
-import { coreMock } from '@kbn/core/server/mocks';
-import { TaskManagerConfig } from '../config';
-
-describe('managed configuration', () => {
-  let taskManagerStart: TaskManagerStartContract;
-  let logger: Logger;
-
-  let clock: sinon.SinonFakeTimers;
-  const savedObjectsClient = savedObjectsRepositoryMock.create();
-  const esStart = elasticsearchServiceMock.createStart();
-
-  const inlineScriptError = new Error('cannot execute [inline] scripts" error') as Error & {
-    meta: unknown;
-  };
-  inlineScriptError.meta = {
-    body: {
-      error: {
-        caused_by: {
-          reason: 'cannot execute [inline] scripts',
-        },
-      },
-    },
-  };
-
-  afterEach(() => clock.restore());
-
-  describe('managed poll interval', () => {
-    beforeEach(async () => {
-      jest.resetAllMocks();
-      clock = sinon.useFakeTimers();
-
-      const context = coreMock.createPluginInitializerContext<TaskManagerConfig>({
-        discovery: {
-          active_nodes_lookback: '30s',
-          interval: 10000,
-        },
-        kibanas_per_partition: 2,
-        capacity: 10,
-        max_attempts: 9,
-        poll_interval: 3000,
-        allow_reading_invalid_state: false,
-        version_conflict_threshold: 80,
-        monitored_aggregated_stats_refresh_rate: 60000,
-        monitored_stats_health_verbose_log: {
-          enabled: false,
-          level: 'debug' as const,
-          warn_delayed_task_start_in_seconds: 60,
-        },
-        monitored_stats_required_freshness: 4000,
-        monitored_stats_running_average_window: 50,
-        request_capacity: 1000,
-        monitored_task_execution_thresholds: {
-          default: {
-            error_threshold: 90,
-            warn_threshold: 80,
-          },
-          custom: {},
-        },
-        ephemeral_tasks: {
-          enabled: true,
-          request_capacity: 10,
-        },
-        unsafe: {
-          exclude_task_types: [],
-          authenticate_background_task_utilization: true,
-        },
-        event_loop_delay: {
-          monitor: true,
-          warn_threshold: 5000,
-        },
-        worker_utilization_running_average_window: 5,
-        metrics_reset_interval: 3000,
-        claim_strategy: 'update_by_query',
-        request_timeouts: {
-          update_by_query: 1000,
-        },
-        auto_calculate_default_ech_capacity: false,
-      });
-      logger = context.logger.get('taskManager');
-
-      const taskManager = new TaskManagerPlugin(context);
-      (
-        await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
-      ).registerTaskDefinitions({
-        foo: {
-          title: 'Foo',
-          createTaskRunner: jest.fn(),
-        },
-      });
-
-      const coreStart = coreMock.createStart();
-      coreStart.elasticsearch = esStart;
-      esStart.client.asInternalUser.child.mockReturnValue(
-        esStart.client.asInternalUser as unknown as Client
-      );
-      coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
-      taskManagerStart = await taskManager.start(coreStart, {});
-
-      // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
-      // sinon fake timers cause them to stall
-      clock.tick(0);
-    });
-
-    test('should increase poll interval when Elasticsearch returns 429 error', async () => {
-      savedObjectsClient.create.mockRejectedValueOnce(
-        SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
-      );
-
-      // Cause "too many requests" error to be thrown
-      await expect(
-        taskManagerStart.schedule({
-          taskType: 'foo',
-          state: {},
-          params: {},
-        })
-      ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
-    });
-
-    test('should increase poll interval when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
-      const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked<Client>;
-      childEsClient.search.mockImplementationOnce(async () => {
-        throw inlineScriptError;
-      });
-
-      await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
-        `"cannot execute [inline] scripts\\" error"`
-      );
-
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
-    });
-  });
-
-  describe('managed capacity with default claim strategy', () => {
-    beforeEach(async () => {
-      jest.resetAllMocks();
-      clock = sinon.useFakeTimers();
-
-      const context = coreMock.createPluginInitializerContext<TaskManagerConfig>({
-        discovery: {
-          active_nodes_lookback: '30s',
-          interval: 10000,
-        },
-        kibanas_per_partition: 2,
-        capacity: 10,
-        max_attempts: 9,
-        poll_interval: 3000,
-        allow_reading_invalid_state: false,
-        version_conflict_threshold: 80,
-        monitored_aggregated_stats_refresh_rate: 60000,
-        monitored_stats_health_verbose_log: {
-          enabled: false,
-          level: 'debug' as const,
-          warn_delayed_task_start_in_seconds: 60,
-        },
-        monitored_stats_required_freshness: 4000,
-        monitored_stats_running_average_window: 50,
-        request_capacity: 1000,
-        monitored_task_execution_thresholds: {
-          default: {
-            error_threshold: 90,
-            warn_threshold: 80,
-          },
-          custom: {},
-        },
-        ephemeral_tasks: {
-          enabled: true,
-          request_capacity: 10,
-        },
-        unsafe: {
-          exclude_task_types: [],
-          authenticate_background_task_utilization: true,
-        },
-        event_loop_delay: {
-          monitor: true,
-          warn_threshold: 5000,
-        },
-        worker_utilization_running_average_window: 5,
-        metrics_reset_interval: 3000,
-        claim_strategy: 'update_by_query',
-        request_timeouts: {
-          update_by_query: 1000,
-        },
-        auto_calculate_default_ech_capacity: false,
-      });
-      logger = context.logger.get('taskManager');
-
-      const taskManager = new TaskManagerPlugin(context);
-      (
-        await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
-      ).registerTaskDefinitions({
-        foo: {
-          title: 'Foo',
-          createTaskRunner: jest.fn(),
-        },
-      });
-
-      const coreStart = coreMock.createStart();
-      coreStart.elasticsearch = esStart;
-      esStart.client.asInternalUser.child.mockReturnValue(
-        esStart.client.asInternalUser as unknown as Client
-      );
-      coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
-      taskManagerStart = await taskManager.start(coreStart, {});
-
-      // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
-      // sinon fake timers cause them to stall
-      clock.tick(0);
-    });
-
-    test('should lower capacity when Elasticsearch returns 429 error', async () => {
-      savedObjectsClient.create.mockRejectedValueOnce(
-        SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
-      );
-
-      // Cause "too many requests" error to be thrown
-      await expect(
-        taskManagerStart.schedule({
-          taskType: 'foo',
-          state: {},
-          params: {},
-        })
-      ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Task pool now using 10 as the max worker value which is based on a capacity of 10'
-      );
-    });
-
-    test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
-      const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked<Client>;
-      childEsClient.search.mockImplementationOnce(async () => {
-        throw inlineScriptError;
-      });
-
-      await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
-        `"cannot execute [inline] scripts\\" error"`
-      );
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Task pool now using 10 as the max worker value which is based on a capacity of 10'
-      );
-    });
-  });
-
-  describe('managed capacity with mget claim strategy', () => {
-    beforeEach(async () => {
-      jest.resetAllMocks();
-      clock = sinon.useFakeTimers();
-
-      const context = coreMock.createPluginInitializerContext<TaskManagerConfig>({
-        discovery: {
-          active_nodes_lookback: '30s',
-          interval: 10000,
-        },
-        kibanas_per_partition: 2,
-        capacity: 10,
-        max_attempts: 9,
-        poll_interval: 3000,
-        allow_reading_invalid_state: false,
-        version_conflict_threshold: 80,
-        monitored_aggregated_stats_refresh_rate: 60000,
-        monitored_stats_health_verbose_log: {
-          enabled: false,
-          level: 'debug' as const,
-          warn_delayed_task_start_in_seconds: 60,
-        },
-        monitored_stats_required_freshness: 4000,
-        monitored_stats_running_average_window: 50,
-        request_capacity: 1000,
-        monitored_task_execution_thresholds: {
-          default: {
-            error_threshold: 90,
-            warn_threshold: 80,
-          },
-          custom: {},
-        },
-        ephemeral_tasks: {
-          enabled: true,
-          request_capacity: 10,
-        },
-        unsafe: {
-          exclude_task_types: [],
-          authenticate_background_task_utilization: true,
-        },
-        event_loop_delay: {
-          monitor: true,
-          warn_threshold: 5000,
-        },
-        worker_utilization_running_average_window: 5,
-        metrics_reset_interval: 3000,
-        claim_strategy: 'mget',
-        request_timeouts: {
-          update_by_query: 1000,
-        },
-        auto_calculate_default_ech_capacity: false,
-      });
-      logger = context.logger.get('taskManager');
-
-      const taskManager = new TaskManagerPlugin(context);
-      (
-        await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
-      ).registerTaskDefinitions({
-        foo: {
-          title: 'Foo',
-          createTaskRunner: jest.fn(),
-        },
-      });
-
-      const coreStart = coreMock.createStart();
-      coreStart.elasticsearch = esStart;
-      esStart.client.asInternalUser.child.mockReturnValue(
-        esStart.client.asInternalUser as unknown as Client
-      );
-      coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
-      taskManagerStart = await taskManager.start(coreStart, {});
-
-      // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
-      // sinon fake timers cause them to stall
-      clock.tick(0);
-    });
-
-    test('should lower capacity when Elasticsearch returns 429 error', async () => {
-      savedObjectsClient.create.mockRejectedValueOnce(
-        SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
-      );
-
-      // Cause "too many requests" error to be thrown
-      await expect(
-        taskManagerStart.schedule({
-          taskType: 'foo',
-          state: {},
-          params: {},
-        })
-      ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Task pool now using 20 as the max allowed cost which is based on a capacity of 10'
-      );
-    });
-
-    test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
-      const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked<Client>;
-      childEsClient.search.mockImplementationOnce(async () => {
-        throw inlineScriptError;
-      });
-
-      await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
-        `"cannot execute [inline] scripts\\" error"`
-      );
-      clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
-      expect(logger.warn).toHaveBeenCalledWith(
-        'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
-      );
-      expect(logger.debug).toHaveBeenCalledWith(
-        'Task pool now using 20 as the max allowed cost which is based on a capacity of 10'
-      );
-    });
-  });
-});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/removed_types.test.ts b/x-pack/plugins/task_manager/server/integration_tests/removed_types.test.ts
deleted file mode 100644
index 390c426e9c69f..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/removed_types.test.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { v4 as uuidV4 } from 'uuid';
-import { ElasticsearchClient } from '@kbn/core/server';
-import { TaskManagerPlugin, TaskManagerStartContract } from '../plugin';
-import { injectTask, retry, setupTestServers } from './lib';
-import { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
-import { ConcreteTaskInstance, TaskStatus } from '../task';
-import { CreateWorkloadAggregatorOpts } from '../monitoring/workload_statistics';
-
-const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
-
-const { createWorkloadAggregator: createWorkloadAggregatorMock } = jest.requireMock(
-  '../monitoring/workload_statistics'
-);
-jest.mock('../monitoring/workload_statistics', () => {
-  const actual = jest.requireActual('../monitoring/workload_statistics');
-  return {
-    ...actual,
-    createWorkloadAggregator: jest.fn().mockImplementation((opts) => {
-      return new actual.createWorkloadAggregator(opts);
-    }),
-  };
-});
-
-// FLAKY: https://github.com/elastic/kibana/issues/194208
-describe.skip('unrecognized task types', () => {
-  let esServer: TestElasticsearchUtils;
-  let kibanaServer: TestKibanaUtils;
-  let taskManagerPlugin: TaskManagerStartContract;
-  let createWorkloadAggregatorOpts: CreateWorkloadAggregatorOpts;
-
-  const taskIdsToRemove: string[] = [];
-
-  beforeAll(async () => {
-    const setupResult = await setupTestServers({
-      xpack: {
-        task_manager: {
-          monitored_aggregated_stats_refresh_rate: 5000,
-        },
-      },
-    });
-    esServer = setupResult.esServer;
-    kibanaServer = setupResult.kibanaServer;
-
-    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
-    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
-
-    expect(createWorkloadAggregatorMock).toHaveBeenCalledTimes(1);
-    createWorkloadAggregatorOpts = createWorkloadAggregatorMock.mock.calls[0][0];
-  });
-
-  afterAll(async () => {
-    if (kibanaServer) {
-      await kibanaServer.stop();
-    }
-    if (esServer) {
-      await esServer.stop();
-    }
-  });
-
-  beforeEach(async () => {
-    jest.clearAllMocks();
-  });
-
-  afterEach(async () => {
-    while (taskIdsToRemove.length > 0) {
-      const id = taskIdsToRemove.pop();
-      await taskManagerPlugin.removeIfExists(id!);
-    }
-  });
-
-  test('should be no workload aggregator errors when there are removed task types', async () => {
-    const errorLogSpy = jest.spyOn(createWorkloadAggregatorOpts.logger, 'error');
-    const removeTypeId = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: removeTypeId,
-      taskType: 'sampleTaskRemovedType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: new Date(),
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    const notRegisteredTypeId = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: notRegisteredTypeId,
-      taskType: 'sampleTaskNotRegisteredType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: new Date(),
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-
-    taskIdsToRemove.push(removeTypeId);
-    taskIdsToRemove.push(notRegisteredTypeId);
-
-    await retry(async () => {
-      const task = await getTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser);
-      expect(task?._source?.task?.status).toBe('unrecognized');
-    });
-
-    // monitored_aggregated_stats_refresh_rate is set to the minimum of 5 seconds
-    // so we want to wait that long to let it refresh
-    await new Promise((r) => setTimeout(r, 5100));
-
-    const errorLogCalls = errorLogSpy.mock.calls[0];
-
-    // if there are any error logs, none of them should be workload aggregator errors
-    if (errorLogCalls) {
-      // should be no workload aggregator errors
-      for (const elog of errorLogCalls) {
-        if (typeof elog === 'string') {
-          expect(elog).not.toMatch(/^\[WorkloadAggregator\]: Error: Unsupported task type/i);
-        }
-      }
-    }
-  });
-});
-
-async function getTask(esClient: ElasticsearchClient) {
-  const response = await esClient.search<{ task: ConcreteTaskInstance }>({
-    index: '.kibana_task_manager',
-    body: {
-      query: {
-        bool: {
-          filter: [
-            {
-              term: {
-                'task.taskType': 'sampleTaskRemovedType',
-              },
-            },
-          ],
-        },
-      },
-    },
-  });
-
-  return response.hits.hits[0];
-}
diff --git a/x-pack/plugins/task_manager/server/integration_tests/run.sh b/x-pack/plugins/task_manager/server/integration_tests/run.sh
new file mode 100755
index 0000000000000..c790592acca9d
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/run.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Define the source file name
+source_file="task_manager_capacity_based_claiming.test.ts"
+
+# Loop from 01 to 60
+for i in $(seq -w 1 60); do
+  # Construct the new file name with suffix
+  destination_file="${source_file%.*}_$i.test.${source_file##*.}"
+  # Copy the file
+  cp "$source_file" "$destination_file"
+done
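The copies are throwaway; a matching cleanup sketch (assuming the two-digit `_NN` suffix
pattern that run.sh produces):

    # Remove the generated duplicate suites once the flake investigation is done.
    rm -f task_manager_capacity_based_claiming.test_[0-9][0-9].test.ts
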
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts
deleted file mode 100644
index 96678f714ac69..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import {
-  type TestElasticsearchUtils,
-  type TestKibanaUtils,
-} from '@kbn/core-test-helpers-kbn-server';
-import { TaskCost, TaskDefinition } from '../task';
-import { setupTestServers } from './lib';
-import { TaskTypeDictionary } from '../task_type_dictionary';
-
-jest.mock('../task_type_dictionary', () => {
-  const actual = jest.requireActual('../task_type_dictionary');
-  return {
-    ...actual,
-    TaskTypeDictionary: jest.fn().mockImplementation((opts) => {
-      return new actual.TaskTypeDictionary(opts);
-    }),
-  };
-});
-
-// Notify response-ops if a task sets a cost to something other than `Normal`
-describe('Task cost checks', () => {
-  let esServer: TestElasticsearchUtils;
-  let kibanaServer: TestKibanaUtils;
-  let taskTypeDictionary: TaskTypeDictionary;
-
-  beforeAll(async () => {
-    const setupResult = await setupTestServers();
-    esServer = setupResult.esServer;
-    kibanaServer = setupResult.kibanaServer;
-
-    const mockedTaskTypeDictionary = jest.requireMock('../task_type_dictionary');
-    expect(mockedTaskTypeDictionary.TaskTypeDictionary).toHaveBeenCalledTimes(1);
-    taskTypeDictionary = mockedTaskTypeDictionary.TaskTypeDictionary.mock.results[0].value;
-  });
-
-  afterAll(async () => {
-    if (kibanaServer) {
-      await kibanaServer.stop();
-    }
-    if (esServer) {
-      await esServer.stop();
-    }
-  });
-
-  it('detects tasks with cost definitions', async () => {
-    const taskTypes = taskTypeDictionary.getAllDefinitions();
-    const taskTypesWithCost = taskTypes
-      .map((taskType: TaskDefinition) =>
-        !!taskType.cost ? { taskType: taskType.type, cost: taskType.cost } : null
-      )
-      .filter(
-        (tt: { taskType: string; cost: TaskCost } | null) =>
-          null != tt && tt.cost !== TaskCost.Normal
-      );
-    expect(taskTypesWithCost).toMatchSnapshot();
-  });
-});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
index d74dc12283360..312f847575702 100644
--- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
@@ -94,8 +94,7 @@ jest.mock('../queries/task_claiming', () => {
 
 const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
 
-// FLAKY: https://github.com/elastic/kibana/issues/191117
-describe.skip('capacity based claiming', () => {
+describe('capacity based claiming', () => {
   const taskIdsToRemove: string[] = [];
   let esServer: TestElasticsearchUtils;
   let kibanaServer: TestKibanaUtils;
@@ -196,6 +195,8 @@ describe.skip('capacity based claiming', () => {
 
     expect(taskRunAtDates.length).toBe(10);
 
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
     // run at dates should be within a few seconds of each other
     const firstRunAt = taskRunAtDates[0].getTime();
     const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
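For comparison, the same repetition can be had without 60 file copies, at the cost of
sequential rather than parallel CI scheduling. A sketch (the stop-on-first-failure behavior
is this loop's choice, not something the patch relies on):

    # Re-run the original suite 60 times, stopping at the first failure.
    for i in $(seq -w 1 60); do
      node scripts/jest_integration \
        x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts ||
        { echo "failed on run $i"; break; }
    done
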
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a few seconds of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // last task should be normal cost and be run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0 or 60 or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
+    // should be 60 during the next claim cycle where we claimed the large capacity task and the normal: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a few seconds of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // last task should be normal cost and be run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0 or 60 or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
+    // should be 60 during the next claim cycle where we claimed the large capacity task and the normal: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // and 60 during the next claim cycle where we claimed the XL cost task plus the last normal task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
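+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.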
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
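+          // With the `mget` claim strategy, a capacity of 10 should translate into a cost
+          // budget of 10 * 2 = 20 per claim cycle, assuming Kibana's default TaskCost
+          // values (Normal = 2, ExtraLarge = 10); the capacity arithmetic asserted in the
+          // tests below relies on this.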
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
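+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.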
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
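+          // With the `mget` claim strategy, a capacity of 10 should translate into a cost
+          // budget of 10 * 2 = 20 per claim cycle, assuming Kibana's default TaskCost
+          // values (Normal = 2, ExtraLarge = 10); the capacity arithmetic asserted in the
+          // tests below relies on this.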
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
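+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.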
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
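+          // With the `mget` claim strategy, a capacity of 10 should translate into a cost
+          // budget of 10 * 2 = 20 per claim cycle, assuming Kibana's default TaskCost
+          // values (Normal = 2, ExtraLarge = 10); the capacity arithmetic asserted in the
+          // tests below relies on this.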
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
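+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.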
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
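+          // With the `mget` claim strategy, a capacity of 10 should translate into a cost
+          // budget of 10 * 2 = 20 per claim cycle, assuming Kibana's default TaskCost
+          // values (Normal = 2, ExtraLarge = 10); the capacity arithmetic asserted in the
+          // tests below relies on this.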
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
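+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.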
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
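+          // With the `mget` claim strategy, a capacity of 10 should translate into a cost
+          // budget of 10 * 2 = 20 per claim cycle, assuming Kibana's default TaskCost
+          // values (Normal = 2, ExtraLarge = 10); the capacity arithmetic asserted in the
+          // tests below relies on this.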
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should be within a second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for total cost of 12
+    const ids: string[] = [];
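+    // Worked example of the expected claim math, assuming TaskCost.Normal = 2 and
+    // TaskCost.ExtraLarge = 10 against the cost budget of 20: the 6 normal tasks cost
+    // 6 * 2 = 12, and adding the XL task would reach 12 + 10 = 22 > 20, so the XL task
+    // and the final normal task should only be claimed on the following poll cycle.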
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the final normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal cost tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
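Why every copy of this test sets unsafe.exclude_task_types to ['[A-Za-z]*']: the pattern excludes every registered task type whose name begins with a letter, which silences all of Kibana's built-in background tasks; only the underscore-prefixed types registered through the TaskClaiming mock ('_normalCostType', '_xlCostType') remain claimable, so the load assertions see no other traffic. A rough, hypothetical equivalent of that match (the real setting accepts wildcard patterns; this regex is for illustration only):

  const isExcluded = (taskType: string) => /^[A-Za-z]/.test(taskType);
  console.log(isExcluded('alerting:example')); // true  -> excluded (hypothetical type name)
  console.log(isExcluded('_normalCostType')); // false -> still runs
  console.log(isExcluded('_xlCostType')); // false -> still runs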
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal cost tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
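The timing assertions in each copy encode two windows around the first observed run. A small sketch of the intent, assuming the 5000ms POLLING_INTERVAL defined at the top of every file and the 500ms fudge factor used in the expectations:

  const POLLING_INTERVAL = 5000;
  const FUDGE_MS = 500;
  // claimed in the first cycle: within the fudge window of the first run
  const ranInFirstCycle = (deltaMs: number) => deltaMs <= FUDGE_MS;
  // deferred to the next cycle: at least one polling interval later, minus fudge
  const ranInSecondCycle = (deltaMs: number) => deltaMs > POLLING_INTERVAL - FUDGE_MS;
  console.log(ranInFirstCycle(120)); // true -- e.g. the six normal cost tasks
  console.log(ranInSecondCycle(5050)); // true -- e.g. the XL task and the last normal task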
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal cost tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
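The three runAt values in the second test differ by only 5ms, but that is what fixes the claim order: due tasks are claimed oldest-first (an assumption about the claim query that is consistent with the assertions), so the six normal tasks sort ahead of the XL task, and the last normal task sorts behind it and is deferred along with it. An illustration of that ordering:

  const now = Date.now();
  const claimOrder = [
    { type: 'normal x6', runAt: now - 5 }, // fills 12 of the 20-cost budget
    { type: 'xl', runAt: now }, // would exceed the budget, deferred
    { type: 'normal', runAt: now + 5 }, // queued behind the xl task, also deferred
  ].sort((a, b) => a.runAt - b.runAt);
  console.log(claimOrder.map((t) => t.type)); // [ 'normal x6', 'xl', 'normal' ]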
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal cost tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have been run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks
+    // should be 100 during the claim cycle where we claimed 6 normal cost tasks but left the XL cost task in the queue
+    // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost and run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
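Aside: a minimal sketch of the arithmetic the assertions above lean on. This is not the actual strategy_mget implementation; the cost constants (Normal = 2, ExtraLarge = 10) and the stop-at-first-task-that-won't-fit loop are assumptions reconstructed from the test's own comments (capacity 10, "max cost capacity of 20"), and all names here are illustrative.

type CostClass = 'normal' | 'xl';

// assumed costs, inferred from the comments above; capacity 10 maps to 20 cost units
const COST: Record<CostClass, number> = { normal: 2, xl: 10 };
const MAX_COST = 20;

function claimUntilNextWontFit(queue: CostClass[]): CostClass[] {
  const claimed: CostClass[] = [];
  let used = 0;
  for (const type of queue) {
    // stop at the first task that would push us past the budget,
    // leaving it and everything behind it for the next poll cycle
    if (used + COST[type] > MAX_COST) break;
    used += COST[type];
    claimed.push(type);
  }
  return claimed;
}

// First poll: six normal tasks (6 * 2 = 12) fit, but the XL task would make
// the total 22 > 20, so it and the trailing normal task wait one interval.
const queue: CostClass[] = ['normal', 'normal', 'normal', 'normal', 'normal', 'normal', 'xl', 'normal'];
const firstPoll = claimUntilNextWontFit(queue);
const secondPoll = claimUntilNextWontFit(queue.slice(firstPoll.length));
console.log(firstPoll.length, secondPoll); // 6 [ 'xl', 'normal' ]

Under these assumptions the second poll claims the XL task plus the last normal task for (10 + 2) / 20 = 60% utilization, which is where the 60 in the asserted 0/60/100 background task loads comes from.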
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
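+    // 6 tasks * TaskCost.Normal (2) = 12 of the 20-cost budget; the XL task injected
+    // next (cost 10) would bring the total to 22, past the budget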
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed the 6 normal tasks but left the XL task in the queue,
+    // and 60 during the next claim cycle where we claimed the XL task plus the remaining normal task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
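The claim loop these tests exercise amounts to a cost-capped scan of the candidate
queue. The sketch below is illustrative only: the real logic lives in
task_claimers/strategy_mget.ts, and CandidateTask/selectByCost are invented names.
It stops at the first task whose cost would overflow the budget, which is why both
the XL task and the normal task queued behind it wait for the next polling interval.

interface CandidateTask {
  id: string;
  cost: number; // TaskCost.Normal = 2, TaskCost.ExtraLarge = 10
}

// claim in queue order until the next task would exceed the cost budget
function selectByCost(queue: CandidateTask[], costBudget: number): CandidateTask[] {
  const claimed: CandidateTask[] = [];
  let used = 0;
  for (const task of queue) {
    if (used + task.cost > costBudget) {
      break; // this task and everything behind it wait for the next poll cycle
    }
    used += task.cost;
    claimed.push(task);
  }
  return claimed;
}

// with capacity 10 (budget 20): six normal tasks use 12, but the XL task would need 22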
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
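+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)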
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
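+    // at cost 2 apiece these 6 tasks consume 12 of the 20-cost claim budget, so the
+    // XL task (cost 10) injected next cannot be claimed in the same cycle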
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
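+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)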
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
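+    // at cost 2 apiece these 6 tasks consume 12 of the 20-cost claim budget, so the
+    // XL task (cost 10) injected next cannot be claimed in the same cycle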
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
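+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)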
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
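+    // at cost 2 apiece these 6 tasks consume 12 of the 20-cost claim budget, so the
+    // XL task (cost 10) injected next cannot be claimed in the same cycle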
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
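+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)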
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
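+    // at cost 2 apiece these 6 tasks consume 12 of the 20-cost claim budget, so the
+    // XL task (cost 10) injected next cannot be claimed in the same cycle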
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
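+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)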
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should all be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
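+    // at cost 2 apiece these 6 tasks consume 12 of the 20-cost claim budget, so the
+    // XL task (cost 10) injected next cannot be claimed in the same cycle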
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (allowing some fudge factor),
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, also run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the last normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: 'mget',
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
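+          // note: a capacity of 10 lets the mget claim strategy claim up to a total task cost
+          // of 20 per poll cycle (assuming TaskCost.Normal = 2 and TaskCost.ExtraLarge = 10,
+          // the values the cost and load assertions below rely on)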
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { v4 as uuidV4 } from 'uuid';
+import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
+import { schema } from '@kbn/config-schema';
+import { times } from 'lodash';
+import { TaskCost, TaskStatus } from '../task';
+import type { TaskClaimingOpts } from '../queries/task_claiming';
+import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
+import { injectTask, setupTestServers, retry } from './lib';
+import { CreateMonitoringStatsOpts } from '../monitoring';
+import { filter, map } from 'rxjs';
+import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
+import { TaskLifecycleEvent } from '../polling_lifecycle';
+import { Ok } from '../lib/result_type';
+
+const POLLING_INTERVAL = 5000;
+const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
+jest.mock('../polling_lifecycle', () => {
+  const actual = jest.requireActual('../polling_lifecycle');
+  return {
+    ...actual,
+    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
+      return new actual.TaskPollingLifecycle(opts);
+    }),
+  };
+});
+
+const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
+jest.mock('../monitoring', () => {
+  const actual = jest.requireActual('../monitoring');
+  return {
+    ...actual,
+    createMonitoringStats: jest.fn().mockImplementation((opts) => {
+      return new actual.createMonitoringStats(opts);
+    }),
+  };
+});
+
+const mockTaskTypeNormalCostRunFn = jest.fn();
+const mockCreateTaskRunnerNormalCost = jest.fn();
+const mockTaskTypeNormalCost = {
+  title: 'Normal cost task',
+  description: '',
+  cost: TaskCost.Normal,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
+    run: mockTaskTypeNormalCostRunFn,
+  })),
+};
+const mockTaskTypeXLCostRunFn = jest.fn();
+const mockCreateTaskRunnerXLCost = jest.fn();
+const mockTaskTypeXLCost = {
+  title: 'XL cost task',
+  description: '',
+  cost: TaskCost.ExtraLarge,
+  stateSchemaByVersion: {
+    1: {
+      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
+      schema: schema.object({
+        foo: schema.string(),
+      }),
+    },
+  },
+  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
+    run: mockTaskTypeXLCostRunFn,
+  })),
+};
+jest.mock('../queries/task_claiming', () => {
+  const actual = jest.requireActual('../queries/task_claiming');
+  return {
+    ...actual,
+    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
+      opts.definitions.registerTaskDefinitions({
+        _normalCostType: mockTaskTypeNormalCost,
+        _xlCostType: mockTaskTypeXLCost,
+      });
+      return new actual.TaskClaiming(opts);
+    }),
+  };
+});
+
+const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
+
+describe('capacity based claiming', () => {
+  const taskIdsToRemove: string[] = [];
+  let esServer: TestElasticsearchUtils;
+  let kibanaServer: TestKibanaUtils;
+  let taskManagerPlugin: TaskManagerStartContract;
+  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
+
+  beforeAll(async () => {
+    const setupResult = await setupTestServers({
+      xpack: {
+        task_manager: {
+          claim_strategy: `mget`,
+          capacity: 10,
+          poll_interval: POLLING_INTERVAL,
+          unsafe: {
+            exclude_task_types: ['[A-Za-z]*'],
+          },
+        },
+      },
+    });
+    esServer = setupResult.esServer;
+    kibanaServer = setupResult.kibanaServer;
+
+    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
+    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
+
+    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
+
+    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
+    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
+  });
+
+  afterAll(async () => {
+    if (kibanaServer) {
+      await kibanaServer.stop();
+    }
+    if (esServer) {
+      await esServer.stop();
+    }
+  });
+
+  beforeEach(() => {
+    jest.clearAllMocks();
+  });
+
+  afterEach(async () => {
+    while (taskIdsToRemove.length > 0) {
+      const id = taskIdsToRemove.pop();
+      await taskManagerPlugin.removeIfExists(id!);
+    }
+  });
+
+  it('should claim tasks to full capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const taskRunAtDates: Date[] = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push(new Date());
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 10 normal cost tasks with the same runAt value
+    const ids: string[] = [];
+    times(10, () => ids.push(uuidV4()));
+
+    const now = new Date();
+    const runAt = new Date(now.valueOf() + 5000);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
+    });
+
+    expect(taskRunAtDates.length).toBe(10);
+
+    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+
+    // run-at dates should be within one second of each other
+    const firstRunAt = taskRunAtDates[0].getTime();
+    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
+
+    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
+
+    // background task load should be 0 or 100 since we're only running these tasks
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 100).toBe(true);
+    }
+  });
+
+  it('should claim tasks until the next task will exceed capacity', async () => {
+    const backgroundTaskLoads: number[] = [];
+    createMonitoringStatsOpts.taskPollingLifecycle?.events
+      .pipe(
+        filter(isTaskManagerWorkerUtilizationStatEvent),
+        map((taskEvent: TaskLifecycleEvent) => {
+          return (taskEvent.event as unknown as Ok<number>).value;
+        })
+      )
+      .subscribe((load: number) => {
+        backgroundTaskLoads.push(load);
+      });
+    const now = new Date();
+    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+    mockTaskTypeXLCostRunFn.mockImplementation(() => {
+      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+      return { state: { foo: 'test' } };
+    });
+
+    // inject 6 normal cost tasks for a total cost of 12
+    const ids: string[] = [];
+    times(6, () => ids.push(uuidV4()));
+    const runAt1 = new Date(now.valueOf() - 5);
+    for (const id of ids) {
+      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+        id,
+        taskType: '_normalCostType',
+        params: {},
+        state: { foo: 'test' },
+        stateVersion: 1,
+        runAt: runAt1,
+        enabled: true,
+        scheduledAt: new Date(),
+        attempts: 0,
+        status: TaskStatus.Idle,
+        startedAt: null,
+        retryAt: null,
+        ownerId: null,
+      });
+      taskIdsToRemove.push(id);
+    }
+
+    // inject 1 XL cost task that will put us over the max cost capacity of 20
+    const xlid = uuidV4();
+    const runAt2 = now;
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: xlid,
+      taskType: '_xlCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt2,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(xlid);
+
+    // inject one more normal cost task
+    const runAt3 = new Date(now.valueOf() + 5);
+    const lastid = uuidV4();
+    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+      id: lastid,
+      taskType: '_normalCostType',
+      params: {},
+      state: { foo: 'test' },
+      stateVersion: 1,
+      runAt: runAt3,
+      enabled: true,
+      scheduledAt: new Date(),
+      attempts: 0,
+      status: TaskStatus.Idle,
+      startedAt: null,
+      retryAt: null,
+      ownerId: null,
+    });
+    taskIdsToRemove.push(lastid);
+
+    // retry until all tasks have been run
+    await retry(async () => {
+      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+    });
+
+    expect(taskRunAtDates.length).toBe(8);
+
+    const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+    // the first 6 tasks should have run at the same time (adding some fudge factor)
+    // and they should all be normal cost tasks
+    for (let i = 0; i < 6; i++) {
+      expect(taskRunAtDates[i].type).toBe('normal');
+      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+    }
+
+    // the next task should be the XL cost task, run after one polling interval has passed (with some fudge factor)
+    expect(taskRunAtDates[6].type).toBe('xl');
+    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // the last task should be normal cost, run after one polling interval has passed
+    expect(taskRunAtDates[7].type).toBe('normal');
+    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+    // background task load should be 0, 60, or 100 since we're only running these tasks:
+    // 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue,
+    // 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60%
+    for (const load of backgroundTaskLoads) {
+      expect(load === 0 || load === 60 || load === 100).toBe(true);
+    }
+  });
+});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts
new file mode 100644
index 0000000000000..312f847575702
--- /dev/null
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts
@@ -0,0 +1,330 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
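// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +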
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
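// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +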
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
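// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +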
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
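// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +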
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
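// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +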
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
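// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +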
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
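// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +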
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
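// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +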
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
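// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +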
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
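// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +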
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + 
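// capacity 10 yields a max claim cost of 20 per poll under the mget strategy + // the exclude filter below appears intended to block every task type whose name starts with a letter, so only the underscore-prefixed mock types registered in this file are claimed +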
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
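// cost arithmetic: TaskCost.Normal is 2 and TaskCost.ExtraLarge is 10, so capacity 10 equates to 20 cost units per claim cycle + // six normal tasks cost 12; adding the XL task would need 22, so it waits for the next cycle, where utilization is (10 + 2) / 20 = 60% +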
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be the XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // the last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts new file mode 100644 index 0000000000000..312f847575702 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts @@ -0,0 +1,330 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, +
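// Note on the unsafe setting just below: '[A-Za-z]*' appears to act as a
// wildcard matching every task type whose name starts with a letter, so all
// of Kibana's built-in task types are excluded from claiming and only the
// underscore-prefixed mock types registered above (_normalCostType,
// _xlCostType) can run. This is what keeps the utilization numbers asserted
// later deterministic.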
unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push(new Date()); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + it('should claim tasks until the next task will exceed capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const now = new Date(); + const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + mockTaskTypeXLCostRunFn.mockImplementation(() => { + taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + return { state: { foo: 'test' } }; + }); + + // inject 6 normal cost tasks for total cost of 12 + const ids: string[] = []; + 
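// retry() from './lib' (used below) polls an async assertion until it stops
// throwing. A rough, self-contained sketch of that shape -- illustrative
// only, not the actual ./lib implementation, though the options object with
// { times, intervalMs } matches how retry() is called elsewhere in this patch:
async function retryUntil<T>(
  fn: () => Promise<T>,
  opts: { times?: number; intervalMs?: number } = {}
): Promise<T> {
  const { times = 60, intervalMs = 500 } = opts;
  let lastError: unknown;
  for (let attempt = 0; attempt < times; attempt++) {
    try {
      return await fn(); // a resolved value means the assertion passed
    } catch (err) {
      lastError = err; // assertion failed; wait and poll again
      await new Promise((resolve) => setTimeout(resolve, intervalMs));
    }
  }
  throw lastError; // surface the last failure once retries are exhausted
}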
times(6, () => ids.push(uuidV4())); + const runAt1 = new Date(now.valueOf() - 5); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt1, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + // inject 1 XL cost task that will put us over the max cost capacity of 20 + const xlid = uuidV4(); + const runAt2 = now; + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: xlid, + taskType: '_xlCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt2, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(xlid); + + // inject one more normal cost task + const runAt3 = new Date(now.valueOf() + 5); + const lastid = uuidV4(); + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id: lastid, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt: runAt3, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(lastid); + + // retry until all tasks have been run + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + }); + + expect(taskRunAtDates.length).toBe(8); + + const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // the first 6 tasks should have been run at the same time (adding some fudge factor) + // and they should all be normal cost tasks + for (let i = 0; i < 6; i++) { + expect(taskRunAtDates[i].type).toBe('normal'); + expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + } + + // the next task should be the XL cost task and be run after one polling interval has passed (with some fudge factor) + expect(taskRunAtDates[6].type).toBe('xl'); + expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // the last task should be normal cost and be run after one polling interval has passed + expect(taskRunAtDates[7].type).toBe('normal'); + expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // background task load should be 0 or 60 or 100 since we're only running these tasks + // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 60 || load === 100).toBe(true); + } + }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts deleted file mode 100644 index b89f9f92586fe..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import { schema } from '@kbn/config-schema'; -import { SerializedConcreteTaskInstance, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { injectTask, setupTestServers, retry } from './lib'; -import { setupKibanaServer } from './lib/setup_test_servers'; - -const mockTaskTypeRunFn = jest.fn(); -const mockCreateTaskRunner = jest.fn(); -const mockTaskType = { - title: '', - description: '', - stateSchemaByVersion: { - 1: { - up: (state: Record<string, unknown>) => ({ ...state, baz: state.baz || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - baz: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunner.mockImplementation(() => ({ - run: mockTaskTypeRunFn, - })), -}; -const { TaskClaiming: TaskClaimingMock } = jest.requireMock('../queries/task_claiming'); -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - // We need to register here because once the class is instantiated, adding - // definitions won't get claimed because of "partitionIntoClaimingBatches". - opts.definitions.registerTaskDefinitions({ - fooType: mockTaskType, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -describe('switch task claiming strategies', () => { - beforeEach(() => { - jest.clearAllMocks(); - }); - - it('should switch from default to update_by_query and still claim tasks', async () => { - const setupResultDefault = await setupTestServers(); - const esServer = setupResultDefault.esServer; - let kibanaServer = setupResultDefault.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('mget'); - - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultUbq = await setupKibanaServer({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - kibanaServer = setupResultUbq.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - // inject a task to run and ensure it is claimed and run - const id2 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id2, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { -
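// For context: injectTask() from './lib' (used throughout these tests) writes
// a raw task document directly into the '.kibana_task_manager' index under the
// id `task:<uuid>`, bypassing the public schedule() API -- which is why the
// shutdown tests later in this file can read the document back with
// esClient.get({ id: `task:${id1}`, index: '.kibana_task_manager' }).
// Roughly (an illustrative sketch, not the actual helper):
//
//   await esClient.create({
//     index: '.kibana_task_manager',
//     id: `task:${id}`,
//     refresh: true,
//     document: { type: 'task', task: { ...taskFields, id } },
//   });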
expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from update_by_query to default and still claim tasks', async () => { - const setupResultUbq = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - const esServer = setupResultUbq.esServer; - let kibanaServer = setupResultUbq.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultDefault = await setupKibanaServer(); - kibanaServer = setupResultDefault.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('mget'); - - // inject a task to run and ensure it is claimed and run - const id2 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id2, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from default to update_by_query and claim tasks that were running during shutdown', async () => { - const setupResultDefault = await setupTestServers(); - const esServer = setupResultDefault.esServer; - let kibanaServer = setupResultDefault.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('mget'); - - mockTaskTypeRunFn.mockImplementation(async () => { - await new Promise((resolve) => setTimeout(resolve, 2000)); - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - timeoutOverride: '5s', - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultUbq = await setupKibanaServer({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - kibanaServer = 
setupResultUbq.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - // task doc should still exist and be running - const task = await kibanaServer.coreStart.elasticsearch.client.asInternalUser.get<{ - task: SerializedConcreteTaskInstance; - }>({ - id: `task:${id1}`, - index: '.kibana_task_manager', - }); - - expect(task._source?.task?.status).toBe(TaskStatus.Running); - - // task manager should pick up and claim the task that was running during shutdown - await retry( - async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }, - { times: 60, intervalMs: 1000 } - ); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from update_by_query to default and claim tasks that were running during shutdown', async () => { - const setupResultUbq = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - const esServer = setupResultUbq.esServer; - let kibanaServer = setupResultUbq.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - mockTaskTypeRunFn.mockImplementation(async () => { - await new Promise((resolve) => setTimeout(resolve, 2000)); - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - timeoutOverride: '5s', - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultDefault = await setupKibanaServer(); - kibanaServer = setupResultDefault.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('mget'); - - // task doc should still exist and be running - const task = await kibanaServer.coreStart.elasticsearch.client.asInternalUser.get<{ - task: SerializedConcreteTaskInstance; - }>({ - id: `task:${id1}`, - index: '.kibana_task_manager', - }); - - expect(task._source?.task?.status).toBe(TaskStatus.Running); - - await retry( - async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }, - { times: 60, intervalMs: 1000 } - ); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_priority_check.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_priority_check.test.ts deleted file mode 100644 index ebbea6f1e8a07..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_priority_check.test.ts +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { - type TestElasticsearchUtils, - type TestKibanaUtils, -} from '@kbn/core-test-helpers-kbn-server'; -import { TaskDefinition, TaskPriority } from '../task'; -import { setupTestServers } from './lib'; -import { TaskTypeDictionary } from '../task_type_dictionary'; - -jest.mock('../task_type_dictionary', () => { - const actual = jest.requireActual('../task_type_dictionary'); - return { - ...actual, - TaskTypeDictionary: jest.fn().mockImplementation((opts) => { - return new actual.TaskTypeDictionary(opts); - }), - }; -}); - -// Notify response-ops if a task sets a priority to something other than `Normal` -describe('Task priority checks', () => { - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskTypeDictionary: TaskTypeDictionary; - - beforeAll(async () => { - const setupResult = await setupTestServers(); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - const mockedTaskTypeDictionary = jest.requireMock('../task_type_dictionary'); - expect(mockedTaskTypeDictionary.TaskTypeDictionary).toHaveBeenCalledTimes(1); - taskTypeDictionary = mockedTaskTypeDictionary.TaskTypeDictionary.mock.results[0].value; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('detects tasks with priority definitions', async () => { - const taskTypes = taskTypeDictionary.getAllDefinitions(); - const taskTypesWithPriority = taskTypes - .map((taskType: TaskDefinition) => - !!taskType.priority ? { taskType: taskType.type, priority: taskType.priority } : null - ) - .filter((tt: { taskType: string; priority: TaskPriority } | null) => null != tt); - expect(taskTypesWithPriority).toMatchSnapshot(); - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_state_validation.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_state_validation.test.ts deleted file mode 100644 index 294b4fd905807..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_state_validation.test.ts +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { TaskStatus } from '../task'; -import type { TaskPollingLifecycleOpts } from '../polling_lifecycle'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; - -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const mockTaskTypeRunFn = jest.fn(); -const mockCreateTaskRunner = jest.fn(); -const mockTaskType = { - title: '', - description: '', - stateSchemaByVersion: { - 1: { - up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - 2: { - up: (state: Record<string, unknown>) => ({ ...state, bar: state.bar || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - }), - }, - 3: { - up: (state: Record<string, unknown>) => ({ ...state, baz: state.baz || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - baz: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunner.mockImplementation(() => ({ - run: mockTaskTypeRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - // We need to register here because once the class is instantiated, adding - // definitions won't get claimed because of "partitionIntoClaimingBatches".
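// The stateSchemaByVersion map above implements versioned task-state
// migrations: each up() lifts the previous version's state to the next shape,
// and the sibling schema validates the result. A small walkthrough of how an
// empty v1-era state reaches v3 -- an illustrative sketch that mirrors the
// 'should migrate the task state' test below:
//
//   const stateV1 = mockTaskType.stateSchemaByVersion[1].up({}); // { foo: '' }
//   const stateV2 = mockTaskType.stateSchemaByVersion[2].up(stateV1); // adds bar: ''
//   const stateV3 = mockTaskType.stateSchemaByVersion[3].up(stateV2); // adds baz: ''
//   // stateV3 is { foo: '', bar: '', baz: '' } and passes
//   // mockTaskType.stateSchemaByVersion[3].schema.validate(stateV3)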
- opts.definitions.registerTaskDefinitions({ - fooType: mockTaskType, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('task state validation', () => { - describe('allow_reading_invalid_state: true', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let pollingLifecycleOpts: TaskPollingLifecycleOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers(); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - pollingLifecycleOpts = TaskPollingLifecycleMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should drop unknown fields from the task state', async () => { - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test', invalidProperty: 'invalid' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: 'test', - bar: 'test', - baz: 'test', - }); - }); - - it('should fail to update the task if the task runner returns an unknown property in the state', async () => { - const errorLogSpy = jest.spyOn(pollingLifecycleOpts.logger, 'error'); - mockTaskTypeRunFn.mockImplementation(() => { - return { state: { invalidField: true, foo: 'test', bar: 'test', baz: 'test' } }; - }); - - const task = await taskManagerPlugin.schedule({ - taskType: 'fooType', - params: {}, - state: { foo: 'test', bar: 'test', baz: 'test' }, - schedule: { interval: '1d' }, - }); - taskIdsToRemove.push(task.id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: 'test', - bar: 'test', - baz: 'test', - }); - expect(errorLogSpy).toHaveBeenCalledWith( - `Task fooType "${task.id}" failed: Error: [invalidField]: definition for this key is missing`, - expect.anything() - ); - }); - - it('should migrate the task state', async () => { - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: {}, - runAt: new Date(), - enabled: true, - scheduledAt: 
new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: '', - bar: '', - baz: '', - }); - }); - - it('should debug log by default when reading an invalid task state', async () => { - const debugLogSpy = jest.spyOn(pollingLifecycleOpts.logger, 'debug'); - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: true, bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: true, - bar: 'test', - baz: 'test', - }); - - expect(debugLogSpy).toHaveBeenCalledWith( - `[fooType][${id}] Failed to validate the task's state. Allowing read operation to proceed because allow_reading_invalid_state is true. Error: [foo]: expected value of type [string] but got [boolean]` - ); - }); - }); - - describe('allow_reading_invalid_state: false', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let pollingLifecycleOpts: TaskPollingLifecycleOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - allow_reading_invalid_state: false, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - pollingLifecycleOpts = TaskPollingLifecycleMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should fail the task run when setting allow_reading_invalid_state:false and reading an invalid state', async () => { - const logSpy = jest.spyOn(pollingLifecycleOpts.logger, 'warn'); - const updateSpy = jest.spyOn(pollingLifecycleOpts.taskStore, 'bulkPartialUpdate'); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: true, bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - const calls = logSpy.mock.calls as string[][]; - const 
expected = - /^Task \(fooType\/.*\) has a validation error: \[foo\]: expected value of type \[string\] but got \[boolean\]/; - const found = calls.map((arr) => arr[0]).find((message) => message.match(expected) != null); - expect(found).toMatch(expected); - expect(updateSpy).toHaveBeenCalledWith( - expect.arrayContaining([expect.objectContaining({ id })]) - ); - }); - }); - }); -}); diff --git a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts index 431daab8dd2cb..b3667b753a800 100644 --- a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts +++ b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts @@ -153,6 +153,8 @@ async function claimAvailableTasks(opts: TaskClaimerOpts): Promise Date: Mon, 25 Nov 2024 15:52:40 -0500 Subject: [PATCH 02/11] running the test a lot --- .../task_manager_capacity_based_claiming.test.ts | 1 + .../task_manager_capacity_based_claiming.test_01.test.ts | 1 + .../task_manager_capacity_based_claiming.test_02.test.ts | 1 + .../task_manager_capacity_based_claiming.test_03.test.ts | 1 + .../task_manager_capacity_based_claiming.test_04.test.ts | 1 + .../task_manager_capacity_based_claiming.test_05.test.ts | 1 + .../task_manager_capacity_based_claiming.test_06.test.ts | 1 + .../task_manager_capacity_based_claiming.test_07.test.ts | 1 + .../task_manager_capacity_based_claiming.test_08.test.ts | 1 + .../task_manager_capacity_based_claiming.test_09.test.ts | 1 + .../task_manager_capacity_based_claiming.test_10.test.ts | 1 + .../task_manager_capacity_based_claiming.test_11.test.ts | 1 + .../task_manager_capacity_based_claiming.test_12.test.ts | 1 + .../task_manager_capacity_based_claiming.test_13.test.ts | 1 + .../task_manager_capacity_based_claiming.test_14.test.ts | 1 + .../task_manager_capacity_based_claiming.test_15.test.ts | 1 + .../task_manager_capacity_based_claiming.test_16.test.ts | 1 + .../task_manager_capacity_based_claiming.test_17.test.ts | 1 + .../task_manager_capacity_based_claiming.test_18.test.ts | 1 + .../task_manager_capacity_based_claiming.test_19.test.ts | 1 + .../task_manager_capacity_based_claiming.test_20.test.ts | 1 + .../task_manager_capacity_based_claiming.test_21.test.ts | 1 + .../task_manager_capacity_based_claiming.test_22.test.ts | 1 + .../task_manager_capacity_based_claiming.test_23.test.ts | 1 + .../task_manager_capacity_based_claiming.test_24.test.ts | 1 + .../task_manager_capacity_based_claiming.test_25.test.ts | 1 + .../task_manager_capacity_based_claiming.test_26.test.ts | 1 + .../task_manager_capacity_based_claiming.test_27.test.ts | 1 + .../task_manager_capacity_based_claiming.test_28.test.ts | 1 + .../task_manager_capacity_based_claiming.test_29.test.ts | 1 + .../task_manager_capacity_based_claiming.test_30.test.ts | 1 + .../task_manager_capacity_based_claiming.test_31.test.ts | 1 + .../task_manager_capacity_based_claiming.test_32.test.ts | 1 + .../task_manager_capacity_based_claiming.test_33.test.ts | 1 + .../task_manager_capacity_based_claiming.test_34.test.ts | 1 + .../task_manager_capacity_based_claiming.test_35.test.ts | 1 + .../task_manager_capacity_based_claiming.test_36.test.ts | 1 + .../task_manager_capacity_based_claiming.test_37.test.ts | 1 + .../task_manager_capacity_based_claiming.test_38.test.ts | 1 + .../task_manager_capacity_based_claiming.test_39.test.ts | 1 + .../task_manager_capacity_based_claiming.test_40.test.ts | 1 + .../task_manager_capacity_based_claiming.test_41.test.ts | 1 + 
.../task_manager_capacity_based_claiming.test_42.test.ts | 1 + .../task_manager_capacity_based_claiming.test_43.test.ts | 1 + .../task_manager_capacity_based_claiming.test_44.test.ts | 1 + .../task_manager_capacity_based_claiming.test_45.test.ts | 1 + .../task_manager_capacity_based_claiming.test_46.test.ts | 1 + .../task_manager_capacity_based_claiming.test_47.test.ts | 1 + .../task_manager_capacity_based_claiming.test_48.test.ts | 1 + .../task_manager_capacity_based_claiming.test_49.test.ts | 1 + .../task_manager_capacity_based_claiming.test_50.test.ts | 1 + .../task_manager_capacity_based_claiming.test_51.test.ts | 1 + .../task_manager_capacity_based_claiming.test_52.test.ts | 1 + .../task_manager_capacity_based_claiming.test_53.test.ts | 1 + .../task_manager_capacity_based_claiming.test_54.test.ts | 1 + .../task_manager_capacity_based_claiming.test_55.test.ts | 1 + .../task_manager_capacity_based_claiming.test_56.test.ts | 1 + .../task_manager_capacity_based_claiming.test_57.test.ts | 1 + .../task_manager_capacity_based_claiming.test_58.test.ts | 1 + .../task_manager_capacity_based_claiming.test_59.test.ts | 1 + .../task_manager_capacity_based_claiming.test_60.test.ts | 1 + .../plugins/task_manager/server/task_claimers/strategy_mget.ts | 1 + 62 files changed, 62 insertions(+) diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of 
each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts +++ 
b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git 
a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts @@ -195,6 
+195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts index 312f847575702..61928968212cf 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts @@ -195,6 +195,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); + // eslint-disable-next-line no-console console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts index 
[The same one-line hunk — adding `// eslint-disable-next-line no-console` above the `console.log` — repeats verbatim for task_manager_capacity_based_claiming.test_21.test.ts through task_manager_capacity_based_claiming.test_60.test.ts, each with index 312f847575702..61928968212cf.]
diff --git a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts
index b3667b753a800..374c7c1647412 100644
--- a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts
+++ b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts
@@ -153,6 +153,7 @@ async function claimAvailableTasks(opts: TaskClaimerOpts): Promise

From: Ying
Date: Mon, 25 Nov 2024 20:32:01 -0500
Subject: [PATCH 03/11] Removing some

---
 ...er_capacity_based_claiming.test_41.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_42.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_43.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_44.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_45.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_46.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_47.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_48.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_49.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_50.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_51.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_52.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_53.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_54.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_55.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_56.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_57.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_58.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_59.test.ts | 331 ------------------
 ...er_capacity_based_claiming.test_60.test.ts | 331 ------------------
 20 files changed, 6620 deletions(-)
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts

diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_41.test.ts
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { v4 as uuidV4 } from 'uuid';
-import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server';
-import { schema } from '@kbn/config-schema';
-import { times } from 'lodash';
-import { TaskCost, TaskStatus } from '../task';
-import type { TaskClaimingOpts } from '../queries/task_claiming';
-import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin';
-import { injectTask, setupTestServers, retry } from './lib';
-import { CreateMonitoringStatsOpts } from '../monitoring';
-import { filter, map } from 'rxjs';
-import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events';
-import { TaskLifecycleEvent } from '../polling_lifecycle';
-import { Ok } from '../lib/result_type';
-
-const POLLING_INTERVAL = 5000;
-const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle');
-jest.mock('../polling_lifecycle', () => {
-  const actual = jest.requireActual('../polling_lifecycle');
-  return {
-    ...actual,
-    TaskPollingLifecycle: jest.fn().mockImplementation((opts) => {
-      return new actual.TaskPollingLifecycle(opts);
-    }),
-  };
-});
-
-const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring');
-jest.mock('../monitoring', () => {
-  const actual = jest.requireActual('../monitoring');
-  return {
-    ...actual,
-    createMonitoringStats: jest.fn().mockImplementation((opts) => {
-      return new actual.createMonitoringStats(opts);
-    }),
-  };
-});
-
-const mockTaskTypeNormalCostRunFn = jest.fn();
-const mockCreateTaskRunnerNormalCost = jest.fn();
-const mockTaskTypeNormalCost = {
-  title: 'Normal cost task',
-  description: '',
-  cost: TaskCost.Normal,
-  stateSchemaByVersion: {
-    1: {
-      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
-      schema: schema.object({
-        foo: schema.string(),
-      }),
-    },
-  },
-  createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({
-    run: mockTaskTypeNormalCostRunFn,
-  })),
-};
-const mockTaskTypeXLCostRunFn = jest.fn();
-const mockCreateTaskRunnerXLCost = jest.fn();
-const mockTaskTypeXLCost = {
-  title: 'XL cost task',
-  description: '',
-  cost: TaskCost.ExtraLarge,
-  stateSchemaByVersion: {
-    1: {
-      up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }),
-      schema: schema.object({
-        foo: schema.string(),
-      }),
-    },
-  },
-  createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({
-    run: mockTaskTypeXLCostRunFn,
-  })),
-};
-jest.mock('../queries/task_claiming', () => {
-  const actual = jest.requireActual('../queries/task_claiming');
-  return {
-    ...actual,
-    TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => {
-      opts.definitions.registerTaskDefinitions({
-        _normalCostType: mockTaskTypeNormalCost,
-        _xlCostType: mockTaskTypeXLCost,
-      });
-      return new actual.TaskClaiming(opts);
-    }),
-  };
-});
-
-const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start');
-
-describe('capacity based claiming', () => {
-  const taskIdsToRemove: string[] = [];
-  let esServer: TestElasticsearchUtils;
-  let kibanaServer: TestKibanaUtils;
-  let taskManagerPlugin: TaskManagerStartContract;
-  let createMonitoringStatsOpts: CreateMonitoringStatsOpts;
-
-  beforeAll(async () => {
-    const setupResult = await setupTestServers({
-      xpack: {
-        task_manager: {
-          claim_strategy: `mget`,
-          capacity: 10,
-          poll_interval: POLLING_INTERVAL,
-          unsafe: {
-            exclude_task_types: ['[A-Za-z]*'],
-          },
-        },
-      },
-    });
-    esServer = setupResult.esServer;
-    kibanaServer = setupResult.kibanaServer;
-
-    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
-    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
-
-    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
-
-    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
-    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
-  });
-
-  afterAll(async () => {
-    if (kibanaServer) {
-      await kibanaServer.stop();
-    }
-    if (esServer) {
-      await esServer.stop();
-    }
-  });
-
-  beforeEach(() => {
-    jest.clearAllMocks();
-  });
-
-  afterEach(async () => {
-    while (taskIdsToRemove.length > 0) {
-      const id = taskIdsToRemove.pop();
-      await taskManagerPlugin.removeIfExists(id!);
-    }
-  });
-
-  it('should claim tasks to full capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok<number>).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const taskRunAtDates: Date[] = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push(new Date());
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 10 normal cost tasks with the same runAt value
-    const ids: string[] = [];
-    times(10, () => ids.push(uuidV4()));
-
-    const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
-    });
-
-    expect(taskRunAtDates.length).toBe(10);
-
-    // eslint-disable-next-line no-console
-    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
-
-    // run at dates should be within a few seconds of each other
-    const firstRunAt = taskRunAtDates[0].getTime();
-    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
-
-    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
-
-    // background task load should be 0 or 100 since we're only running these tasks
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 100).toBe(true);
-    }
-  });
-
-  it('should claim tasks until the next task will exceed capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok<number>).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const now = new Date();
-    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-    mockTaskTypeXLCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 6 normal cost tasks for total cost of 12
-    const ids: string[] = [];
-    times(6, () => ids.push(uuidV4()));
-    const runAt1 = new Date(now.valueOf() - 5);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt: runAt1,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    // inject 1 XL cost task that will put us over the max cost capacity of 20
-    const xlid = uuidV4();
-    const runAt2 = now;
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: xlid,
-      taskType: '_xlCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt2,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(xlid);
-
-    // inject one more normal cost task
-    const runAt3 = new Date(now.valueOf() + 5);
-    const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0 or 60 or 100 since we're only running these tasks
-    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
-    // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
-});
mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_42.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: 
TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 
'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts 
b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_43.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const 
taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok<number>).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok<number>).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates:
Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for a total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be the XL cost task and should run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // the last task should be normal cost and should run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0, 60, or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the XL cost task in the queue - // should be 60 during the next claim cycle where we claimed the XL cost task and the remaining normal cost task: (10 + 2) / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -});
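The backgroundTaskLoads assertion above depends on how the worker utilization stat is reported. The following worked illustration shows why only 0, 60, and 100 can appear, assuming (from the test comments, not from the monitoring code) that a cycle reports 100 when claiming stopped early because the next task would not fit, and the claimed cost as a percentage of the budget otherwise; reportedLoad is a hypothetical helper, not a real API.

// Illustrative sketch only: how the asserted load values 0, 60, and 100 arise.
function reportedLoad(claimedCost: number, costBudget: number, stoppedOnCapacity: boolean): number {
  // A cycle that stops claiming on capacity is treated as fully saturated.
  return stoppedOnCapacity ? 100 : Math.round((claimedCost / costBudget) * 100);
}

reportedLoad(12, 20, true); // 100: six Normal tasks claimed, XL left in the queue
reportedLoad(12, 20, false); // 60: the XL task (10) plus the final Normal task (2) on the next poll
reportedLoad(0, 20, false); // 0: an idle polling cycle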
a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_44.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = 
jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: 
number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 
100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_45.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); 
- -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as 
Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load 
=== 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_46.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new 
actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - 
return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load 
of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_47.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: 
mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - 
map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal 
capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_48.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: 
mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - 
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok<number>).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const now = new Date();
-    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-    mockTaskTypeXLCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 6 normal cost tasks for total cost of 12
-    const ids: string[] = [];
-    times(6, () => ids.push(uuidV4()));
-    const runAt1 = new Date(now.valueOf() - 5);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt: runAt1,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    // inject 1 XL cost task that will put us over the max cost capacity of 20
-    const xlid = uuidV4();
-    const runAt2 = now;
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: xlid,
-      taskType: '_xlCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt2,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(xlid);
-
-    // inject one more normal cost task
-    const runAt3 = new Date(now.valueOf() + 5);
-    const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be an XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // the last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0, 60, or 100 since we're only running these tasks:
-    // it should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue,
-    // and 60 during the next claim cycle where we claimed the large capacity task and the remaining normal task: (10 + 2) / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
-});
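The load values the deleted assertions accept can be reproduced with a standalone sketch of the cost arithmetic. Everything below is illustrative: claimUpToCapacity and reportedUtilization are hypothetical helpers, not task manager APIs, and the report-100-when-work-is-deferred rule is an assumption inferred from the comments in this test, not confirmed plugin behavior.

// Illustrative model of capacity-based claiming; costs and capacity mirror
// the test config (capacity 10, TaskCost.Normal = 2, TaskCost.ExtraLarge = 10,
// so the per-cycle cost budget is 20).
enum Cost {
  Normal = 2,
  ExtraLarge = 10,
}
const MAX_COST = 10 * Cost.Normal; // capacity 10 expressed as claimable cost

// Claim queued tasks in order, stopping at the first task that would exceed
// the remaining cost budget; it and everything behind it wait a cycle.
function claimUpToCapacity(queue: Cost[], maxCost: number) {
  const claimed: Cost[] = [];
  let used = 0;
  for (const cost of queue) {
    if (used + cost > maxCost) break;
    claimed.push(cost);
    used += cost;
  }
  return { claimed, used, deferred: queue.slice(claimed.length) };
}

// Assumption inferred from the comments above: a cycle that leaves work
// behind for lack of capacity reports 100, otherwise used cost over budget.
function reportedUtilization(used: number, deferredCount: number, maxCost: number): number {
  return deferredCount > 0 ? 100 : Math.round((used / maxCost) * 100);
}

// Six normal tasks, one XL task, one more normal task, in runAt order.
const queue: Cost[] = [...Array(6).fill(Cost.Normal), Cost.ExtraLarge, Cost.Normal];

// Cycle 1: 6 * 2 = 12 fits, but the XL task would push it to 22 > 20, so it waits.
const cycle1 = claimUpToCapacity(queue, MAX_COST);
console.log(reportedUtilization(cycle1.used, cycle1.deferred.length, MAX_COST)); // 100

// Cycle 2: the XL task plus the last normal task: (10 + 2) / 20 = 60%.
const cycle2 = claimUpToCapacity(cycle1.deferred, MAX_COST);
console.log(reportedUtilization(cycle2.used, cycle2.deferred.length, MAX_COST)); // 60

Under those assumptions the two claim cycles report 100 and then 60, matching the 0/60/100 set the assertions allow.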
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_49.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_50.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_51.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_52.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_53.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_54.test.ts
deleted file mode 100644
index 61928968212cf..0000000000000
firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_55.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: 
mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load 
should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - 
expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_56.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - 
}), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - 
firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval 
has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_57.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo 
|| '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 
1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost 
and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_58.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: 
(state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = 
taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - 
- // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_59.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - 
stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = 
taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - 
firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_60.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost 
task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds 
of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - 
expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); From d31782d20f06f07780b4daec17d67ff6de10f9a4 Mon Sep 17 00:00:00 2001 From: Ying Date: Tue, 26 Nov 2024 07:49:57 -0500 Subject: [PATCH 04/11] Removing some --- ...er_capacity_based_claiming.test_36.test.ts | 331 ------------------ ...er_capacity_based_claiming.test_37.test.ts | 331 ------------------ ...er_capacity_based_claiming.test_38.test.ts | 331 ------------------ ...er_capacity_based_claiming.test_39.test.ts | 331 ------------------ ...er_capacity_based_claiming.test_40.test.ts | 331 ------------------ 5 files changed, 1655 deletions(-) delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - 
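    // A sketch of the capacity arithmetic this config implies, assuming a
    // normal-cost task costs 2 and an XL-cost task costs 10 (the values the
    // inline test comments below rely on; they are not spelled out here):
    // with the `mget` claim strategy, `capacity: 10` translates to a maximum
    // claimable cost per poll cycle of:
    //   const maxClaimableCost = 10 * 2; // capacity 10 -> max cost 20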
expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - 
await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - 
}, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => 
ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 10, - poll_interval: POLLING_INTERVAL, - 
unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total 
cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: `mget`, - capacity: 
10, - poll_interval: POLLING_INTERVAL, - unsafe: { - exclude_task_types: ['[A-Za-z]*'], - }, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - - expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); - createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should claim tasks to full capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const taskRunAtDates: Date[] = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); - return { state: { foo: 'test' } }; - }); - - // inject 10 normal cost tasks with the same runAt value - const ids: string[] = []; - times(10, () => ids.push(uuidV4())); - - const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); - }); - - expect(taskRunAtDates.length).toBe(10); - - // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); - - // run at dates should be within a few seconds of each other - const firstRunAt = taskRunAtDates[0].getTime(); - const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); - - expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); - - // background task load should be 0 or 100 since we're only running these tasks - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 100).toBe(true); - } - }); - - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - 
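    // Expected claim sequence for the tasks injected below, sketched under the
    // cost assumptions the surrounding comments imply (normal task = 2,
    // XL task = 10, max claimable cost = 20 per cycle):
    //   const cycle1 = 6 * 2;              // 12 <= 20: six normal tasks claimed together
    //   const cycle1PlusXl = 12 + 10;      // 22 > 20: the XL task is left for the next cycle
    //   const cycle2Load = (10 + 2) / 20;  // 0.6: XL plus the last normal task -> 60% load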
// inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); -}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts deleted file mode 100644 index 61928968212cf..0000000000000 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -/* - * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { times } from 'lodash'; -import { TaskCost, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; -import { CreateMonitoringStatsOpts } from '../monitoring'; -import { filter, map } from 'rxjs'; -import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; -import { TaskLifecycleEvent } from '../polling_lifecycle'; -import { Ok } from '../lib/result_type'; - -const POLLING_INTERVAL = 5000; -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); -jest.mock('../monitoring', () => { - const actual = jest.requireActual('../monitoring'); - return { - ...actual, - createMonitoringStats: jest.fn().mockImplementation((opts) => { - return new actual.createMonitoringStats(opts); - }), - }; -}); - -const mockTaskTypeNormalCostRunFn = jest.fn(); -const mockCreateTaskRunnerNormalCost = jest.fn(); -const mockTaskTypeNormalCost = { - title: 'Normal cost task', - description: '', - cost: TaskCost.Normal, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ - run: mockTaskTypeNormalCostRunFn, - })), -}; -const mockTaskTypeXLCostRunFn = jest.fn(); -const mockCreateTaskRunnerXLCost = jest.fn(); -const mockTaskTypeXLCost = { - title: 'XL cost task', - description: '', - cost: TaskCost.ExtraLarge, - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ - run: mockTaskTypeXLCostRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - opts.definitions.registerTaskDefinitions({ - _normalCostType: mockTaskTypeNormalCost, - _xlCostType: mockTaskTypeXLCost, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('capacity based claiming', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createMonitoringStatsOpts: CreateMonitoringStatsOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - 
claim_strategy: `mget`,
-          capacity: 10,
-          poll_interval: POLLING_INTERVAL,
-          unsafe: {
-            exclude_task_types: ['[A-Za-z]*'],
-          },
-        },
-      },
-    });
-    esServer = setupResult.esServer;
-    kibanaServer = setupResult.kibanaServer;
-
-    expect(taskManagerStartSpy).toHaveBeenCalledTimes(1);
-    taskManagerPlugin = taskManagerStartSpy.mock.results[0].value;
-
-    expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1);
-
-    expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1);
-    createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0];
-  });
-
-  afterAll(async () => {
-    if (kibanaServer) {
-      await kibanaServer.stop();
-    }
-    if (esServer) {
-      await esServer.stop();
-    }
-  });
-
-  beforeEach(() => {
-    jest.clearAllMocks();
-  });
-
-  afterEach(async () => {
-    while (taskIdsToRemove.length > 0) {
-      const id = taskIdsToRemove.pop();
-      await taskManagerPlugin.removeIfExists(id!);
-    }
-  });
-
-  it('should claim tasks to full capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const taskRunAtDates: Date[] = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push(new Date());
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 10 normal cost tasks with the same runAt value
-    const ids: string[] = [];
-    times(10, () => ids.push(uuidV4()));
-
-    const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10);
-    });
-
-    expect(taskRunAtDates.length).toBe(10);
-
-    // eslint-disable-next-line no-console
-    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
-
-    // run at dates should be within a few seconds of each other
-    const firstRunAt = taskRunAtDates[0].getTime();
-    const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime();
-
-    expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000);
-
-    // background task load should be 0 or 100 since we're only running these tasks
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 100).toBe(true);
-    }
-  });
-
-  it('should claim tasks until the next task will exceed capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const now = new Date();
-    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-    mockTaskTypeXLCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 6 normal cost tasks for total cost of 12
-    const ids: string[] = [];
-    times(6, () => ids.push(uuidV4()));
-    const runAt1 = new Date(now.valueOf() - 5);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt: runAt1,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    // inject 1 XL cost task that will put us over the max cost capacity of 20
-    const xlid = uuidV4();
-    const runAt2 = now;
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: xlid,
-      taskType: '_xlCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt2,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(xlid);
-
-    // inject one more normal cost task
-    const runAt3 = new Date(now.valueOf() + 5);
-    const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0 or 60 or 100 since we're only running these tasks
-    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
-    // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
-});
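Both deleted tests above poll with a local retry() helper until their assertions pass. The helper's definition is outside these hunks; a minimal sketch of what such a poll-until-pass helper could look like, assuming a fixed-interval loop (the attempt count and delay below are illustrative, not the suite's actual values):

  // Hypothetical stand-in for the suite's retry() helper: re-run an async
  // assertion callback until it stops throwing, or give up after maxAttempts.
  async function retry(
    cb: () => Promise<void>,
    maxAttempts: number = 20,
    delayMs: number = 500
  ): Promise<void> {
    let lastError: unknown;
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        return await cb(); // assertions passed
      } catch (err) {
        lastError = err; // not satisfied yet; wait and try again
        await new Promise((resolve) => setTimeout(resolve, delayMs));
      }
    }
    throw lastError;
  }
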
From 0218042200f1cd757a7832bbc70bd54bfe0742f0 Mon Sep 17 00:00:00 2001
From: Ying
Date: Tue, 26 Nov 2024 15:57:04 -0500
Subject: [PATCH 05/11] Fixing test

---
 .../task_cost_check.test.ts.snap             | 122 ------------------
 .../task_priority_check.test.ts.snap         |  10 --
 2 files changed, 132 deletions(-)
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap
 delete mode 100644 x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap

diff --git a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap b/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap
deleted file mode 100644
index 754d9f0c66b4b..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap
+++ /dev/null
@@ -1,122 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`Task cost checks detects tasks with cost definitions 1`] = `
-Array [
-  Object {
-    "cost": 1,
-    "taskType": "actions:.email",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.index",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.pagerduty",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.swimlane",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.server-log",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.slack",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.slack_api",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.webhook",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.cases-webhook",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.xmatters",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.servicenow",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.servicenow-sir",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.servicenow-itom",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.jira",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.teams",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.torq",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.opsgenie",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.tines",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.gen-ai",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.bedrock",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.gemini",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.d3security",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.resilient",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.thehive",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.sentinelone",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.crowdstrike",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.cases",
-  },
-  Object {
-    "cost": 1,
-    "taskType": "actions:.observability-ai-assistant",
-  },
-  Object {
-    "cost": 10,
-    "taskType": "alerting:siem.indicatorRule",
-  },
-]
-`;

diff --git a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap b/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap
deleted file mode 100644
index 412e2ae77bb5b..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap
+++ /dev/null
@@ -1,10 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`Task priority checks detects tasks with priority definitions 1`] = `
-Array [
-  Object {
-    "priority": 1,
-    "taskType": "ad_hoc_run-backfill",
-  },
-]
-`;
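The deleted cost snapshot pins every action type at cost 1 and the SIEM indicator rule at cost 10, which lines up with the cost tiers the claiming tests rely on: normal tasks cost 2 (six of them total 12) and a capacity of 10 translates to a cost capacity of 20. A sketch of such a cost enum; only the numeric values appear in this document, so the member names here are assumptions:

  // Cost tiers implied by the snapshot and the claiming tests: actions are
  // cheap (1), ordinary task types cost 2, and a few heavy types cost 10.
  enum TaskCost {
    Tiny = 1, // e.g. the actions:.email ... actions:.observability-ai-assistant entries
    Normal = 2, // assumed default for task types that declare no cost
    ExtraLarge = 10, // e.g. alerting:siem.indicatorRule
  }

  // With capacity 10 and normal-cost tasks costing 2, the total cost
  // capacity used by the tests works out to 10 * TaskCost.Normal = 20.
  const costCapacity: number = 10 * TaskCost.Normal;
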
From e294206949def75957328737784c81e86e15d55d Mon Sep 17 00:00:00 2001
From: Ying
Date: Wed, 4 Dec 2024 17:09:40 -0500
Subject: [PATCH 06/11] i dunno dude

---
 .../server/integration_tests/run.sh           |   2 +-
 ...sk_manager_capacity_based_claiming.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_01.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_02.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_03.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_04.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_05.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_06.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_07.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_08.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_09.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_10.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_11.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_12.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_13.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_14.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_15.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_16.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_17.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_18.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_19.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_20.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_21.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_22.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_23.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_24.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_25.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_26.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_27.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_28.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_29.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_30.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_31.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_32.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_33.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_34.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_35.test.ts | 241 ++++++-------
 ...er_capacity_based_claiming.test_36.test.ts | 334 ++++++++++++++++++
 ...er_capacity_based_claiming.test_37.test.ts | 334 ++++++++++++++++++
 ...er_capacity_based_claiming.test_38.test.ts | 334 ++++++++++++++++++
 ...er_capacity_based_claiming.test_39.test.ts | 334 ++++++++++++++++++
 ...er_capacity_based_claiming.test_40.test.ts | 334 ++++++++++++++++++
 .../server/task_claimers/strategy_mget.ts     |   6 +-
 43 files changed, 6066 insertions(+), 4288 deletions(-)
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts
 create mode 100644 x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts
diff --git a/x-pack/plugins/task_manager/server/integration_tests/run.sh b/x-pack/plugins/task_manager/server/integration_tests/run.sh
index c790592acca9d..c964f27a64742 100755
--- a/x-pack/plugins/task_manager/server/integration_tests/run.sh
+++ b/x-pack/plugins/task_manager/server/integration_tests/run.sh
@@ -4,7 +4,7 @@ source_file="task_manager_capacity_based_claiming.test.ts"
 
 # Loop from 001 to 100
-for i in $(seq -w 1 60); do
+for i in $(seq -w 1 40); do
   # Construct the new file name with suffix
   destination_file="${source_file%.*}_$i.test.${source_file##*.}"
   # Copy the file
 
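The loop above stamps out numbered copies of the flaky test so Jest will run the same spec many times in one pass; this patch drops the copy count from 60 to 40. An equivalent sketch of that duplication step in TypeScript (Node), assuming it is run from the integration_tests directory:

  import { copyFileSync } from 'fs';

  const sourceFile = 'task_manager_capacity_based_claiming.test.ts';

  // Mirror run.sh: copy the test 40 times with zero-padded suffixes, producing
  // task_manager_capacity_based_claiming.test_01.test.ts through ..._40.test.ts,
  // matching the file names in the diffstat above.
  for (let i = 1; i <= 40; i++) {
    const suffix = String(i).padStart(2, '0'); // seq -w zero-pads the same way
    const destinationFile = sourceFile.replace(/\.ts$/, `_${suffix}.test.ts`);
    copyFileSync(sourceFile, destinationFile);
  }
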
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
index 61928968212cf..34464fccdfbf9 100644
--- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
@@ -160,7 +160,10 @@ describe('capacity based claiming', () => {
     });
     const taskRunAtDates: Date[] = [];
     mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push(new Date());
+      const now = new Date();
+      // eslint-disable-next-line no-console
+      console.log(`runAt Date ${now.toISOString()}`);
+      taskRunAtDates.push(now);
       return { state: { foo: 'test' } };
     });
 
@@ -210,122 +213,122 @@ describe('capacity based claiming', () => {
     }
   });
 
-  it('should claim tasks until the next task will exceed capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const now = new Date();
-    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-    mockTaskTypeXLCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 6 normal cost tasks for total cost of 12
-    const ids: string[] = [];
-    times(6, () => ids.push(uuidV4()));
-    const runAt1 = new Date(now.valueOf() - 5);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt: runAt1,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    // inject 1 XL cost task that will put us over the max cost capacity of 20
-    const xlid = uuidV4();
-    const runAt2 = now;
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: xlid,
-      taskType: '_xlCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt2,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(xlid);
-
-    // inject one more normal cost task
-    const runAt3 = new Date(now.valueOf() + 5);
-    const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0 or 60 or 100 since we're only running these tasks
-    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
-    // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
+  // it('should claim tasks until the next task will exceed capacity', async () => {
+  //   const backgroundTaskLoads: number[] = [];
+  //   createMonitoringStatsOpts.taskPollingLifecycle?.events
+  //     .pipe(
+  //       filter(isTaskManagerWorkerUtilizationStatEvent),
+  //       map((taskEvent: TaskLifecycleEvent) => {
+  //         return (taskEvent.event as unknown as Ok).value;
+  //       })
+  //     )
+  //     .subscribe((load: number) => {
+  //       backgroundTaskLoads.push(load);
+  //     });
+  //   const now = new Date();
+  //   const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+  //   mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+  //   mockTaskTypeXLCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+
+  //   // inject 6 normal cost tasks for total cost of 12
+  //   const ids: string[] = [];
+  //   times(6, () => ids.push(uuidV4()));
+  //   const runAt1 = new Date(now.valueOf() - 5);
+  //   for (const id of ids) {
+  //     await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //       id,
+  //       taskType: '_normalCostType',
+  //       params: {},
+  //       state: { foo: 'test' },
+  //       stateVersion: 1,
+  //       runAt: runAt1,
+  //       enabled: true,
+  //       scheduledAt: new Date(),
+  //       attempts: 0,
+  //       status: TaskStatus.Idle,
+  //       startedAt: null,
+  //       retryAt: null,
+  //       ownerId: null,
+  //     });
+  //     taskIdsToRemove.push(id);
+  //   }
+
+  //   // inject 1 XL cost task that will put us over the max cost capacity of 20
+  //   const xlid = uuidV4();
+  //   const runAt2 = now;
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: xlid,
+  //     taskType: '_xlCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt2,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(xlid);
+
+  //   // inject one more normal cost task
+  //   const runAt3 = new Date(now.valueOf() + 5);
+  //   const lastid = uuidV4();
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: lastid,
+  //     taskType: '_normalCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt3,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(lastid);
+
+  //   // retry until all tasks have been run
+  //   await retry(async () => {
+  //     expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+  //     expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+  //   });
+
+  //   expect(taskRunAtDates.length).toBe(8);
+
+  //   const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+  //   // the first 6 tasks should have been run at the same time (adding some fudge factor)
+  //   // and they should all be normal cost tasks
+  //   for (let i = 0; i < 6; i++) {
+  //     expect(taskRunAtDates[i].type).toBe('normal');
+  //     expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+  //   }
+
+  //   // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
+  //   expect(taskRunAtDates[6].type).toBe('xl');
+  //   expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+  //   // last task should be normal cost and be run after one polling interval has passed
+  //   expect(taskRunAtDates[7].type).toBe('normal');
+  //   expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+  //   // background task load should be 0 or 60 or 100 since we're only running these tasks
+  //   // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
+  //   // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60%
+  //   for (const load of backgroundTaskLoads) {
+  //     expect(load === 0 || load === 60 || load === 100).toBe(true);
+  //   }
+  // });
 });
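The commented-out assertions above encode the claim-cycle arithmetic: with capacity 10 and a cost capacity of 20, the first cycle claims the six normal tasks (6 x 2 = 12) and must leave the XL task (cost 10) behind, since 12 + 10 would exceed 20; per the test's own comments, that early-stopped cycle reports 100% utilization. The next cycle claims the XL task plus the last normal task, for (10 + 2) / 20 = 60%. A worked check of those numbers; the cost values come from the test, while the greedy helper below is only an illustration of the claiming order, not the mget strategy's actual code:

  const NORMAL_COST = 2; // '_normalCostType' tasks
  const XL_COST = 10; // the '_xlCostType' task
  const COST_CAPACITY = 20; // capacity 10 * normal cost 2

  // Greedily claim tasks in runAt order until the next one would not fit.
  function claimCycle(
    queue: number[],
    capacity: number
  ): { claimed: number[]; remaining: number[] } {
    const claimed: number[] = [];
    let used = 0;
    while (queue.length > 0 && used + queue[0] <= capacity) {
      used += queue[0];
      claimed.push(queue.shift()!);
    }
    return { claimed, remaining: queue };
  }

  // Cycle 1: the six normal tasks fit (cost 12); the XL task (10) would overflow 20.
  const cycle1 = claimCycle([2, 2, 2, 2, 2, 2, XL_COST, NORMAL_COST], COST_CAPACITY);
  // cycle1.claimed -> [2, 2, 2, 2, 2, 2], cycle1.remaining -> [10, 2]

  // Cycle 2: the XL task plus the final normal task: (10 + 2) / 20 = 60% utilization.
  const cycle2 = claimCycle(cycle1.remaining, COST_CAPACITY);
  const utilization = (cycle2.claimed.reduce((a, b) => a + b, 0) / COST_CAPACITY) * 100; // 60
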
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
index 61928968212cf..34464fccdfbf9 100644
--- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
@@ -160,7 +160,10 @@ describe('capacity based claiming', () => {
     });
     const taskRunAtDates: Date[] = [];
     mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push(new Date());
+      const now = new Date();
+      // eslint-disable-next-line no-console
+      console.log(`runAt Date ${now.toISOString()}`);
+      taskRunAtDates.push(now);
       return { state: { foo: 'test' } };
     });
 
@@ -210,122 +213,122 @@ describe('capacity based claiming', () => {
     }
   });
 
-  it('should claim tasks until the next task will exceed capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-
createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in 
the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task 
should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: 
'_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // 
params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts @@ -160,7 +160,10 @@ 
describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - 
expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // 
expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await 
injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ 
type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // 
expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - 
expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const 
runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: 
Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => 
{ + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // 
expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - 
const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost 
capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ 
-210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); 
- - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // 
for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us 
over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new 
Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts index 61928968212cf..34464fccdfbf9 100644 --- 
a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - 
expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new 
Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { 
foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // 
backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the 
large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, 
- retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new 
Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - 
map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal 
capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // 
expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - 
scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // 
scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; 
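// The mock change below records a timestamp per task run so the suite can compare each
// task's actual start time against the first run and against POLLING_INTERVAL. A minimal
// sketch of that comparison, assuming a hypothetical `runs` array collected the same way
// and the 500ms fudge factor these tests use:
const sameClaimCycle = (runs: Date[], fudgeMs = 500): boolean => {
  // tasks claimed in the same cycle should all start within the fudge factor of the first run
  const first = runs[0].getTime();
  return runs.every((d) => d.getTime() - first <= fudgeMs);
};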
mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - 
firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // 
expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - 
taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // 
}); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff 
--git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - 
expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); 
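// This injection sequence encodes the claim rule under test: tasks are taken in runAt
// order until the next one would push the total cost past capacity. A minimal sketch,
// assuming hypothetical costs (normal = 2, xl = 10) and the max cost capacity of 20
// configured for this suite:
const claimUpToCapacity = (costs: number[], capacity: number): number[] => {
  const claimed: number[] = [];
  let used = 0;
  for (const cost of costs) {
    if (used + cost > capacity) break; // leave this and later tasks for the next cycle
    claimed.push(cost);
    used += cost;
  }
  return claimed;
};
// claimUpToCapacity([2, 2, 2, 2, 2, 2, 10, 2], 20) claims the six normal tasks (cost 12);
// the xl task (cost 10) and the final normal task run one polling interval later, matching
// the 7-normal/1-xl call counts asserted in these tests.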
+ // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - 
mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = 
[]; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // 
expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - 
const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost 
capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ 
-210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); 
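-    // A worked sketch of the arithmetic behind the load assertions below, using only the
-    // costs this test states (normal task cost 2, XL task cost 10, max cost capacity 20):
-    //   claim cycle 1: 6 normal tasks -> 6 * 2 = 12 <= 20; adding the XL task (cost 10)
-    //                  would reach 22 > 20, so it stays queued
-    //   claim cycle 2: XL task + the late normal task -> (10 + 2) / 20 = 60% load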
- - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // 
for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us 
over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new 
Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts index 61928968212cf..34464fccdfbf9 100644 --- 
a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - 
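-      // taskRunAtDates is ordered by observed run time: indexes 0-5 should be the normal
-      // tasks from the first claim cycle, index 6 the XL task, index 7 the late normal task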
expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new 
Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { 
foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // 
backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the 
large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, 
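-        // startedAt, retryAt, and ownerId are deliberately null so the injected document
-        // looks unclaimed; the poller that claims the task fills these in (this is the task
-        // document shape used throughout these tests, not a general schema)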
- retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new 
Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - 
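-        // filter() narrows the task lifecycle event stream to worker-utilization stat events,
-        // and map() below unwraps each Ok result into the plain load number that the
-        // subscriber pushes onto backgroundTaskLoads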
map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal 
capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // 
expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - 
scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // 
scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; 
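// The hunk below (repeated in each copied test file) swaps the one-line
// timestamp push for a version that also logs every run time while the flaky
// test is being chased. A standalone Jest sketch of that mock; the 500ms
// fudge factor and the shape of the returned task state come from the
// surrounding test, everything else is plain Jest:
const runAtDates: Date[] = [];
const runFn = jest.fn(() => {
  const now = new Date();
  // eslint-disable-next-line no-console
  console.log(`runAt Date ${now.toISOString()}`); // temporary debug output
  runAtDates.push(now);
  return { state: { foo: 'test' } };
});

// Later assertions compare these timestamps, e.g. tasks claimed within one
// cycle should start within the fudge factor:
//   expect(runAtDates[i].getTime() - runAtDates[0].getTime()).toBeLessThanOrEqual(500);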
mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - 
firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // 
expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - 
taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // 
}); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff 
--git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - 
expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); 
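// Why the assertions in this (skipped) test expect that split: the suite gives
// normal tasks a cost of 2 and XL tasks a cost of 10 against a claim capacity
// of 20 cost units. Six normal tasks use 12; the XL task would push the cycle
// to 22, so claiming stops there, and both the XL task and the final normal
// task wait for the next poll. On that next cycle the load is
// (10 + 2) / 20 = 60%, matching the 0/60/100 assertion. A minimal sketch of
// that rule (costs and capacity mirror this suite's config, not necessarily
// the plugin's defaults):
interface QueuedTask {
  id: string;
  cost: number;
}

function claimUntilCapacity(queue: QueuedTask[], capacity: number): QueuedTask[] {
  const claimed: QueuedTask[] = [];
  let used = 0;
  for (const task of queue) {
    if (used + task.cost > capacity) break; // the next task would exceed capacity
    used += task.cost;
    claimed.push(task);
  }
  return claimed;
}

const queue: QueuedTask[] = [
  ...Array.from({ length: 6 }, (_, i) => ({ id: `normal-${i}`, cost: 2 })),
  { id: 'xl', cost: 10 },
  { id: 'last-normal', cost: 2 },
];

// First cycle claims the six normal tasks (cost 12); the XL task stops the
// scan, so it and 'last-normal' run one polling interval later:
claimUntilCapacity(queue, 20).map((t) => t.id); // ['normal-0', ..., 'normal-5']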
+ // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - 
mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = 
[]; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // 
expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - 
const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0 or 60 or 100 since we're only running these tasks
-    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
-    // should be 60 during the next claim cycle where we claimed the large capacity task and one normal cost task: (10 + 2) / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
+  // it('should claim tasks until the next task will exceed capacity', async () => {
+  //   const backgroundTaskLoads: number[] = [];
+  //   createMonitoringStatsOpts.taskPollingLifecycle?.events
+  //     .pipe(
+  //       filter(isTaskManagerWorkerUtilizationStatEvent),
+  //       map((taskEvent: TaskLifecycleEvent) => {
+  //         return (taskEvent.event as unknown as Ok<number>).value;
+  //       })
+  //     )
+  //     .subscribe((load: number) => {
+  //       backgroundTaskLoads.push(load);
+  //     });
+  //   const now = new Date();
+  //   const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+  //   mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+  //   mockTaskTypeXLCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+
+  //   // inject 6 normal cost tasks for total cost of 12
+  //   const ids: string[] = [];
+  //   times(6, () => ids.push(uuidV4()));
+  //   const runAt1 = new Date(now.valueOf() - 5);
+  //   for (const id of ids) {
+  //     await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //       id,
+  //       taskType: '_normalCostType',
+  //       params: {},
+  //       state: { foo: 'test' },
+  //       stateVersion: 1,
+  //       runAt: runAt1,
+  //       enabled: true,
+  //       scheduledAt: new Date(),
+  //       attempts: 0,
+  //       status: TaskStatus.Idle,
+  //       startedAt: null,
+  //       retryAt: null,
+  //       ownerId: null,
+  //     });
+  //     taskIdsToRemove.push(id);
+  //   }
+
+  //   // inject 1 XL cost task that will put us over the max cost capacity of 20
+  //   const xlid = uuidV4();
+  //   const runAt2 = now;
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: xlid,
+  //     taskType: '_xlCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt2,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(xlid);
+
+  //   // inject one more normal cost task
+  //   const runAt3 = new Date(now.valueOf() + 5);
+  //   const lastid = uuidV4();
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: lastid,
+  //     taskType: '_normalCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt3,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(lastid);
+
+  //   // retry until all tasks have been run
+  //   await retry(async () => {
+  //     expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+  //     expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+  //   });
+
+  //   expect(taskRunAtDates.length).toBe(8);
+
+  //   const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+  //   // the first 6 tasks should have been run at the same time (adding some fudge factor)
+  //   // and they should all be normal cost tasks
+  //   for (let i = 0; i < 6; i++) {
+  //     expect(taskRunAtDates[i].type).toBe('normal');
+  //     expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
+  //   }
+
+  //   // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
+  //   expect(taskRunAtDates[6].type).toBe('xl');
+  //   expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+  //   // last task should be normal cost and be run after one polling interval has passed
+  //   expect(taskRunAtDates[7].type).toBe('normal');
+  //   expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
+
+  //   // background task load should be 0 or 60 or 100 since we're only running these tasks
+  //   // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
+  //   // should be 60 during the next claim cycle where we claimed the large capacity task and one normal cost task: (10 + 2) / 20 = 60%
+  //   for (const load of backgroundTaskLoads) {
+  //     expect(load === 0 || load === 60 || load === 100).toBe(true);
+  //   }
+  // });
 });
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
index 61928968212cf..34464fccdfbf9 100644
--- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
+++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
@@ -160,7 +160,10 @@ describe('capacity based claiming', () => {
       });
     const taskRunAtDates: Date[] = [];
     mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push(new Date());
+      const now = new Date();
+      // eslint-disable-next-line no-console
+      console.log(`runAt Date ${now.toISOString()}`);
+      taskRunAtDates.push(now);
       return { state: { foo: 'test' } };
     });

@@ -210,122 +213,122 @@ describe('capacity based claiming', () => {
     }
   });

-  it('should claim tasks until the next task will exceed capacity', async () => {
-    const backgroundTaskLoads: number[] = [];
-    createMonitoringStatsOpts.taskPollingLifecycle?.events
-      .pipe(
-        filter(isTaskManagerWorkerUtilizationStatEvent),
-        map((taskEvent: TaskLifecycleEvent) => {
-          return (taskEvent.event as unknown as Ok<number>).value;
-        })
-      )
-      .subscribe((load: number) => {
-        backgroundTaskLoads.push(load);
-      });
-    const now = new Date();
-    const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
-    mockTaskTypeNormalCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'normal', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-    mockTaskTypeXLCostRunFn.mockImplementation(() => {
-      taskRunAtDates.push({ type: 'xl', runAt: new Date() });
-      return { state: { foo: 'test' } };
-    });
-
-    // inject 6 normal cost tasks for total cost of 12
-    const ids: string[] = [];
-    times(6, () => ids.push(uuidV4()));
-    const runAt1 = new Date(now.valueOf() - 5);
-    for (const id of ids) {
-      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-        id,
-        taskType: '_normalCostType',
-        params: {},
-        state: { foo: 'test' },
-        stateVersion: 1,
-        runAt: runAt1,
-        enabled: true,
-        scheduledAt: new Date(),
-        attempts: 0,
-        status: TaskStatus.Idle,
-        startedAt: null,
-        retryAt: null,
-        ownerId: null,
-      });
-      taskIdsToRemove.push(id);
-    }
-
-    // inject 1 XL cost task that will put us over the max cost capacity of 20
-    const xlid = uuidV4();
-    const runAt2 = now;
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: xlid,
-      taskType: '_xlCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt2,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(xlid);
-
-    // inject one more normal cost task
-    const runAt3 = new Date(now.valueOf() + 5);
-    const lastid = uuidV4();
-    await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
-      id: lastid,
-      taskType: '_normalCostType',
-      params: {},
-      state: { foo: 'test' },
-      stateVersion: 1,
-      runAt: runAt3,
-      enabled: true,
-      scheduledAt: new Date(),
-      attempts: 0,
-      status: TaskStatus.Idle,
-      startedAt: null,
-      retryAt: null,
-      ownerId: null,
-    });
-    taskIdsToRemove.push(lastid);
-
-    // retry until all tasks have been run
-    await retry(async () => {
-      expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
-      expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
-    });
-
-    expect(taskRunAtDates.length).toBe(8);
-
-    const firstRunAt = taskRunAtDates[0].runAt.getTime();
-
-    // the first 6 tasks should have been run at the same time (adding some fudge factor)
-    // and they should all be normal cost tasks
-    for (let i = 0; i < 6; i++) {
-      expect(taskRunAtDates[i].type).toBe('normal');
-      expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500);
-    }
-
-    // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor)
-    expect(taskRunAtDates[6].type).toBe('xl');
-    expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // last task should be normal cost and be run after one polling interval has passed
-    expect(taskRunAtDates[7].type).toBe('normal');
-    expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500);
-
-    // background task load should be 0 or 60 or 100 since we're only running these tasks
-    // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue
-    // should be 60 during the next claim cycle where we claimed the large capacity task and one normal cost task: (10 + 2) / 20 = 60%
-    for (const load of backgroundTaskLoads) {
-      expect(load === 0 || load === 60 || load === 100).toBe(true);
-    }
-  });
+  // it('should claim tasks until the next task will exceed capacity', async () => {
+  //   const backgroundTaskLoads: number[] = [];
+  //   createMonitoringStatsOpts.taskPollingLifecycle?.events
+  //     .pipe(
+  //       filter(isTaskManagerWorkerUtilizationStatEvent),
+  //       map((taskEvent: TaskLifecycleEvent) => {
+  //         return (taskEvent.event as unknown as Ok<number>).value;
+  //       })
+  //     )
+  //     .subscribe((load: number) => {
+  //       backgroundTaskLoads.push(load);
+  //     });
+  //   const now = new Date();
+  //   const taskRunAtDates: Array<{ runAt: Date; type: string }> = [];
+  //   mockTaskTypeNormalCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'normal', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+  //   mockTaskTypeXLCostRunFn.mockImplementation(() => {
+  //     taskRunAtDates.push({ type: 'xl', runAt: new Date() });
+  //     return { state: { foo: 'test' } };
+  //   });
+
+  //   // inject 6 normal cost tasks for total cost of 12
+  //   const ids: string[] = [];
+  //   times(6, () => ids.push(uuidV4()));
+  //   const runAt1 = new Date(now.valueOf() - 5);
+  //   for (const id of ids) {
+  //     await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //       id,
+  //       taskType: '_normalCostType',
+  //       params: {},
+  //       state: { foo: 'test' },
+  //       stateVersion: 1,
+  //       runAt: runAt1,
+  //       enabled: true,
+  //       scheduledAt: new Date(),
+  //       attempts: 0,
+  //       status: TaskStatus.Idle,
+  //       startedAt: null,
+  //       retryAt: null,
+  //       ownerId: null,
+  //     });
+  //     taskIdsToRemove.push(id);
+  //   }
+
+  //   // inject 1 XL cost task that will put us over the max cost capacity of 20
+  //   const xlid = uuidV4();
+  //   const runAt2 = now;
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: xlid,
+  //     taskType: '_xlCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt2,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(xlid);
+
+  //   // inject one more normal cost task
+  //   const runAt3 = new Date(now.valueOf() + 5);
+  //   const lastid = uuidV4();
+  //   await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
+  //     id: lastid,
+  //     taskType: '_normalCostType',
+  //     params: {},
+  //     state: { foo: 'test' },
+  //     stateVersion: 1,
+  //     runAt: runAt3,
+  //     enabled: true,
+  //     scheduledAt: new Date(),
+  //     attempts: 0,
+  //     status: TaskStatus.Idle,
+  //     startedAt: null,
+  //     retryAt: null,
+  //     ownerId: null,
+  //   });
+  //   taskIdsToRemove.push(lastid);
+
+  //   // retry until all tasks have been run
+  //   await retry(async () => {
+  //     expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7);
+  //     expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1);
+  //   });
+
+  //   expect(taskRunAtDates.length).toBe(8);
+
+  //   const firstRunAt = taskRunAtDates[0].runAt.getTime();
+
+  //   // the first 6 tasks should have been run at the same time (adding some fudge factor)
+  //   // and they should all be normal cost tasks
+  //
for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us 
over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new 
Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts index 61928968212cf..34464fccdfbf9 100644 --- 
a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - 
expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new 
Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts index 61928968212cf..34464fccdfbf9 100644 --- a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts @@ -160,7 +160,10 @@ describe('capacity based claiming', () => { }); const taskRunAtDates: Date[] = []; mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push(new Date()); + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -210,122 +213,122 @@ describe('capacity based claiming', () => { } }); - it('should claim tasks until the next task will exceed capacity', async () => { - const backgroundTaskLoads: number[] = []; - createMonitoringStatsOpts.taskPollingLifecycle?.events - .pipe( - filter(isTaskManagerWorkerUtilizationStatEvent), - map((taskEvent: TaskLifecycleEvent) => { - return (taskEvent.event as unknown as Ok).value; - }) - ) - .subscribe((load: number) => { - backgroundTaskLoads.push(load); - }); - const now = new Date(); - const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; - mockTaskTypeNormalCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'normal', runAt: new Date() }); - return { state: { foo: 'test' } }; - }); - mockTaskTypeXLCostRunFn.mockImplementation(() => { - taskRunAtDates.push({ type: 'xl', runAt: new Date() }); - return { state: { 
foo: 'test' } }; - }); - - // inject 6 normal cost tasks for total cost of 12 - const ids: string[] = []; - times(6, () => ids.push(uuidV4())); - const runAt1 = new Date(now.valueOf() - 5); - for (const id of ids) { - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt1, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - } - - // inject 1 XL cost task that will put us over the max cost capacity of 20 - const xlid = uuidV4(); - const runAt2 = now; - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: xlid, - taskType: '_xlCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt2, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(xlid); - - // inject one more normal cost task - const runAt3 = new Date(now.valueOf() + 5); - const lastid = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: lastid, - taskType: '_normalCostType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: runAt3, - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(lastid); - - // retry until all tasks have been run - await retry(async () => { - expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); - expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); - }); - - expect(taskRunAtDates.length).toBe(8); - - const firstRunAt = taskRunAtDates[0].runAt.getTime(); - - // the first 6 tasks should have been run at the same time (adding some fudge factor) - // and they should all be normal cost tasks - for (let i = 0; i < 6; i++) { - expect(taskRunAtDates[i].type).toBe('normal'); - expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); - } - - // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) - expect(taskRunAtDates[6].type).toBe('xl'); - expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // last task should be normal cost and be run after one polling interval has passed - expect(taskRunAtDates[7].type).toBe('normal'); - expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); - - // background task load should be 0 or 60 or 100 since we're only running these tasks - // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue - // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% - for (const load of backgroundTaskLoads) { - expect(load === 0 || load === 60 || load === 100).toBe(true); - } - }); + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // 
backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the 
large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); }); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts new file mode 100644 index 0000000000000..34464fccdfbf9 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + 
...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + // eslint-disable-next-line no-console + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load 
=== 0 || load === 100).toBe(true); + } + }); + + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - 
firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the normal capacity: 10 + 2 / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts new file mode 100644 index 0000000000000..34464fccdfbf9 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const 
mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + // 
eslint-disable-next-line no-console + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same 
time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); +}); diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts new file mode 100644 index 0000000000000..34464fccdfbf9 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + 
expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + // eslint-disable-next-line no-console + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 
normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); +});
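The commented-out test above encodes the mget strategy's cost accounting: a capacity of 10 translates to a max allowed cost of 20 per claim cycle (the managed_configuration tests removed later in this series log "Task pool now using 20 as the max allowed cost which is based on a capacity of 10"), a normal cost task costs 2 and an extra-large task costs 10. A minimal sketch of that claim arithmetic, assuming in-order claiming that stops at the first task that does not fit; the names here are illustrative, not the actual strategy_mget.ts implementation:

// Illustrative only: reconstructs the cost budget math the test asserts,
// not the real claim code in strategy_mget.ts.
function claimUpToCost(queuedCosts: number[], capacity: number): number[] {
  const maxAllowedCost = capacity * 2; // assumption: capacity 10 => max allowed cost 20
  const claimed: number[] = [];
  let usedCost = 0;
  for (const cost of queuedCosts) {
    // stop claiming as soon as the next task would exceed the budget
    if (usedCost + cost > maxAllowedCost) break;
    claimed.push(cost);
    usedCost += cost;
  }
  return claimed;
}

// Cycle 1: six normal tasks (6 x 2 = 12) fit, but the XL task (10) would push the
// total to 22 > 20, so it and the normal task queued behind it wait one POLLING_INTERVAL.
claimUpToCost([2, 2, 2, 2, 2, 2, 10, 2], 10); // => [2, 2, 2, 2, 2, 2]
// Cycle 2: the XL task plus the trailing normal task cost 10 + 2 = 12 and both run.

diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts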
b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts new file mode 100644 index 0000000000000..34464fccdfbf9 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove:
string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + // eslint-disable-next-line no-console + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // 
.subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + // expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we 
claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); +});
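A note on the load values asserted throughout these generated specs: the rxjs subscription at the top of each test samples the worker utilization stat once per claim cycle. The expected 0/60/100 values are consistent with utilization being claimed cost over the max allowed cost of 20, with a cycle that stops early (because the next task will not fit) reporting 100. The sketch below is inferred from the test comments, not taken from the task manager's metrics code:

// Inferred reconstruction of the expected load values; the real metric arrives
// as isTaskManagerWorkerUtilizationStatEvent events from the polling lifecycle.
function sketchLoad(claimedCost: number, stoppedByCapacity: boolean): number {
  const maxAllowedCost = 20; // capacity 10 in the test configuration above
  if (stoppedByCapacity) return 100; // work left queued counts as a fully used cycle
  return Math.round((claimedCost / maxAllowedCost) * 100);
}

sketchLoad(0, false); // 0: an idle poll cycle
sketchLoad(20, false); // 100: ten normal cost tasks fill the whole budget
sketchLoad(12, true); // 100: six normal tasks claimed, XL task left in the queue
sketchLoad(12, false); // 60: the XL task (10) plus the final normal task (2): (10 + 2) / 20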
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts new file mode 100644 index 0000000000000..34464fccdfbf9 --- /dev/null +++ b/x-pack/plugins/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +import { v4 as uuidV4 } from 'uuid'; +import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; +import { schema } from '@kbn/config-schema'; +import { times } from 'lodash'; +import { TaskCost, TaskStatus } from '../task'; +import type { TaskClaimingOpts } from '../queries/task_claiming'; +import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; +import { injectTask, setupTestServers, retry } from './lib'; +import { CreateMonitoringStatsOpts } from '../monitoring'; +import { filter, map } from 'rxjs'; +import { isTaskManagerWorkerUtilizationStatEvent } from '../task_events'; +import { TaskLifecycleEvent } from '../polling_lifecycle'; +import { Ok } from '../lib/result_type'; + +const POLLING_INTERVAL = 5000; +const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); +jest.mock('../polling_lifecycle', () => { + const actual = jest.requireActual('../polling_lifecycle'); + return { + ...actual, + TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { + return new actual.TaskPollingLifecycle(opts); + }), + }; +}); + +const { createMonitoringStats: createMonitoringStatsMock } = jest.requireMock('../monitoring'); +jest.mock('../monitoring', () => { + const actual = jest.requireActual('../monitoring'); + return { + ...actual, + createMonitoringStats: jest.fn().mockImplementation((opts) => { + return new actual.createMonitoringStats(opts); + }), + }; +}); + +const mockTaskTypeNormalCostRunFn = jest.fn(); +const mockCreateTaskRunnerNormalCost = jest.fn(); +const mockTaskTypeNormalCost = { + title: 'Normal cost task', + description: '', + cost: TaskCost.Normal, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerNormalCost.mockImplementation(() => ({ + run: mockTaskTypeNormalCostRunFn, + })), +}; +const mockTaskTypeXLCostRunFn = jest.fn(); +const mockCreateTaskRunnerXLCost = jest.fn(); +const mockTaskTypeXLCost = { + title: 'XL cost task', + description: '', + cost: TaskCost.ExtraLarge, + stateSchemaByVersion: { + 1: { + up: (state: Record<string, unknown>) => ({ foo: state.foo || '' }), + schema: schema.object({ + foo: schema.string(), + }), + }, + }, + createTaskRunner: mockCreateTaskRunnerXLCost.mockImplementation(() => ({ + run: mockTaskTypeXLCostRunFn, + })), +}; +jest.mock('../queries/task_claiming', () => { + const actual = jest.requireActual('../queries/task_claiming'); + return { + ...actual, + TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { + opts.definitions.registerTaskDefinitions({ + _normalCostType: mockTaskTypeNormalCost, + _xlCostType: mockTaskTypeXLCost, + }); + return new actual.TaskClaiming(opts); + }), + }; +}); + +const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); + +describe('capacity based claiming', () => { + const taskIdsToRemove: string[] = []; + let esServer: TestElasticsearchUtils; + let kibanaServer: TestKibanaUtils; + let taskManagerPlugin: TaskManagerStartContract; + let createMonitoringStatsOpts: CreateMonitoringStatsOpts; + + beforeAll(async () => { + const setupResult = await setupTestServers({ + xpack: { + task_manager: { + claim_strategy: `mget`, + capacity: 10, + poll_interval: POLLING_INTERVAL, + unsafe: { + exclude_task_types: ['[A-Za-z]*'], + }, + }, + }, + }); + esServer = setupResult.esServer; + kibanaServer = setupResult.kibanaServer; + + expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); + taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; + + expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); + + expect(createMonitoringStatsMock).toHaveBeenCalledTimes(1); + createMonitoringStatsOpts = createMonitoringStatsMock.mock.calls[0][0]; + }); + + afterAll(async () => { + if (kibanaServer) { + await kibanaServer.stop(); + } + if (esServer) { + await esServer.stop(); + } + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + afterEach(async () => { + while (taskIdsToRemove.length > 0) { + const id = taskIdsToRemove.pop(); + await taskManagerPlugin.removeIfExists(id!); + } + }); + + it('should claim tasks to full capacity', async () => { + const backgroundTaskLoads: number[] = []; + createMonitoringStatsOpts.taskPollingLifecycle?.events + .pipe( + filter(isTaskManagerWorkerUtilizationStatEvent), + map((taskEvent: TaskLifecycleEvent) => { + return (taskEvent.event as unknown as Ok<number>).value; + }) + ) + .subscribe((load: number) => { + backgroundTaskLoads.push(load); + }); + const taskRunAtDates: Date[] = []; + mockTaskTypeNormalCostRunFn.mockImplementation(() => { + const now = new Date(); + // eslint-disable-next-line no-console + console.log(`runAt Date ${now.toISOString()}`); + taskRunAtDates.push(now); + return { state: { foo: 'test' } }; + }); + + // inject 10 normal cost tasks with the same runAt value + const ids: string[] = []; + times(10, () => ids.push(uuidV4())); + + const now = new Date(); + const runAt = new Date(now.valueOf() + 5000); + for (const id of ids) { + await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + id, + taskType: '_normalCostType', + params: {}, + state: { foo: 'test' }, + stateVersion: 1, + runAt, + enabled: true, + scheduledAt: new Date(), + attempts: 0, + status: TaskStatus.Idle, + startedAt: null, + retryAt: null, + ownerId: null, + }); + taskIdsToRemove.push(id); + } + + await retry(async () => { + expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(10); + }); + + expect(taskRunAtDates.length).toBe(10); + + // eslint-disable-next-line no-console + console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + + // run at dates should be within a few seconds of each other + const firstRunAt = taskRunAtDates[0].getTime(); + const lastRunAt = taskRunAtDates[taskRunAtDates.length - 1].getTime(); + + expect(lastRunAt - firstRunAt).toBeLessThanOrEqual(1000); + + // background task load should be 0 or 100 since we're only running these
tasks + for (const load of backgroundTaskLoads) { + expect(load === 0 || load === 100).toBe(true); + } + }); + + // it('should claim tasks until the next task will exceed capacity', async () => { + // const backgroundTaskLoads: number[] = []; + // createMonitoringStatsOpts.taskPollingLifecycle?.events + // .pipe( + // filter(isTaskManagerWorkerUtilizationStatEvent), + // map((taskEvent: TaskLifecycleEvent) => { + // return (taskEvent.event as unknown as Ok).value; + // }) + // ) + // .subscribe((load: number) => { + // backgroundTaskLoads.push(load); + // }); + // const now = new Date(); + // const taskRunAtDates: Array<{ runAt: Date; type: string }> = []; + // mockTaskTypeNormalCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'normal', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + // mockTaskTypeXLCostRunFn.mockImplementation(() => { + // taskRunAtDates.push({ type: 'xl', runAt: new Date() }); + // return { state: { foo: 'test' } }; + // }); + + // // inject 6 normal cost tasks for total cost of 12 + // const ids: string[] = []; + // times(6, () => ids.push(uuidV4())); + // const runAt1 = new Date(now.valueOf() - 5); + // for (const id of ids) { + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt1, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(id); + // } + + // // inject 1 XL cost task that will put us over the max cost capacity of 20 + // const xlid = uuidV4(); + // const runAt2 = now; + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: xlid, + // taskType: '_xlCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt2, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(xlid); + + // // inject one more normal cost task + // const runAt3 = new Date(now.valueOf() + 5); + // const lastid = uuidV4(); + // await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { + // id: lastid, + // taskType: '_normalCostType', + // params: {}, + // state: { foo: 'test' }, + // stateVersion: 1, + // runAt: runAt3, + // enabled: true, + // scheduledAt: new Date(), + // attempts: 0, + // status: TaskStatus.Idle, + // startedAt: null, + // retryAt: null, + // ownerId: null, + // }); + // taskIdsToRemove.push(lastid); + + // // retry until all tasks have been run + // await retry(async () => { + // expect(mockTaskTypeNormalCostRunFn).toHaveBeenCalledTimes(7); + // expect(mockTaskTypeXLCostRunFn).toHaveBeenCalledTimes(1); + // }); + + // expect(taskRunAtDates.length).toBe(8); + + // const firstRunAt = taskRunAtDates[0].runAt.getTime(); + + // // the first 6 tasks should have been run at the same time (adding some fudge factor) + // // and they should all be normal cost tasks + // for (let i = 0; i < 6; i++) { + // expect(taskRunAtDates[i].type).toBe('normal'); + // expect(taskRunAtDates[i].runAt.getTime() - firstRunAt).toBeLessThanOrEqual(500); + // } + + // // the next task should be XL cost task and be run after one polling interval has passed (with some fudge factor) + // expect(taskRunAtDates[6].type).toBe('xl'); + 
// expect(taskRunAtDates[6].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // last task should be normal cost and be run after one polling interval has passed + // expect(taskRunAtDates[7].type).toBe('normal'); + // expect(taskRunAtDates[7].runAt.getTime() - firstRunAt).toBeGreaterThan(POLLING_INTERVAL - 500); + + // // background task load should be 0 or 60 or 100 since we're only running these tasks + // // should be 100 during the claim cycle where we claimed 6 normal tasks but left the large capacity task in the queue + // // should be 60 during the next claim cycle where we claimed the large capacity task and the remaining normal cost task: (10 + 2) / 20 = 60% + // for (const load of backgroundTaskLoads) { + // expect(load === 0 || load === 60 || load === 100).toBe(true); + // } + // }); +}); diff --git a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts index 374c7c1647412..17446c485178e 100644 --- a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts +++ b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.ts @@ -13,7 +13,8 @@ // - from the non-stale search results, return as many as we can run based on available // capacity and the cost of each task type to run -import apm, { Logger } from 'elastic-apm-node'; +import apm from 'elastic-apm-node'; +import type { Logger } from '@kbn/core/server'; import { Subject } from 'rxjs'; import { createWrappedLogger } from '../lib/wrapped_logger'; @@ -153,8 +154,7 @@ async function claimAvailableTasks(opts: TaskClaimerOpts): Promise Date: Thu, 2 Jan 2025 16:21:04 -0500 Subject: [PATCH 07/11] removing other tests --- .../task_cost_check.test.ts.snap | 122 ----- .../task_priority_check.test.ts.snap | 10 - .../managed_configuration.test.ts | 430 ------------------ .../integration_tests/removed_types.test.ts | 158 ------- .../integration_tests/task_cost_check.test.ts | 67 --- .../task_manager_switch_task_claimers.test.ts | 369 --------------- .../task_priority_check.test.ts | 60 --- .../task_state_validation.test.ts | 340 -------------- 8 files changed, 1556 deletions(-) delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/managed_configuration.test.ts delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/removed_types.test.ts delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_cost_check.test.ts delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_priority_check.test.ts delete mode 100644 x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_state_validation.test.ts diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap deleted file mode 100644 index 1e3fa4cbf8645..0000000000000 ---
a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap +++ /dev/null @@ -1,122 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`Task cost checks detects tasks with cost definitions 1`] = ` -Array [ - Object { - "cost": 1, - "taskType": "actions:.bedrock", - }, - Object { - "cost": 1, - "taskType": "actions:.cases", - }, - Object { - "cost": 1, - "taskType": "actions:.cases-webhook", - }, - Object { - "cost": 1, - "taskType": "actions:.crowdstrike", - }, - Object { - "cost": 1, - "taskType": "actions:.d3security", - }, - Object { - "cost": 1, - "taskType": "actions:.email", - }, - Object { - "cost": 1, - "taskType": "actions:.gemini", - }, - Object { - "cost": 1, - "taskType": "actions:.gen-ai", - }, - Object { - "cost": 1, - "taskType": "actions:.index", - }, - Object { - "cost": 1, - "taskType": "actions:.jira", - }, - Object { - "cost": 1, - "taskType": "actions:.observability-ai-assistant", - }, - Object { - "cost": 1, - "taskType": "actions:.opsgenie", - }, - Object { - "cost": 1, - "taskType": "actions:.pagerduty", - }, - Object { - "cost": 1, - "taskType": "actions:.resilient", - }, - Object { - "cost": 1, - "taskType": "actions:.sentinelone", - }, - Object { - "cost": 1, - "taskType": "actions:.server-log", - }, - Object { - "cost": 1, - "taskType": "actions:.servicenow", - }, - Object { - "cost": 1, - "taskType": "actions:.servicenow-itom", - }, - Object { - "cost": 1, - "taskType": "actions:.servicenow-sir", - }, - Object { - "cost": 1, - "taskType": "actions:.slack", - }, - Object { - "cost": 1, - "taskType": "actions:.slack_api", - }, - Object { - "cost": 1, - "taskType": "actions:.swimlane", - }, - Object { - "cost": 1, - "taskType": "actions:.teams", - }, - Object { - "cost": 1, - "taskType": "actions:.thehive", - }, - Object { - "cost": 1, - "taskType": "actions:.tines", - }, - Object { - "cost": 1, - "taskType": "actions:.torq", - }, - Object { - "cost": 1, - "taskType": "actions:.webhook", - }, - Object { - "cost": 1, - "taskType": "actions:.xmatters", - }, - Object { - "cost": 10, - "taskType": "alerting:siem.indicatorRule", - }, -] -`; diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap deleted file mode 100644 index 412e2ae77bb5b..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/__snapshots__/task_priority_check.test.ts.snap +++ /dev/null @@ -1,10 +0,0 @@ -// Jest Snapshot v1, https://goo.gl/fbAQLP - -exports[`Task priority checks detects tasks with priority definitions 1`] = ` -Array [ - Object { - "priority": 1, - "taskType": "ad_hoc_run-backfill", - }, -] -`; diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/managed_configuration.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/managed_configuration.test.ts deleted file mode 100644 index 2af8c72190c5a..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/managed_configuration.test.ts +++ /dev/null @@ -1,430 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import sinon from 'sinon'; -import { Client } from '@elastic/elasticsearch'; -import { elasticsearchServiceMock, savedObjectsRepositoryMock } from '@kbn/core/server/mocks'; -import { SavedObjectsErrorHelpers, Logger } from '@kbn/core/server'; -import { ADJUST_THROUGHPUT_INTERVAL } from '../lib/create_managed_configuration'; -import { TaskManagerPlugin, TaskManagerStartContract } from '../plugin'; -import { coreMock } from '@kbn/core/server/mocks'; -import { TaskManagerConfig } from '../config'; -import { BulkUpdateError } from '../lib/bulk_update_error'; - -describe('managed configuration', () => { - let taskManagerStart: TaskManagerStartContract; - let logger: Logger; - - let clock: sinon.SinonFakeTimers; - const savedObjectsClient = savedObjectsRepositoryMock.create(); - const esStart = elasticsearchServiceMock.createStart(); - - const inlineScriptError = new Error('cannot execute [inline] scripts" error') as Error & { - meta: unknown; - }; - inlineScriptError.meta = { - body: { - error: { - caused_by: { - reason: 'cannot execute [inline] scripts', - }, - }, - }, - }; - - afterEach(() => clock.restore()); - - describe('managed poll interval', () => { - beforeEach(async () => { - jest.resetAllMocks(); - clock = sinon.useFakeTimers(); - - const context = coreMock.createPluginInitializerContext({ - discovery: { - active_nodes_lookback: '30s', - interval: 10000, - }, - kibanas_per_partition: 2, - capacity: 10, - max_attempts: 9, - poll_interval: 3000, - allow_reading_invalid_state: false, - version_conflict_threshold: 80, - monitored_aggregated_stats_refresh_rate: 60000, - monitored_stats_health_verbose_log: { - enabled: false, - level: 'debug' as const, - warn_delayed_task_start_in_seconds: 60, - }, - monitored_stats_required_freshness: 4000, - monitored_stats_running_average_window: 50, - request_capacity: 1000, - monitored_task_execution_thresholds: { - default: { - error_threshold: 90, - warn_threshold: 80, - }, - custom: {}, - }, - unsafe: { - exclude_task_types: [], - authenticate_background_task_utilization: true, - }, - event_loop_delay: { - monitor: true, - warn_threshold: 5000, - }, - worker_utilization_running_average_window: 5, - metrics_reset_interval: 3000, - claim_strategy: 'update_by_query', - request_timeouts: { - update_by_query: 1000, - }, - auto_calculate_default_ech_capacity: false, - }); - logger = context.logger.get('taskManager'); - - const taskManager = new TaskManagerPlugin(context); - ( - await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined }) - ).registerTaskDefinitions({ - foo: { - title: 'Foo', - createTaskRunner: jest.fn(), - }, - }); - - const coreStart = coreMock.createStart(); - coreStart.elasticsearch = esStart; - esStart.client.asInternalUser.child.mockReturnValue( - esStart.client.asInternalUser as unknown as Client - ); - coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient); - taskManagerStart = await taskManager.start(coreStart, {}); - - // force rxjs timers to fire when they are scheduled for setTimeout(0) as the - // sinon fake timers cause them to stall - clock.tick(0); - }); - - test('should increase poll interval when Elasticsearch returns 429 error', async () => { - savedObjectsClient.create.mockRejectedValueOnce( - SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b') - ); - - // Cause "too many requests" error to be thrown - await expect( - taskManagerStart.schedule({ - taskType: 'foo', - state: {}, - params: {}, - }) - ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many 
Requests"`); - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms'); - }); - - test('should increase poll interval when Elasticsearch returns a cluster_block_exception error', async () => { - savedObjectsClient.create.mockRejectedValueOnce( - new BulkUpdateError({ - statusCode: 403, - message: 'index is blocked', - type: 'cluster_block_exception', - }) - ); - - await expect( - taskManagerStart.schedule({ - taskType: 'foo', - state: {}, - params: {}, - }) - ).rejects.toThrowErrorMatchingInlineSnapshot(`"index is blocked"`); - clock.tick(100000); - - expect(logger.warn).toHaveBeenCalledWith( - 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Poll interval configuration changing from 3000 to 61000 after seeing 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 61000ms'); - }); - - test('should increase poll interval when Elasticsearch returns "cannot execute [inline] scripts" error', async () => { - const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked; - childEsClient.search.mockImplementationOnce(async () => { - throw inlineScriptError; - }); - - await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot( - `"cannot execute [inline] scripts\\" error"` - ); - - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" and/or "cluster_block_exception" error(s).' 
- ); - expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms'); - }); - }); - - describe('managed capacity with default claim strategy', () => { - beforeEach(async () => { - jest.resetAllMocks(); - clock = sinon.useFakeTimers(); - - const context = coreMock.createPluginInitializerContext({ - discovery: { - active_nodes_lookback: '30s', - interval: 10000, - }, - kibanas_per_partition: 2, - capacity: 10, - max_attempts: 9, - poll_interval: 3000, - allow_reading_invalid_state: false, - version_conflict_threshold: 80, - monitored_aggregated_stats_refresh_rate: 60000, - monitored_stats_health_verbose_log: { - enabled: false, - level: 'debug' as const, - warn_delayed_task_start_in_seconds: 60, - }, - monitored_stats_required_freshness: 4000, - monitored_stats_running_average_window: 50, - request_capacity: 1000, - monitored_task_execution_thresholds: { - default: { - error_threshold: 90, - warn_threshold: 80, - }, - custom: {}, - }, - unsafe: { - exclude_task_types: [], - authenticate_background_task_utilization: true, - }, - event_loop_delay: { - monitor: true, - warn_threshold: 5000, - }, - worker_utilization_running_average_window: 5, - metrics_reset_interval: 3000, - claim_strategy: 'update_by_query', - request_timeouts: { - update_by_query: 1000, - }, - auto_calculate_default_ech_capacity: false, - }); - logger = context.logger.get('taskManager'); - - const taskManager = new TaskManagerPlugin(context); - ( - await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined }) - ).registerTaskDefinitions({ - foo: { - title: 'Foo', - createTaskRunner: jest.fn(), - }, - }); - - const coreStart = coreMock.createStart(); - coreStart.elasticsearch = esStart; - esStart.client.asInternalUser.child.mockReturnValue( - esStart.client.asInternalUser as unknown as Client - ); - coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient); - taskManagerStart = await taskManager.start(coreStart, {}); - - // force rxjs timers to fire when they are scheduled for setTimeout(0) as the - // sinon fake timers cause them to stall - clock.tick(0); - }); - - test('should lower capacity when Elasticsearch returns 429 error', async () => { - savedObjectsClient.create.mockRejectedValueOnce( - SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b') - ); - - // Cause "too many requests" error to be thrown - await expect( - taskManagerStart.schedule({ - taskType: 'foo', - state: {}, - params: {}, - }) - ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`); - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).' 
- ); - expect(logger.debug).toHaveBeenCalledWith( - 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Task pool now using 10 as the max worker value which is based on a capacity of 10' - ); - }); - - test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => { - const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked; - childEsClient.search.mockImplementationOnce(async () => { - throw inlineScriptError; - }); - - await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot( - `"cannot execute [inline] scripts\\" error"` - ); - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Task pool now using 10 as the max worker value which is based on a capacity of 10' - ); - }); - }); - - describe('managed capacity with mget claim strategy', () => { - beforeEach(async () => { - jest.resetAllMocks(); - clock = sinon.useFakeTimers(); - - const context = coreMock.createPluginInitializerContext({ - discovery: { - active_nodes_lookback: '30s', - interval: 10000, - }, - kibanas_per_partition: 2, - capacity: 10, - max_attempts: 9, - poll_interval: 3000, - allow_reading_invalid_state: false, - version_conflict_threshold: 80, - monitored_aggregated_stats_refresh_rate: 60000, - monitored_stats_health_verbose_log: { - enabled: false, - level: 'debug' as const, - warn_delayed_task_start_in_seconds: 60, - }, - monitored_stats_required_freshness: 4000, - monitored_stats_running_average_window: 50, - request_capacity: 1000, - monitored_task_execution_thresholds: { - default: { - error_threshold: 90, - warn_threshold: 80, - }, - custom: {}, - }, - unsafe: { - exclude_task_types: [], - authenticate_background_task_utilization: true, - }, - event_loop_delay: { - monitor: true, - warn_threshold: 5000, - }, - worker_utilization_running_average_window: 5, - metrics_reset_interval: 3000, - claim_strategy: 'mget', - request_timeouts: { - update_by_query: 1000, - }, - auto_calculate_default_ech_capacity: false, - }); - logger = context.logger.get('taskManager'); - - const taskManager = new TaskManagerPlugin(context); - ( - await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined }) - ).registerTaskDefinitions({ - foo: { - title: 'Foo', - createTaskRunner: jest.fn(), - }, - }); - - const coreStart = coreMock.createStart(); - coreStart.elasticsearch = esStart; - esStart.client.asInternalUser.child.mockReturnValue( - esStart.client.asInternalUser as unknown as Client - ); - coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient); - taskManagerStart = await taskManager.start(coreStart, {}); - - // force rxjs timers to fire when they are scheduled for setTimeout(0) as the - // sinon fake timers cause them to stall - clock.tick(0); - }); - - test('should lower capacity when Elasticsearch returns 429 error', async () => { - savedObjectsClient.create.mockRejectedValueOnce( - SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b') - ); - - // Cause "too many 
requests" error to be thrown - await expect( - taskManagerStart.schedule({ - taskType: 'foo', - state: {}, - params: {}, - }) - ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`); - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Task pool now using 20 as the max allowed cost which is based on a capacity of 10' - ); - }); - - test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => { - const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked; - childEsClient.search.mockImplementationOnce(async () => { - throw inlineScriptError; - }); - - await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot( - `"cannot execute [inline] scripts\\" error"` - ); - clock.tick(ADJUST_THROUGHPUT_INTERVAL); - - expect(logger.warn).toHaveBeenCalledWith( - 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)' - ); - expect(logger.debug).toHaveBeenCalledWith( - 'Task pool now using 20 as the max allowed cost which is based on a capacity of 10' - ); - }); - }); -}); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/removed_types.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/removed_types.test.ts deleted file mode 100644 index 390c426e9c69f..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/removed_types.test.ts +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { v4 as uuidV4 } from 'uuid'; -import { ElasticsearchClient } from '@kbn/core/server'; -import { TaskManagerPlugin, TaskManagerStartContract } from '../plugin'; -import { injectTask, retry, setupTestServers } from './lib'; -import { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { ConcreteTaskInstance, TaskStatus } from '../task'; -import { CreateWorkloadAggregatorOpts } from '../monitoring/workload_statistics'; - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -const { createWorkloadAggregator: createWorkloadAggregatorMock } = jest.requireMock( - '../monitoring/workload_statistics' -); -jest.mock('../monitoring/workload_statistics', () => { - const actual = jest.requireActual('../monitoring/workload_statistics'); - return { - ...actual, - createWorkloadAggregator: jest.fn().mockImplementation((opts) => { - return new actual.createWorkloadAggregator(opts); - }), - }; -}); - -// FLAKY: https://github.com/elastic/kibana/issues/194208 -describe.skip('unrecognized task types', () => { - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let createWorkloadAggregatorOpts: CreateWorkloadAggregatorOpts; - - const taskIdsToRemove: string[] = []; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - monitored_aggregated_stats_refresh_rate: 5000, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(createWorkloadAggregatorMock).toHaveBeenCalledTimes(1); - createWorkloadAggregatorOpts = createWorkloadAggregatorMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(async () => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - test('should be no workload aggregator errors when there are removed task types', async () => { - const errorLogSpy = jest.spyOn(createWorkloadAggregatorOpts.logger, 'error'); - const removeTypeId = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: removeTypeId, - taskType: 'sampleTaskRemovedType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - const notRegisteredTypeId = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: notRegisteredTypeId, - taskType: 'sampleTaskNotRegisteredType', - params: {}, - state: { foo: 'test' }, - stateVersion: 1, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - taskIdsToRemove.push(removeTypeId); - taskIdsToRemove.push(notRegisteredTypeId); - - await retry(async () => { - const task = await getTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser); - expect(task?._source?.task?.status).toBe('unrecognized'); - }); - - // monitored_aggregated_stats_refresh_rate is set to the minimum of 5 seconds - // so 
we want to wait that long to let it refresh - await new Promise((r) => setTimeout(r, 5100)); - - const errorLogCalls = errorLogSpy.mock.calls[0]; - - // if there are any error logs, none of them should be workload aggregator errors - if (errorLogCalls) { - // should be no workload aggregator errors - for (const elog of errorLogCalls) { - if (typeof elog === 'string') { - expect(elog).not.toMatch(/^\[WorkloadAggregator\]: Error: Unsupported task type/i); - } - } - } - }); -}); - -async function getTask(esClient: ElasticsearchClient) { - const response = await esClient.search<{ task: ConcreteTaskInstance }>({ - index: '.kibana_task_manager', - body: { - query: { - bool: { - filter: [ - { - term: { - 'task.taskType': 'sampleTaskRemovedType', - }, - }, - ], - }, - }, - }, - }); - - return response.hits.hits[0]; -} diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_cost_check.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_cost_check.test.ts deleted file mode 100644 index df11792b2c4ad..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_cost_check.test.ts +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { - type TestElasticsearchUtils, - type TestKibanaUtils, -} from '@kbn/core-test-helpers-kbn-server'; -import { TaskCost, TaskDefinition } from '../task'; -import { setupTestServers } from './lib'; -import { TaskTypeDictionary } from '../task_type_dictionary'; -import { sortBy } from 'lodash'; - -jest.mock('../task_type_dictionary', () => { - const actual = jest.requireActual('../task_type_dictionary'); - return { - ...actual, - TaskTypeDictionary: jest.fn().mockImplementation((opts) => { - return new actual.TaskTypeDictionary(opts); - }), - }; -}); - -// Notify response-ops if a task sets a cost to something other than `Normal` -describe('Task cost checks', () => { - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskTypeDictionary: TaskTypeDictionary; - - beforeAll(async () => { - const setupResult = await setupTestServers(); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - const mockedTaskTypeDictionary = jest.requireMock('../task_type_dictionary'); - expect(mockedTaskTypeDictionary.TaskTypeDictionary).toHaveBeenCalledTimes(1); - taskTypeDictionary = mockedTaskTypeDictionary.TaskTypeDictionary.mock.results[0].value; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('detects tasks with cost definitions', async () => { - const taskTypes = taskTypeDictionary.getAllDefinitions(); - const taskTypesWithCost = sortBy( - taskTypes - .map((taskType: TaskDefinition) => - !!taskType.cost ? 
{ taskType: taskType.type, cost: taskType.cost } : null - ) - .filter( - (tt: { taskType: string; cost: TaskCost } | null) => - null != tt && tt.cost !== TaskCost.Normal - ), - 'taskType' - ); - expect(taskTypesWithCost).toMatchSnapshot(); - }); -}); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts deleted file mode 100644 index b89f9f92586fe..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_switch_task_claimers.test.ts +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -import { v4 as uuidV4 } from 'uuid'; -import { schema } from '@kbn/config-schema'; -import { SerializedConcreteTaskInstance, TaskStatus } from '../task'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { injectTask, setupTestServers, retry } from './lib'; -import { setupKibanaServer } from './lib/setup_test_servers'; - -const mockTaskTypeRunFn = jest.fn(); -const mockCreateTaskRunner = jest.fn(); -const mockTaskType = { - title: '', - description: '', - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ ...state, baz: state.baz || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - baz: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunner.mockImplementation(() => ({ - run: mockTaskTypeRunFn, - })), -}; -const { TaskClaiming: TaskClaimingMock } = jest.requireMock('../queries/task_claiming'); -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - // We need to register here because once the class is instantiated, adding - // definitions won't get claimed because of "partitionIntoClaimingBatches". 
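-      // (A rough sketch of the ordering this enforces — hypothetical names, not
-      // code from this file: the dictionary must be populated first, e.g.
-      //   definitions.registerTaskDefinitions({ fooType: mockTaskType });
-      //   const claiming = new actual.TaskClaiming({ definitions, ...otherOpts });
-      // because TaskClaiming partitions the known types into claiming batches at
-      // construction time, so types registered afterwards are never claimed.)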
- opts.definitions.registerTaskDefinitions({ - fooType: mockTaskType, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -describe('switch task claiming strategies', () => { - beforeEach(() => { - jest.clearAllMocks(); - }); - - it('should switch from default to update_by_query and still claim tasks', async () => { - const setupResultDefault = await setupTestServers(); - const esServer = setupResultDefault.esServer; - let kibanaServer = setupResultDefault.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('mget'); - - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultUbq = await setupKibanaServer({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - kibanaServer = setupResultUbq.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - // inject a task to run and ensure it is claimed and run - const id2 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id2, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from update_by_query to default and still claim tasks', async () => { - const setupResultUbq = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - const esServer = setupResultUbq.esServer; - let kibanaServer = setupResultUbq.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultDefault = await setupKibanaServer(); - kibanaServer = setupResultDefault.kibanaServer; - - taskClaimingOpts = 
TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('mget'); - - // inject a task to run and ensure it is claimed and run - const id2 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id2, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from default to update_by_query and claim tasks that were running during shutdown', async () => { - const setupResultDefault = await setupTestServers(); - const esServer = setupResultDefault.esServer; - let kibanaServer = setupResultDefault.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('mget'); - - mockTaskTypeRunFn.mockImplementation(async () => { - await new Promise((resolve) => setTimeout(resolve, 2000)); - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - timeoutOverride: '5s', - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultUbq = await setupKibanaServer({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - kibanaServer = setupResultUbq.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - // task doc should still exist and be running - const task = await kibanaServer.coreStart.elasticsearch.client.asInternalUser.get<{ - task: SerializedConcreteTaskInstance; - }>({ - id: `task:${id1}`, - index: '.kibana_task_manager', - }); - - expect(task._source?.task?.status).toBe(TaskStatus.Running); - - // task manager should pick up and claim the task that was running during shutdown - await retry( - async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }, - { times: 60, intervalMs: 1000 } - ); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('should switch from update_by_query to default and claim tasks that were running during shutdown', async () => { - const setupResultUbq = await setupTestServers({ - xpack: { - task_manager: { - claim_strategy: 'update_by_query', - }, - }, - }); - const esServer = setupResultUbq.esServer; - let kibanaServer = setupResultUbq.kibanaServer; - let taskClaimingOpts: TaskClaimingOpts = TaskClaimingMock.mock.calls[0][0]; - - expect(taskClaimingOpts.strategy).toBe('update_by_query'); - - mockTaskTypeRunFn.mockImplementation(async () => { - await new Promise((resolve) => setTimeout(resolve, 2000)); - return { state: {} }; - }); - - // inject a task to run and ensure it is claimed 
and run - const id1 = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id: id1, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - timeoutOverride: '5s', - retryAt: null, - ownerId: null, - }); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(1); - }); - - if (kibanaServer) { - await kibanaServer.stop(); - } - - const setupResultDefault = await setupKibanaServer(); - kibanaServer = setupResultDefault.kibanaServer; - - taskClaimingOpts = TaskClaimingMock.mock.calls[1][0]; - expect(taskClaimingOpts.strategy).toBe('mget'); - - // task doc should still exist and be running - const task = await kibanaServer.coreStart.elasticsearch.client.asInternalUser.get<{ - task: SerializedConcreteTaskInstance; - }>({ - id: `task:${id1}`, - index: '.kibana_task_manager', - }); - - expect(task._source?.task?.status).toBe(TaskStatus.Running); - - await retry( - async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalledTimes(2); - }, - { times: 60, intervalMs: 1000 } - ); - - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); -}); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_priority_check.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_priority_check.test.ts deleted file mode 100644 index ebbea6f1e8a07..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_priority_check.test.ts +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { - type TestElasticsearchUtils, - type TestKibanaUtils, -} from '@kbn/core-test-helpers-kbn-server'; -import { TaskDefinition, TaskPriority } from '../task'; -import { setupTestServers } from './lib'; -import { TaskTypeDictionary } from '../task_type_dictionary'; - -jest.mock('../task_type_dictionary', () => { - const actual = jest.requireActual('../task_type_dictionary'); - return { - ...actual, - TaskTypeDictionary: jest.fn().mockImplementation((opts) => { - return new actual.TaskTypeDictionary(opts); - }), - }; -}); - -// Notify response-ops if a task sets a priority to something other than `Normal` -describe('Task priority checks', () => { - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskTypeDictionary: TaskTypeDictionary; - - beforeAll(async () => { - const setupResult = await setupTestServers(); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - const mockedTaskTypeDictionary = jest.requireMock('../task_type_dictionary'); - expect(mockedTaskTypeDictionary.TaskTypeDictionary).toHaveBeenCalledTimes(1); - taskTypeDictionary = mockedTaskTypeDictionary.TaskTypeDictionary.mock.results[0].value; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - it('detects tasks with priority definitions', async () => { - const taskTypes = taskTypeDictionary.getAllDefinitions(); - const taskTypesWithPriority = taskTypes - .map((taskType: TaskDefinition) => - !!taskType.priority ? { taskType: taskType.type, priority: taskType.priority } : null - ) - .filter((tt: { taskType: string; priority: TaskPriority } | null) => null != tt); - expect(taskTypesWithPriority).toMatchSnapshot(); - }); -}); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_state_validation.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_state_validation.test.ts deleted file mode 100644 index 294b4fd905807..0000000000000 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_state_validation.test.ts +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -import { v4 as uuidV4 } from 'uuid'; -import type { TestElasticsearchUtils, TestKibanaUtils } from '@kbn/core-test-helpers-kbn-server'; -import { schema } from '@kbn/config-schema'; -import { TaskStatus } from '../task'; -import type { TaskPollingLifecycleOpts } from '../polling_lifecycle'; -import type { TaskClaimingOpts } from '../queries/task_claiming'; -import { TaskManagerPlugin, type TaskManagerStartContract } from '../plugin'; -import { injectTask, setupTestServers, retry } from './lib'; - -const { TaskPollingLifecycle: TaskPollingLifecycleMock } = jest.requireMock('../polling_lifecycle'); -jest.mock('../polling_lifecycle', () => { - const actual = jest.requireActual('../polling_lifecycle'); - return { - ...actual, - TaskPollingLifecycle: jest.fn().mockImplementation((opts) => { - return new actual.TaskPollingLifecycle(opts); - }), - }; -}); - -const mockTaskTypeRunFn = jest.fn(); -const mockCreateTaskRunner = jest.fn(); -const mockTaskType = { - title: '', - description: '', - stateSchemaByVersion: { - 1: { - up: (state: Record) => ({ foo: state.foo || '' }), - schema: schema.object({ - foo: schema.string(), - }), - }, - 2: { - up: (state: Record) => ({ ...state, bar: state.bar || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - }), - }, - 3: { - up: (state: Record) => ({ ...state, baz: state.baz || '' }), - schema: schema.object({ - foo: schema.string(), - bar: schema.string(), - baz: schema.string(), - }), - }, - }, - createTaskRunner: mockCreateTaskRunner.mockImplementation(() => ({ - run: mockTaskTypeRunFn, - })), -}; -jest.mock('../queries/task_claiming', () => { - const actual = jest.requireActual('../queries/task_claiming'); - return { - ...actual, - TaskClaiming: jest.fn().mockImplementation((opts: TaskClaimingOpts) => { - // We need to register here because once the class is instantiated, adding - // definitions won't get claimed because of "partitionIntoClaimingBatches". 
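-      // (For context on the versioned schemas above — a hedged illustration only:
-      // a document stored with stateVersion 1 and state { foo: 'x' } is migrated
-      // on read by applying each `up` in order — v2 adds `bar`, v3 adds `baz` —
-      // so the task runner receives { foo: 'x', bar: '', baz: '' }, which is the
-      // behavior the "should migrate the task state" test below asserts.)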
- opts.definitions.registerTaskDefinitions({ - fooType: mockTaskType, - }); - return new actual.TaskClaiming(opts); - }), - }; -}); - -const taskManagerStartSpy = jest.spyOn(TaskManagerPlugin.prototype, 'start'); - -describe('task state validation', () => { - describe('allow_reading_invalid_state: true', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let pollingLifecycleOpts: TaskPollingLifecycleOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers(); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - pollingLifecycleOpts = TaskPollingLifecycleMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should drop unknown fields from the task state', async () => { - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: 'test', bar: 'test', baz: 'test', invalidProperty: 'invalid' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: 'test', - bar: 'test', - baz: 'test', - }); - }); - - it('should fail to update the task if the task runner returns an unknown property in the state', async () => { - const errorLogSpy = jest.spyOn(pollingLifecycleOpts.logger, 'error'); - mockTaskTypeRunFn.mockImplementation(() => { - return { state: { invalidField: true, foo: 'test', bar: 'test', baz: 'test' } }; - }); - - const task = await taskManagerPlugin.schedule({ - taskType: 'fooType', - params: {}, - state: { foo: 'test', bar: 'test', baz: 'test' }, - schedule: { interval: '1d' }, - }); - taskIdsToRemove.push(task.id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: 'test', - bar: 'test', - baz: 'test', - }); - expect(errorLogSpy).toHaveBeenCalledWith( - `Task fooType "${task.id}" failed: Error: [invalidField]: definition for this key is missing`, - expect.anything() - ); - }); - - it('should migrate the task state', async () => { - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: {}, - runAt: new Date(), - enabled: true, - scheduledAt: 
new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: '', - bar: '', - baz: '', - }); - }); - - it('should debug log by default when reading an invalid task state', async () => { - const debugLogSpy = jest.spyOn(pollingLifecycleOpts.logger, 'debug'); - mockTaskTypeRunFn.mockImplementation(() => { - return { state: {} }; - }); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: true, bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - expect(mockTaskTypeRunFn).toHaveBeenCalled(); - }); - - expect(mockCreateTaskRunner).toHaveBeenCalledTimes(1); - const call = mockCreateTaskRunner.mock.calls[0][0]; - expect(call.taskInstance.state).toEqual({ - foo: true, - bar: 'test', - baz: 'test', - }); - - expect(debugLogSpy).toHaveBeenCalledWith( - `[fooType][${id}] Failed to validate the task's state. Allowing read operation to proceed because allow_reading_invalid_state is true. Error: [foo]: expected value of type [string] but got [boolean]` - ); - }); - }); - - describe('allow_reading_invalid_state: false', () => { - const taskIdsToRemove: string[] = []; - let esServer: TestElasticsearchUtils; - let kibanaServer: TestKibanaUtils; - let taskManagerPlugin: TaskManagerStartContract; - let pollingLifecycleOpts: TaskPollingLifecycleOpts; - - beforeAll(async () => { - const setupResult = await setupTestServers({ - xpack: { - task_manager: { - allow_reading_invalid_state: false, - }, - }, - }); - esServer = setupResult.esServer; - kibanaServer = setupResult.kibanaServer; - - expect(taskManagerStartSpy).toHaveBeenCalledTimes(1); - taskManagerPlugin = taskManagerStartSpy.mock.results[0].value; - - expect(TaskPollingLifecycleMock).toHaveBeenCalledTimes(1); - pollingLifecycleOpts = TaskPollingLifecycleMock.mock.calls[0][0]; - }); - - afterAll(async () => { - if (kibanaServer) { - await kibanaServer.stop(); - } - if (esServer) { - await esServer.stop(); - } - }); - - beforeEach(() => { - jest.clearAllMocks(); - }); - - afterEach(async () => { - while (taskIdsToRemove.length > 0) { - const id = taskIdsToRemove.pop(); - await taskManagerPlugin.removeIfExists(id!); - } - }); - - it('should fail the task run when setting allow_reading_invalid_state:false and reading an invalid state', async () => { - const logSpy = jest.spyOn(pollingLifecycleOpts.logger, 'warn'); - const updateSpy = jest.spyOn(pollingLifecycleOpts.taskStore, 'bulkPartialUpdate'); - - const id = uuidV4(); - await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { - id, - taskType: 'fooType', - params: { foo: true }, - state: { foo: true, bar: 'test', baz: 'test' }, - stateVersion: 4, - runAt: new Date(), - enabled: true, - scheduledAt: new Date(), - attempts: 0, - status: TaskStatus.Idle, - startedAt: null, - retryAt: null, - ownerId: null, - }); - taskIdsToRemove.push(id); - - await retry(async () => { - const calls = logSpy.mock.calls as string[][]; - const 
expected = - /^Task \(fooType\/.*\) has a validation error: \[foo\]: expected value of type \[string\] but got \[boolean\]/; - const found = calls.map((arr) => arr[0]).find((message) => message.match(expected) != null); - expect(found).toMatch(expected); - expect(updateSpy).toHaveBeenCalledWith( - expect.arrayContaining([expect.objectContaining({ id })]) - ); - }); - }); - }); -}); From 246dc9a007f2ef719bff566640b38f9a87a695e2 Mon Sep 17 00:00:00 2001 From: Ying Date: Fri, 3 Jan 2025 11:29:31 -0500 Subject: [PATCH 08/11] allow console logs --- packages/kbn-test/jest-preset.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/kbn-test/jest-preset.js b/packages/kbn-test/jest-preset.js index 791ee4a974823..6012c4ffba18a 100644 --- a/packages/kbn-test/jest-preset.js +++ b/packages/kbn-test/jest-preset.js @@ -68,7 +68,7 @@ module.exports = { '/packages/kbn-test/src/jest/setup/mocks.eui.js', '/packages/kbn-test/src/jest/setup/react_testing_library.js', '/packages/kbn-test/src/jest/setup/mocks.kbn_i18n_react.js', - process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [], + // process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [], ].flat(), snapshotFormat: { From bf54b4269ce161e72374c0f28e6f48c0320e4253 Mon Sep 17 00:00:00 2001 From: Ying Date: Fri, 3 Jan 2025 12:09:40 -0500 Subject: [PATCH 09/11] allow console logs --- .../task_manager_capacity_based_claiming.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_01.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_02.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_03.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_04.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_05.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_06.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_07.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_08.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_09.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_10.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_11.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_12.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_13.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_14.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_15.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_16.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_17.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_18.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_19.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_20.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_21.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_22.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_23.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_24.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_25.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_26.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_27.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_28.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_29.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_30.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_31.test.ts | 4 ++-- 
.../task_manager_capacity_based_claiming.test_32.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_33.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_34.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_35.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_36.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_37.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_38.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_39.test.ts | 4 ++-- .../task_manager_capacity_based_claiming.test_40.test.ts | 4 ++-- .../shared/task_manager/server/task_claimers/strategy_mget.ts | 3 ++- 42 files changed, 84 insertions(+), 83 deletions(-) diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts index 
34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds 
of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); 
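 // (A hedged aside, not part of the diff: the log-to-warn swap presumably keeps
 // these timing diagnostics visible under Jest setups that silence console.log
 // in CI; a minimal sketch of such a guard, assuming nothing about the actual
 // disable_console_logs.js contents, would be:
 //   if (process.env.CI) { console.log = () => {}; } // warn/error left intact
 // so console.warn output still reaches the captured test logs.)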
@@ -199,7 +199,7 @@ describe('capacity based claiming', () => {
     expect(taskRunAtDates.length).toBe(10);
 
     // eslint-disable-next-line no-console
-    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+    console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
 
     // run at dates should be within a few seconds of each other
     const firstRunAt = taskRunAtDates[0].getTime();
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
index 34464fccdfbf9..fcfd38774d6f9 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
@@ -162,7 +162,7 @@ describe('capacity based claiming', () => {
     mockTaskTypeNormalCostRunFn.mockImplementation(() => {
       const now = new Date();
       // eslint-disable-next-line no-console
-      console.log(`runAt Date ${now.toISOString()}`);
+      console.warn(`runAt Date ${now.toISOString()}`);
       taskRunAtDates.push(now);
       return { state: { foo: 'test' } };
     });
@@ -199,7 +199,7 @@ describe('capacity based claiming', () => {
     expect(taskRunAtDates.length).toBe(10);
 
     // eslint-disable-next-line no-console
-    console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
+    console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);
 
     // run at dates should be within a few seconds of each other
     const firstRunAt =
taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ 
describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts @@ -162,7 +162,7 @@ describe('capacity 
based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts index 
34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds 
of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); 
@@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts @@ -162,7 +162,7 @@ describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts index 34464fccdfbf9..fcfd38774d6f9 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts @@ -162,7 +162,7 @@ 
describe('capacity based claiming', () => { mockTaskTypeNormalCostRunFn.mockImplementation(() => { const now = new Date(); // eslint-disable-next-line no-console - console.log(`runAt Date ${now.toISOString()}`); + console.warn(`runAt Date ${now.toISOString()}`); taskRunAtDates.push(now); return { state: { foo: 'test' } }; }); @@ -199,7 +199,7 @@ describe('capacity based claiming', () => { expect(taskRunAtDates.length).toBe(10); // eslint-disable-next-line no-console - console.log(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); + console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`); // run at dates should be within a few seconds of each other const firstRunAt = taskRunAtDates[0].getTime(); diff --git a/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts b/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts index 17446c485178e..06b4870fa32aa 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts @@ -154,7 +154,8 @@ async function claimAvailableTasks(opts: TaskClaimerOpts): Promise Date: Fri, 3 Jan 2025 12:10:12 -0500 Subject: [PATCH 10/11] allow console logs --- packages/kbn-test/jest-preset.js | 2 +- packages/kbn-test/src/jest/setup/disable_console_logs.js | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/kbn-test/jest-preset.js b/packages/kbn-test/jest-preset.js index 6012c4ffba18a..791ee4a974823 100644 --- a/packages/kbn-test/jest-preset.js +++ b/packages/kbn-test/jest-preset.js @@ -68,7 +68,7 @@ module.exports = { '/packages/kbn-test/src/jest/setup/mocks.eui.js', '/packages/kbn-test/src/jest/setup/react_testing_library.js', '/packages/kbn-test/src/jest/setup/mocks.kbn_i18n_react.js', - // process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [], + process.env.CI ? 
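The hunks above make the same two-line change in every cloned test file: the debug output moves from console.log to console.warn. Read together with PATCH 10/11 below, the apparent intent is that the CI jest setup keeps stubbing console.log while leaving console.warn intact, so these messages still reach the CI job output. A minimal standalone sketch of the mocked run function after the change (jest's mockImplementation is replaced by a plain closure so the snippet runs on its own; names are taken from the hunks):

const taskRunAtDates: Date[] = [];

const runFn = () => {
  const now = new Date();
  // warn instead of log: console.log is stubbed out by the CI jest setup
  console.warn(`runAt Date ${now.toISOString()}`);
  taskRunAtDates.push(now);
  return { state: { foo: 'test' } };
};

runFn();
console.warn(`taskRunAtDates: ${JSON.stringify(taskRunAtDates)}`);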
diff --git a/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts b/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts
index 17446c485178e..06b4870fa32aa 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/task_claimers/strategy_mget.ts
@@ -154,7 +154,8 @@ async function claimAvailableTasks(opts: TaskClaimerOpts): Promise

Date: Fri, 3 Jan 2025 12:10:12 -0500
Subject: [PATCH 10/11] allow console logs

---
 packages/kbn-test/jest-preset.js                         | 2 +-
 packages/kbn-test/src/jest/setup/disable_console_logs.js | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/packages/kbn-test/jest-preset.js b/packages/kbn-test/jest-preset.js
index 6012c4ffba18a..791ee4a974823 100644
--- a/packages/kbn-test/jest-preset.js
+++ b/packages/kbn-test/jest-preset.js
@@ -68,7 +68,7 @@ module.exports = {
     '/packages/kbn-test/src/jest/setup/mocks.eui.js',
     '/packages/kbn-test/src/jest/setup/react_testing_library.js',
     '/packages/kbn-test/src/jest/setup/mocks.kbn_i18n_react.js',
-    // process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [],
+    process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [],
   ].flat(),
 
   snapshotFormat: {
diff --git a/packages/kbn-test/src/jest/setup/disable_console_logs.js b/packages/kbn-test/src/jest/setup/disable_console_logs.js
index 8fd061ba7867c..efb8151431d47 100644
--- a/packages/kbn-test/src/jest/setup/disable_console_logs.js
+++ b/packages/kbn-test/src/jest/setup/disable_console_logs.js
@@ -9,5 +9,5 @@
 
 // on CI these logs just muddy up the console and produce a ton of unnecessary noise
 console.log = () => {};
-console.error = () => {};
-console.warn = () => {};
+// console.error = () => {};
+// console.warn = () => {};
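The jest-preset change above re-enables the disable_console_logs setup file on CI (it had been commented out) while the setup file itself now stubs only console.log, leaving console.error and console.warn alive. The setupFiles list relies on a small conditional-entry pattern: the ternary contributes either a path on CI or an empty array off CI, and the trailing .flat() erases the empty entries so the list stays a flat array of strings. A sketch of that pattern in isolation (paths copied from the hunk; the rest of the preset omitted):

const setupFiles: string[] = [
  '/packages/kbn-test/src/jest/setup/mocks.kbn_i18n_react.js',
  // on CI this contributes the disable_console_logs setup file;
  // off CI it contributes an empty array, which .flat() removes
  process.env.CI ? '/packages/kbn-test/src/jest/setup/disable_console_logs.js' : [],
].flat();

console.warn(setupFiles);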
From bca186330dc64cfa85621109515651a8c312d57c Mon Sep 17 00:00:00 2001
From: Ying
Date: Mon, 6 Jan 2025 15:25:28 -0500
Subject: [PATCH 11/11] Increasing time

---
 .../task_manager_capacity_based_claiming.test.ts         | 2 +-
 .../task_manager_capacity_based_claiming.test_01.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_02.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_03.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_04.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_05.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_06.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_07.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_08.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_09.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_10.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_11.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_12.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_13.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_14.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_15.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_16.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_17.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_18.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_19.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_20.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_21.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_22.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_23.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_24.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_25.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_26.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_27.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_28.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_29.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_30.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_31.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_32.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_33.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_34.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_35.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_36.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_37.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_38.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_39.test.ts | 2 +-
 .../task_manager_capacity_based_claiming.test_40.test.ts | 2 +-
 41 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_01.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_02.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_03.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
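Every hunk in this patch is the same one-line change: the injected tasks' runAt moves from five to six seconds in the future. Presumably this gives the loop of injectTask calls more headroom to finish before the earliest runAt passes on slow CI workers; the commit subject says only "Increasing time". The date arithmetic in isolation (injectTask, uuidV4, and the test harness are from the surrounding file and not shown here):

const now = new Date();
// was now.valueOf() + 5000; one extra second before the tasks come due
const runAt = new Date(now.valueOf() + 6000);
console.warn(`tasks become claimable at ${runAt.toISOString()}`);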
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_04.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_05.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_06.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_07.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_08.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_09.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_10.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_11.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_12.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_13.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_14.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_15.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_16.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_17.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_18.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_19.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_20.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_21.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_22.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_23.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_24.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_25.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_26.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_27.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_28.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_29.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_30.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_31.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_32.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_33.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_34.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_35.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_36.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_37.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_38.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts
index fcfd38774d6f9..ab9b132a78c75 100644
--- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts
+++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_39.test.ts
@@ -172,7 +172,7 @@ describe('capacity based claiming', () => {
     times(10, () => ids.push(uuidV4()));
 
     const now = new Date();
-    const runAt = new Date(now.valueOf() + 5000);
+    const runAt = new Date(now.valueOf() + 6000);
     for (const id of ids) {
       await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
         id,
diff --git
a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts index fcfd38774d6f9..ab9b132a78c75 100644 --- a/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts +++ b/x-pack/platform/plugins/shared/task_manager/server/integration_tests/task_manager_capacity_based_claiming.test_40.test.ts @@ -172,7 +172,7 @@ describe('capacity based claiming', () => { times(10, () => ids.push(uuidV4())); const now = new Date(); - const runAt = new Date(now.valueOf() + 5000); + const runAt = new Date(now.valueOf() + 6000); for (const id of ids) { await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, { id,
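For reference, the single functional change repeated across test_24 through test_40 above is the scheduling offset: each test injects 10 tasks whose runAt is pushed into the future so that all documents are indexed before any become claimable, and the offset grows from 5 s to 6 s. A minimal TypeScript sketch of that shared setup follows; injectTask, kibanaServer, and the taskType field are assumptions drawn from the hunk context (the real helper lives in the integration-test harness), and the code runs inside an async test callback, not at module scope:

    // Sketch only: reconstructs the repeated test setup from the hunks above.
    // `injectTask` and `kibanaServer` are assumed test-harness helpers.
    import { v4 as uuidV4 } from 'uuid';
    import { times } from 'lodash';

    const ids: string[] = [];
    times(10, () => ids.push(uuidV4()));

    const now = new Date();
    // Schedule every task 6 seconds out so none is due until all are injected.
    const runAt = new Date(now.valueOf() + 6000);

    for (const id of ids) {
      await injectTask(kibanaServer.coreStart.elasticsearch.client.asInternalUser, {
        id,
        runAt,
        taskType: 'sampleTask', // hypothetical field, for illustration only
      });
    }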