From 1719e635e704eee8d2dbd92a771d828f6e6db11f Mon Sep 17 00:00:00 2001
From: Samiul Monir
Date: Mon, 16 Dec 2024 09:39:54 -0500
Subject: [PATCH 01/18] Add package by extracting UI components from the AI
 connector
---
.github/CODEOWNERS | 1 +
package.json | 1 +
tsconfig.base.json | 2 +
x-pack/.i18nrc.json | 1 +
.../README.md | 7 +
.../kbn-inference-endpoint-ui-common/index.ts | 10 +
.../jest.config.js | 12 +
.../kibana.jsonc | 12 +
.../package.json | 6 +
.../components/additional_options_fields.tsx | 291 ++++++++++++++
.../configuration/configuration_field.tsx | 232 +++++++++++
.../configuration_form_items.tsx | 90 +++++
.../configuration/configuration_utils.ts | 49 +++
.../provider_config_hidden_field.tsx | 41 ++
.../provider_secret_hidden_field.tsx | 42 ++
.../src/components/inference_services.tsx | 359 ++++++++++++++++++
.../providers/assets/images/alibaba_cloud.svg | 3 +
.../assets/images/amazon_bedrock.svg | 11 +
.../providers/assets/images/anthropic.svg | 3 +
.../assets/images/azure_ai_studio.svg | 44 +++
.../providers/assets/images/azure_open_ai.svg | 9 +
.../providers/assets/images/cohere.svg | 9 +
.../providers/assets/images/elastic.svg | 16 +
.../assets/images/google_ai_studio.svg | 6 +
.../providers/assets/images/hugging_face.svg | 10 +
.../providers/assets/images/ibm_watsonx.svg | 3 +
.../providers/assets/images/mistral.svg | 34 ++
.../providers/assets/images/open_ai.svg | 3 +
.../service_provider.tsx | 125 ++++++
.../src/components/providers/selectable.tsx | 133 +++++++
.../src/constants.ts | 24 ++
.../src/translations.ts | 129 +++++++
.../src/types/types.ts | 51 +++
.../src/utils/helpers.ts | 80 ++++
.../tsconfig.json | 26 ++
yarn.lock | 4 +
36 files changed, 1879 insertions(+)
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/README.md
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/index.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/kibana.jsonc
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/package.json
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_field.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_form_items.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_utils.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_config_hidden_field.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_secret_hidden_field.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/alibaba_cloud.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/amazon_bedrock.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/anthropic.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_ai_studio.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_open_ai.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/cohere.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/elastic.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/google_ai_studio.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/hugging_face.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/ibm_watsonx.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/mistral.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/open_ai.svg
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/constants.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/translations.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/types/types.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/utils/helpers.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/tsconfig.json
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index a6d5f85891f62..1e2b72acdcc1f 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -764,6 +764,7 @@ x-pack/packages/ai-infra/product-doc-artifact-builder @elastic/appex-ai-infra
x-pack/packages/kbn-ai-assistant @elastic/search-kibana
x-pack/packages/kbn-alerting-comparators @elastic/response-ops
x-pack/packages/kbn-alerting-state-types @elastic/response-ops
+x-pack/packages/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai
x-pack/packages/kbn-random-sampling @elastic/kibana-visualizations
x-pack/packages/kbn-synthetics-private-location @elastic/obs-ux-management-team
x-pack/packages/maps/vector_tile_utils @elastic/kibana-presentation
diff --git a/package.json b/package.json
index 0b62d48e35a04..81f8fe520a948 100644
--- a/package.json
+++ b/package.json
@@ -578,6 +578,7 @@
"@kbn/index-management-shared-types": "link:x-pack/platform/packages/shared/index-management/index_management_shared_types",
"@kbn/index-patterns-test-plugin": "link:test/plugin_functional/plugins/index_patterns",
"@kbn/inference-common": "link:x-pack/platform/packages/shared/ai-infra/inference-common",
+ "@kbn/inference-endpoint-ui-common": "link:x-pack/packages/kbn-inference-endpoint-ui-common",
"@kbn/inference-plugin": "link:x-pack/platform/plugins/shared/inference",
"@kbn/inference_integration_flyout": "link:x-pack/platform/packages/private/ml/inference_integration_flyout",
"@kbn/infra-forge": "link:x-pack/platform/packages/private/kbn-infra-forge",
diff --git a/tsconfig.base.json b/tsconfig.base.json
index 10c2066c09866..05bc9d4a33efe 100644
--- a/tsconfig.base.json
+++ b/tsconfig.base.json
@@ -1066,6 +1066,8 @@
"@kbn/inference_integration_flyout/*": ["x-pack/platform/packages/private/ml/inference_integration_flyout/*"],
"@kbn/inference-common": ["x-pack/platform/packages/shared/ai-infra/inference-common"],
"@kbn/inference-common/*": ["x-pack/platform/packages/shared/ai-infra/inference-common/*"],
+ "@kbn/inference-endpoint-ui-common": ["x-pack/packages/kbn-inference-endpoint-ui-common"],
+ "@kbn/inference-endpoint-ui-common/*": ["x-pack/packages/kbn-inference-endpoint-ui-common/*"],
"@kbn/inference-plugin": ["x-pack/platform/plugins/shared/inference"],
"@kbn/inference-plugin/*": ["x-pack/platform/plugins/shared/inference/*"],
"@kbn/infra-forge": ["x-pack/platform/packages/private/kbn-infra-forge"],
diff --git a/x-pack/.i18nrc.json b/x-pack/.i18nrc.json
index c01b9ef40aed4..34588ed936bf5 100644
--- a/x-pack/.i18nrc.json
+++ b/x-pack/.i18nrc.json
@@ -46,6 +46,7 @@
"xpack.dataVisualizer": "platform/plugins/private/data_visualizer",
"xpack.exploratoryView": "solutions/observability/plugins/exploratory_view",
"xpack.fileUpload": "plugins/file_upload",
+ "xpack.inferenceEndpointUICommon": "packages/kbn-inference-endpoint-ui-common",
"xpack.globalSearch": [
"plugins/global_search"
],
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/README.md b/x-pack/packages/kbn-inference-endpoint-ui-common/README.md
new file mode 100644
index 0000000000000..206267522f29d
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/README.md
@@ -0,0 +1,7 @@
+# @kbn/inference-endpoint-ui-common
+
+`Inference Endpoint UI Common` is a shared UI library for creating AI connectors and inference endpoints.
+
+This package provides:
+
+- Components for rendering the GenAI services and their associated fields
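
A minimal consumer sketch (not part of the patch; component and variable names are illustrative): the exported `InferenceServices` component relies on the `@kbn/es-ui-shared-plugin` hook form, so a consuming plugin would wrap it in `Form`/`useForm` and pass a provider list it fetched elsewhere (for example from the inference services route added later in this series).

import React from 'react';
import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
import { InferenceServices, type InferenceProvider } from '@kbn/inference-endpoint-ui-common';

// Hypothetical consumer: the caller owns the form and the fetched provider list.
export const CreateInferenceEndpointForm: React.FC<{ providers: InferenceProvider[] }> = ({
  providers,
}) => {
  const { form } = useForm();

  return (
    <Form form={form}>
      {/* Renders provider selection, task type and provider-specific configuration fields */}
      <InferenceServices providers={providers} />
    </Form>
  );
};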
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/index.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/index.ts
new file mode 100644
index 0000000000000..e6881f6e048af
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/index.ts
@@ -0,0 +1,10 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+export { InferenceServices } from './src/components/inference_services';
+
+export * from './src/types/types';
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js b/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
new file mode 100644
index 0000000000000..36c8ed71a7b2f
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
@@ -0,0 +1,12 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+module.exports = {
+ preset: '@kbn/test',
+ rootDir: '../..',
+ roots: ['/x-pack/packages/kbn-inference-endpoint-ui-common'],
+};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/kibana.jsonc b/x-pack/packages/kbn-inference-endpoint-ui-common/kibana.jsonc
new file mode 100644
index 0000000000000..e902264afb61a
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/kibana.jsonc
@@ -0,0 +1,12 @@
+{
+ "type": "shared-browser",
+ "id": "@kbn/inference-endpoint-ui-common",
+ "owner": [
+ "@elastic/response-ops",
+ "@elastic/appex-ai-infra",
+ "@elastic/obs-ai-assistant",
+ "@elastic/security-generative-ai"
+ ],
+ "group": "platform",
+ "visibility": "shared"
+}
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/package.json b/x-pack/packages/kbn-inference-endpoint-ui-common/package.json
new file mode 100644
index 0000000000000..c3ea31bb0a4f0
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/package.json
@@ -0,0 +1,6 @@
+{
+ "name": "@kbn/inference-endpoint-ui-common",
+ "private": true,
+ "version": "1.0.0",
+ "license": "Elastic License 2.0"
+}
\ No newline at end of file
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
new file mode 100644
index 0000000000000..c0d2e13de6435
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
@@ -0,0 +1,291 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useMemo } from 'react';
+import { css } from '@emotion/react';
+
+import {
+ EuiFormRow,
+ EuiSpacer,
+ EuiTitle,
+ EuiAccordion,
+ EuiFieldText,
+ useEuiTheme,
+ EuiTextColor,
+ EuiButtonGroup,
+ EuiPanel,
+ EuiButtonEmpty,
+ EuiCopy,
+ EuiButton,
+ useEuiFontSize,
+ EuiText,
+} from '@elastic/eui';
+import {
+ getFieldValidityAndErrorMessage,
+ UseField,
+ useFormContext,
+} from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import { FormattedMessage } from '@kbn/i18n-react';
+
+import { fieldValidators } from '@kbn/es-ui-shared-plugin/static/forms/helpers';
+import { ConfigurationFormItems } from './configuration/configuration_form_items';
+import * as i18n from '../translations';
+import { DEFAULT_TASK_TYPE } from '../constants';
+import { Config, ConfigEntryView } from '../types/types';
+import { TaskTypeOption } from '../utils/helpers';
+
+// Custom trigger button CSS
+const buttonCss = css`
+ &:hover {
+ text-decoration: none;
+ }
+`;
+
+interface AdditionalOptionsFieldsProps {
+ config: Config;
+ optionalProviderFormFields: ConfigEntryView[];
+ onSetProviderConfigEntry: (key: string, value: unknown) => Promise<void>;
+ onTaskTypeOptionsSelect: (taskType: string, provider?: string) => void;
+ selectedTaskType?: string;
+ taskTypeFormFields: ConfigEntryView[];
+ taskTypeOptions: TaskTypeOption[];
+}
+
+export const AdditionalOptionsFields: React.FC<AdditionalOptionsFieldsProps> = ({
+ config,
+ taskTypeOptions,
+ optionalProviderFormFields,
+ taskTypeFormFields,
+ selectedTaskType,
+ onSetProviderConfigEntry,
+ onTaskTypeOptionsSelect,
+}) => {
+ const xsFontSize = useEuiFontSize('xs').fontSize;
+ const { euiTheme } = useEuiTheme();
+ const { setFieldValue } = useFormContext();
+
+ const taskTypeSettings = useMemo(
+ () =>
+ selectedTaskType || config.taskType?.length ? (
+ <>
+
+
+
+
+
+
+
+
+
+
+ {(field) => {
+ const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
+
+ return (
+
+ {taskTypeOptions.length === 1 ? (
+ onTaskTypeOptionsSelect(config.taskType)}
+ >
+ {config.taskType}
+
+ ) : (
+ onTaskTypeOptionsSelect(id)}
+ options={taskTypeOptions}
+ color="text"
+ type="single"
+ />
+ )}
+
+ );
+ }}
+
+ >
+ ) : null,
+ [
+ selectedTaskType,
+ config.taskType,
+ xsFontSize,
+ euiTheme.colors,
+ taskTypeOptions,
+ onTaskTypeOptionsSelect,
+ ]
+ );
+
+ const inferenceUri = useMemo(() => `_inference/${selectedTaskType}/`, [selectedTaskType]);
+
+ return (
+
+
+
+ }
+ initialIsOpen={true}
+ >
+
+
+ {optionalProviderFormFields.length > 0 ? (
+ <>
+
+
+
+
+
+
+
+
+
+
+
+ >
+ ) : null}
+
+ {taskTypeSettings}
+
+
+
+
+
+
+
+
+
+
+
+
+ {(field) => {
+ const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
+
+ return (
+
+ }
+ >
+ {
+ setFieldValue('config.inferenceId', e.target.value);
+ }}
+ prepend={inferenceUri}
+ append={
+
+ {(copy) => (
+
+
+
+ )}
+
+ }
+ />
+
+ );
+ }}
+
+
+
+ );
+};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_field.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_field.tsx
new file mode 100644
index 0000000000000..fb278b826146d
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_field.tsx
@@ -0,0 +1,232 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useEffect, useState } from 'react';
+
+import {
+ EuiAccordion,
+ EuiFieldText,
+ EuiFieldPassword,
+ EuiSwitch,
+ EuiTextArea,
+ EuiFieldNumber,
+} from '@elastic/eui';
+
+import { isEmpty } from 'lodash/fp';
+import { ConfigEntryView, FieldType } from '../../types/types';
+import { ensureBooleanType, ensureCorrectTyping, ensureStringType } from './configuration_utils';
+
+interface ConfigurationFieldProps {
+ configEntry: ConfigEntryView;
+ isLoading: boolean;
+ setConfigValue: (value: number | string | boolean | null) => void;
+}
+
+interface ConfigInputFieldProps {
+ configEntry: ConfigEntryView;
+ isLoading: boolean;
+ validateAndSetConfigValue: (value: string | boolean) => void;
+}
+export const ConfigInputField: React.FC<ConfigInputFieldProps> = ({
+ configEntry,
+ isLoading,
+ validateAndSetConfigValue,
+}) => {
+ // eslint-disable-next-line @typescript-eslint/naming-convention
+ const { isValid, value, default_value, key } = configEntry;
+ const [innerValue, setInnerValue] = useState(
+ !value || value.toString().length === 0 ? default_value : value
+ );
+
+ useEffect(() => {
+ setInnerValue(!value || value.toString().length === 0 ? default_value : value);
+ }, [default_value, value]);
+ return (
+ {
+ setInnerValue(event.target.value);
+ validateAndSetConfigValue(event.target.value);
+ }}
+ />
+ );
+};
+
+export const ConfigSwitchField: React.FC<ConfigInputFieldProps> = ({
+ configEntry,
+ isLoading,
+ validateAndSetConfigValue,
+}) => {
+ // eslint-disable-next-line @typescript-eslint/naming-convention
+ const { label, value, default_value, key } = configEntry;
+ const [innerValue, setInnerValue] = useState(value ?? default_value);
+ useEffect(() => {
+ setInnerValue(value ?? default_value);
+ }, [default_value, value]);
+ return (
+ {label}
}
+ onChange={(event) => {
+ setInnerValue(event.target.checked);
+ validateAndSetConfigValue(event.target.checked);
+ }}
+ />
+ );
+};
+
+export const ConfigInputTextArea: React.FC<ConfigInputFieldProps> = ({
+ isLoading,
+ configEntry,
+ validateAndSetConfigValue,
+}) => {
+ // eslint-disable-next-line @typescript-eslint/naming-convention
+ const { isValid, value, default_value, key } = configEntry;
+ const [innerValue, setInnerValue] = useState(value ?? default_value);
+ useEffect(() => {
+ setInnerValue(value ?? '');
+ }, [default_value, value]);
+ return (
+ {
+ setInnerValue(event.target.value);
+ validateAndSetConfigValue(event.target.value);
+ }}
+ />
+ );
+};
+
+export const ConfigNumberField: React.FC<ConfigInputFieldProps> = ({
+ configEntry,
+ isLoading,
+ validateAndSetConfigValue,
+}) => {
+ // eslint-disable-next-line @typescript-eslint/naming-convention
+ const { isValid, value, default_value, key } = configEntry;
+ const [innerValue, setInnerValue] = useState(value ?? default_value);
+ useEffect(() => {
+ setInnerValue(!value || value.toString().length === 0 ? default_value : value);
+ }, [default_value, value]);
+ return (
+ {
+ const newValue = isEmpty(event.target.value) ? '0' : event.target.value;
+ setInnerValue(newValue);
+ validateAndSetConfigValue(newValue);
+ }}
+ />
+ );
+};
+
+export const ConfigSensitiveTextArea: React.FC<ConfigInputFieldProps> = ({
+ isLoading,
+ configEntry,
+ validateAndSetConfigValue,
+}) => {
+ const { key, label } = configEntry;
+ return (
+ {label}}>
+
+
+ );
+};
+
+export const ConfigInputPassword: React.FC<ConfigInputFieldProps> = ({
+ isLoading,
+ configEntry,
+ validateAndSetConfigValue,
+}) => {
+ const { value, key } = configEntry;
+ const [innerValue, setInnerValue] = useState(value ?? null);
+ useEffect(() => {
+ setInnerValue(value ?? null);
+ }, [value]);
+ return (
+ <>
+ {
+ setInnerValue(event.target.value);
+ validateAndSetConfigValue(event.target.value);
+ }}
+ />
+ >
+ );
+};
+
+export const ConfigurationField: React.FC<ConfigurationFieldProps> = ({
+ configEntry,
+ isLoading,
+ setConfigValue,
+}) => {
+ const validateAndSetConfigValue = (value: number | string | boolean) => {
+ setConfigValue(ensureCorrectTyping(configEntry.type, value));
+ };
+
+ const { key, type, sensitive } = configEntry;
+
+ switch (type) {
+ case FieldType.INTEGER:
+ return (
+
+ );
+
+ case FieldType.BOOLEAN:
+ return (
+
+ );
+
+ default:
+ return sensitive ? (
+
+ ) : (
+
+ );
+ }
+};
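
For context on the switch above, a small sketch (not part of the patch) of how `ConfigurationField` might be driven by a single schema entry; the entry values are illustrative, only the shape follows `ConfigEntryView`.

import React from 'react';
import { FieldType, type ConfigEntryView } from '../../types/types';
import { ConfigurationField } from './configuration_field';

// Illustrative schema entry: an optional, non-sensitive integer field.
const entry: ConfigEntryView = {
  key: 'num_threads',
  label: 'Number of threads',
  description: null,
  default_value: 1,
  required: false,
  sensitive: false,
  updatable: true,
  type: FieldType.INTEGER,
  isValid: true,
  validationErrors: [],
  value: null,
};

export const Example: React.FC = () => {
  const [value, setValue] = React.useState<string | number | boolean | null>(null);

  return (
    <ConfigurationField
      configEntry={{ ...entry, value }}
      isLoading={false}
      setConfigValue={setValue} // in the real form this is persisted into config.providerConfig
    />
  );
};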
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_form_items.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_form_items.tsx
new file mode 100644
index 0000000000000..850f574c2fe2b
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_form_items.tsx
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+
+import {
+ EuiCallOut,
+ EuiFlexGroup,
+ EuiFlexItem,
+ EuiFormRow,
+ EuiSpacer,
+ EuiText,
+} from '@elastic/eui';
+
+import { ConfigEntryView } from '../../types/types';
+import { ConfigurationField } from './configuration_field';
+import * as i18n from '../../translations';
+
+interface ConfigurationFormItemsProps {
+ isLoading: boolean;
+ items: ConfigEntryView[];
+ setConfigEntry: (key: string, value: string | number | boolean | null) => void;
+ direction?: 'column' | 'row' | 'rowReverse' | 'columnReverse' | undefined;
+}
+
+export const ConfigurationFormItems: React.FC<ConfigurationFormItemsProps> = ({
+ isLoading,
+ items,
+ setConfigEntry,
+ direction,
+}) => {
+ return (
+
+ {items.map((configEntry) => {
+ const { key, isValid, label, sensitive, description, validationErrors, required } =
+ configEntry;
+
+ const helpText = description;
+ // toggle and sensitive textarea labels go next to the element, not in the row
+ const rowLabel = description ? (
+
+
+ {label}
+
+
+ ) : (
+ {label}
+ );
+
+ const optionalLabel = !required ? (
+
+ {i18n.OPTIONALTEXT}
+
+ ) : undefined;
+
+ return (
+
+
+ {
+ setConfigEntry(key, value);
+ }}
+ />
+
+ {sensitive ? (
+ <>
+
+
+ >
+ ) : null}
+
+ );
+ })}
+
+ );
+};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_utils.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_utils.ts
new file mode 100644
index 0000000000000..45e886b368443
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/configuration/configuration_utils.ts
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { FieldType } from '../../types/types';
+
+export const validIntInput = (value: string | number | boolean | null): boolean => {
+ // reject non integers (including x.0 floats), but don't validate if empty
+ return value !== null &&
+ value !== '' &&
+ (isNaN(Number(value)) ||
+ !Number.isSafeInteger(Number(value)) ||
+ ensureStringType(value).indexOf('.') >= 0)
+ ? false
+ : true;
+};
+
+export const ensureCorrectTyping = (
+ type: FieldType,
+ value: string | number | boolean | null
+): string | number | boolean | null => {
+ switch (type) {
+ case FieldType.INTEGER:
+ return validIntInput(value) ? ensureIntType(value) : value;
+ case FieldType.BOOLEAN:
+ return ensureBooleanType(value);
+ default:
+ return ensureStringType(value);
+ }
+};
+
+export const ensureStringType = (value: string | number | boolean | null): string => {
+ return value !== null ? String(value) : '';
+};
+
+export const ensureIntType = (value: string | number | boolean | null): number | null => {
+ // int is null-safe to prevent empty values from becoming zeroes
+ if (value === null || value === '') {
+ return null;
+ }
+
+ return parseInt(String(value), 10);
+};
+
+export const ensureBooleanType = (value: string | number | boolean | null): boolean => {
+ return Boolean(value);
+};
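
Illustrative expectations for the coercion helpers above (a sketch, not part of the patch):

import { FieldType } from '../../types/types';
import {
  ensureBooleanType,
  ensureCorrectTyping,
  ensureIntType,
  ensureStringType,
} from './configuration_utils';

ensureCorrectTyping(FieldType.INTEGER, '42'); // 42, valid integer strings are parsed
ensureCorrectTyping(FieldType.INTEGER, '4.2'); // '4.2', invalid input is returned unchanged for the field to flag
ensureCorrectTyping(FieldType.BOOLEAN, 0); // false
ensureIntType(''); // null, empty values never become zeroes
ensureStringType(null); // ''
ensureBooleanType('yes'); // true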
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_config_hidden_field.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_config_hidden_field.tsx
new file mode 100644
index 0000000000000..4196bd0a2b709
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_config_hidden_field.tsx
@@ -0,0 +1,41 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { UseField } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import { HiddenField } from '@kbn/es-ui-shared-plugin/static/forms/components';
+import React from 'react';
+import { ConfigEntryView } from '../../types/types';
+import { getNonEmptyValidator } from '../../utils/helpers';
+
+interface ProviderConfigHiddenFieldProps {
+ providerSchema: ConfigEntryView[];
+ setRequiredProviderFormFields: React.Dispatch<React.SetStateAction<ConfigEntryView[]>>;
+ isSubmitting: boolean;
+}
+
+export const ProviderConfigHiddenField: React.FC<ProviderConfigHiddenFieldProps> = ({
+ providerSchema,
+ setRequiredProviderFormFields,
+ isSubmitting,
+}) => (
+
+);
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_secret_hidden_field.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_secret_hidden_field.tsx
new file mode 100644
index 0000000000000..8060fe1cce13d
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/hidden_fields/provider_secret_hidden_field.tsx
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { UseField } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import React from 'react';
+import { HiddenField } from '@kbn/es-ui-shared-plugin/static/forms/components';
+import { ConfigEntryView } from '../../types/types';
+import { getNonEmptyValidator } from '../../utils/helpers';
+
+interface ProviderSecretHiddenFieldProps {
+ providerSchema: ConfigEntryView[];
+ setRequiredProviderFormFields: React.Dispatch<React.SetStateAction<ConfigEntryView[]>>;
+ isSubmitting: boolean;
+}
+
+export const ProviderSecretHiddenField: React.FC<ProviderSecretHiddenFieldProps> = ({
+ providerSchema,
+ setRequiredProviderFormFields,
+ isSubmitting,
+}) => (
+
+);
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.tsx
new file mode 100644
index 0000000000000..5e5ea592f3950
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.tsx
@@ -0,0 +1,359 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useCallback, useEffect, useMemo, useState } from 'react';
+import {
+ getFieldValidityAndErrorMessage,
+ UseField,
+ useFormContext,
+ useFormData,
+} from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import { fieldValidators } from '@kbn/es-ui-shared-plugin/static/forms/helpers';
+import {
+ EuiFieldText,
+ EuiFieldTextProps,
+ EuiFormControlLayout,
+ EuiFormRow,
+ EuiHorizontalRule,
+ EuiInputPopover,
+ EuiSpacer,
+ keys,
+} from '@elastic/eui';
+import { FormattedMessage } from '@kbn/i18n-react';
+import { ConnectorFormSchema } from '@kbn/triggers-actions-ui-plugin/public';
+
+import * as i18n from '../translations';
+import { Config, ConfigEntryView, InferenceProvider, Secrets } from '../types/types';
+import { SERVICE_PROVIDERS } from './providers/render_service_provider/service_provider';
+import { DEFAULT_TASK_TYPE, ServiceProviderKeys } from '../constants';
+import { SelectableProvider } from './providers/selectable';
+import { TaskTypeOption, generateInferenceEndpointId, getTaskTypeOptions } from '../utils/helpers';
+import { ConfigurationFormItems } from './configuration/configuration_form_items';
+import { AdditionalOptionsFields } from './additional_options_fields';
+import { ProviderSecretHiddenField } from './hidden_fields/provider_secret_hidden_field';
+import { ProviderConfigHiddenField } from './hidden_fields/provider_config_hidden_field';
+
+interface InferenceServicesProps {
+ providers: InferenceProvider[];
+}
+
+export const InferenceServices: React.FC<InferenceServicesProps> = ({ providers }) => {
+ const [isProviderPopoverOpen, setProviderPopoverOpen] = useState(false);
+ const [providerSchema, setProviderSchema] = useState<ConfigEntryView[]>([]);
+ const [taskTypeOptions, setTaskTypeOptions] = useState<TaskTypeOption[]>([]);
+ const [selectedTaskType, setSelectedTaskType] = useState<string>(DEFAULT_TASK_TYPE);
+ const [taskTypeFormFields] = useState<ConfigEntryView[]>([]);
+
+ const { updateFieldValues, setFieldValue, validateFields, isSubmitting } = useFormContext();
+ const [requiredProviderFormFields, setRequiredProviderFormFields] = useState<ConfigEntryView[]>(
+ []
+ );
+ const [optionalProviderFormFields, setOptionalProviderFormFields] = useState<ConfigEntryView[]>(
+ []
+ );
+ const [{ config, secrets }] = useFormData<ConnectorFormSchema<Config, Secrets>>({
+ watch: [
+ 'secrets.providerSecrets',
+ 'config.taskType',
+ 'config.inferenceId',
+ 'config.provider',
+ 'config.providerConfig',
+ ],
+ });
+
+ const handleProviderPopover = useCallback(() => {
+ setProviderPopoverOpen((isOpen) => !isOpen);
+ }, []);
+
+ const handleProviderClosePopover = useCallback(() => {
+ setProviderPopoverOpen(false);
+ }, []);
+
+ const handleProviderKeyboardOpen: EuiFieldTextProps['onKeyDown'] = useCallback((event: any) => {
+ if (event.key === keys.ENTER) {
+ setProviderPopoverOpen(true);
+ }
+ }, []);
+
+ const providerIcon = useMemo(
+ () =>
+ Object.keys(SERVICE_PROVIDERS).includes(config?.provider)
+ ? SERVICE_PROVIDERS[config?.provider as ServiceProviderKeys].icon
+ : undefined,
+ [config?.provider]
+ );
+
+ const providerName = useMemo(
+ () =>
+ Object.keys(SERVICE_PROVIDERS).includes(config?.provider)
+ ? SERVICE_PROVIDERS[config?.provider as ServiceProviderKeys].name
+ : config?.provider,
+ [config?.provider]
+ );
+
+ const onTaskTypeOptionsSelect = useCallback(
+ (taskType: string) => {
+ setSelectedTaskType(taskType);
+
+ const inferenceId = generateInferenceEndpointId({
+ ...config,
+ taskType,
+ });
+
+ updateFieldValues({
+ config: {
+ taskType,
+ inferenceId,
+ },
+ });
+ },
+ [config, updateFieldValues]
+ );
+
+ const onProviderChange = useCallback(
+ (provider?: string) => {
+ const newProvider = providers?.find((p) => p.service === provider);
+
+ setTaskTypeOptions(getTaskTypeOptions(newProvider?.task_types ?? []));
+ if (newProvider?.task_types && newProvider?.task_types.length > 0) {
+ onTaskTypeOptionsSelect(newProvider?.task_types[0]);
+ }
+
+ // Update providerSchema
+ const newProviderSchema = Object.keys(newProvider?.configurations ?? {}).map((k) => ({
+ key: k,
+ isValid: true,
+ ...newProvider?.configurations[k],
+ })) as ConfigEntryView[];
+
+ setProviderSchema(newProviderSchema);
+
+ const defaultProviderConfig: Record<string, unknown> = {};
+ const defaultProviderSecrets: Record<string, unknown> = {};
+
+ Object.keys(newProvider?.configurations ?? {}).forEach((k) => {
+ if (!newProvider?.configurations[k].sensitive) {
+ if (newProvider?.configurations[k] && !!newProvider?.configurations[k].default_value) {
+ defaultProviderConfig[k] = newProvider.configurations[k].default_value;
+ } else {
+ defaultProviderConfig[k] = null;
+ }
+ } else {
+ defaultProviderSecrets[k] = null;
+ }
+ });
+ const inferenceId = generateInferenceEndpointId({
+ ...config,
+ provider: newProvider?.service ?? '',
+ taskType: newProvider?.task_types[0] ?? DEFAULT_TASK_TYPE,
+ });
+
+ updateFieldValues({
+ config: {
+ provider: newProvider?.service,
+ providerConfig: defaultProviderConfig,
+ inferenceId,
+ },
+ secrets: {
+ providerSecrets: defaultProviderSecrets,
+ },
+ });
+ },
+ [config, onTaskTypeOptionsSelect, providers, updateFieldValues]
+ );
+
+ const onSetProviderConfigEntry = useCallback(
+ async (key: string, value: unknown) => {
+ const entry: ConfigEntryView | undefined = providerSchema.find(
+ (p: ConfigEntryView) => p.key === key
+ );
+ if (entry) {
+ if (entry.sensitive) {
+ if (!secrets.providerSecrets) {
+ secrets.providerSecrets = {};
+ }
+ const newSecrets = { ...secrets.providerSecrets };
+ newSecrets[key] = value;
+ setFieldValue('secrets.providerSecrets', newSecrets);
+ await validateFields(['secrets.providerSecrets']);
+ } else {
+ if (!config.providerConfig) {
+ config.providerConfig = {};
+ }
+ const newConfig = { ...config.providerConfig };
+ newConfig[key] = value;
+ setFieldValue('config.providerConfig', newConfig);
+ await validateFields(['config.providerConfig']);
+ }
+ }
+ },
+ [config, providerSchema, secrets, setFieldValue, validateFields]
+ );
+
+ const onClearProvider = useCallback(() => {
+ onProviderChange();
+ setFieldValue('config.taskType', '');
+ setFieldValue('config.provider', '');
+ }, [onProviderChange, setFieldValue]);
+
+ const providerSuperSelect = useCallback(
+ (isInvalid: boolean) => (
+
+ {
+ /* Intentionally left blank as onChange is required to avoid console error
+ but not used in this context
+ */
+ }}
+ />
+
+ ),
+ [
+ config?.provider,
+ handleProviderKeyboardOpen,
+ handleProviderPopover,
+ isProviderPopoverOpen,
+ onClearProvider,
+ providerIcon,
+ providerName,
+ ]
+ );
+
+
+ useEffect(() => {
+ if (isSubmitting) {
+ validateFields(['config.providerConfig']);
+ validateFields(['secrets.providerSecrets']);
+ }
+ }, [isSubmitting, config, validateFields]);
+
+ useEffect(() => {
+ // Set values from the provider secrets and config to the schema
+ const existingConfiguration = providerSchema
+ ? providerSchema.map((item: ConfigEntryView) => {
+ const itemValue = item;
+ itemValue.isValid = true;
+ if (item.sensitive && secrets?.providerSecrets) {
+ itemValue.value = secrets?.providerSecrets[item.key] as any;
+ } else if (config?.providerConfig) {
+ itemValue.value = config?.providerConfig[item.key] as any;
+ }
+ return itemValue;
+ })
+ : [];
+
+ setOptionalProviderFormFields(existingConfiguration.filter((p) => !p.required && !p.sensitive));
+ setRequiredProviderFormFields(existingConfiguration.filter((p) => p.required || p.sensitive));
+ }, [config?.providerConfig, providerSchema, secrets]);
+
+ return (
+ <>
+
+ {(field) => {
+ const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
+ const selectInput = providerSuperSelect(isInvalid);
+ return (
+
+ }
+ isInvalid={isInvalid}
+ error={errorMessage}
+ >
+
+
+
+
+ );
+ }}
+
+ {config?.provider ? (
+ <>
+
+
+
+
+
+
+
+
+ >
+ ) : null}
+ >
+ );
+};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/alibaba_cloud.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/alibaba_cloud.svg
new file mode 100644
index 0000000000000..1ae552d509c3a
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/alibaba_cloud.svg
@@ -0,0 +1,3 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/amazon_bedrock.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/amazon_bedrock.svg
new file mode 100644
index 0000000000000..f8815d4f75ec5
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/amazon_bedrock.svg
@@ -0,0 +1,11 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/anthropic.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/anthropic.svg
new file mode 100644
index 0000000000000..c361cda86a7df
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/anthropic.svg
@@ -0,0 +1,3 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_ai_studio.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_ai_studio.svg
new file mode 100644
index 0000000000000..405e182a10394
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_ai_studio.svg
@@ -0,0 +1,44 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_open_ai.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_open_ai.svg
new file mode 100644
index 0000000000000..122c0c65af13c
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/azure_open_ai.svg
@@ -0,0 +1,9 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/cohere.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/cohere.svg
new file mode 100644
index 0000000000000..69953809fec35
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/cohere.svg
@@ -0,0 +1,9 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/elastic.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/elastic.svg
new file mode 100644
index 0000000000000..e763c2e2f2ab6
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/elastic.svg
@@ -0,0 +1,16 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/google_ai_studio.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/google_ai_studio.svg
new file mode 100644
index 0000000000000..b6e34ae15c9e4
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/google_ai_studio.svg
@@ -0,0 +1,6 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/hugging_face.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/hugging_face.svg
new file mode 100644
index 0000000000000..87ac70c5a18f4
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/hugging_face.svg
@@ -0,0 +1,10 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/ibm_watsonx.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/ibm_watsonx.svg
new file mode 100644
index 0000000000000..5883eff3884d6
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/ibm_watsonx.svg
@@ -0,0 +1,3 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/mistral.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/mistral.svg
new file mode 100644
index 0000000000000..f62258a327594
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/mistral.svg
@@ -0,0 +1,34 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/open_ai.svg b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/open_ai.svg
new file mode 100644
index 0000000000000..9ddc8f8fd63b8
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/assets/images/open_ai.svg
@@ -0,0 +1,3 @@
+
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.tsx
new file mode 100644
index 0000000000000..e50cfae1d30bc
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.tsx
@@ -0,0 +1,125 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { EuiHighlight, EuiIcon } from '@elastic/eui';
+import React from 'react';
+
+import { ServiceProviderKeys } from '../../../constants';
+import elasticIcon from '../assets/images/elastic.svg';
+import huggingFaceIcon from '../assets/images/hugging_face.svg';
+import cohereIcon from '../assets/images/cohere.svg';
+import openAIIcon from '../assets/images/open_ai.svg';
+import azureAIStudioIcon from '../assets/images/azure_ai_studio.svg';
+import azureOpenAIIcon from '../assets/images/azure_open_ai.svg';
+import googleAIStudioIcon from '../assets/images/google_ai_studio.svg';
+import mistralIcon from '../assets/images/mistral.svg';
+import amazonBedrockIcon from '../assets/images/amazon_bedrock.svg';
+import anthropicIcon from '../assets/images/anthropic.svg';
+import alibabaCloudIcon from '../assets/images/alibaba_cloud.svg';
+import ibmWatsonxIcon from '../assets/images/ibm_watsonx.svg';
+
+interface ServiceProviderProps {
+ providerKey: ServiceProviderKeys;
+ searchValue?: string;
+}
+
+export type ProviderSolution = 'Observability' | 'Security' | 'Search';
+
+interface ServiceProviderRecord {
+ icon: string;
+ name: string;
+ solutions: ProviderSolution[];
+}
+
+export const SERVICE_PROVIDERS: Record = {
+ [ServiceProviderKeys.amazonbedrock]: {
+ icon: amazonBedrockIcon,
+ name: 'Amazon Bedrock',
+ solutions: ['Observability', 'Security', 'Search'],
+ },
+ [ServiceProviderKeys.azureaistudio]: {
+ icon: azureAIStudioIcon,
+ name: 'Azure AI Studio',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.azureopenai]: {
+ icon: azureOpenAIIcon,
+ name: 'Azure OpenAI',
+ solutions: ['Observability', 'Security', 'Search'],
+ },
+ [ServiceProviderKeys.anthropic]: {
+ icon: anthropicIcon,
+ name: 'Anthropic',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.cohere]: {
+ icon: cohereIcon,
+ name: 'Cohere',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.elasticsearch]: {
+ icon: elasticIcon,
+ name: 'Elasticsearch',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.googleaistudio]: {
+ icon: googleAIStudioIcon,
+ name: 'Google AI Studio',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.googlevertexai]: {
+ icon: googleAIStudioIcon,
+ name: 'Google Vertex AI',
+ solutions: ['Observability', 'Security', 'Search'],
+ },
+ [ServiceProviderKeys.hugging_face]: {
+ icon: huggingFaceIcon,
+ name: 'Hugging Face',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.mistral]: {
+ icon: mistralIcon,
+ name: 'Mistral',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.openai]: {
+ icon: openAIIcon,
+ name: 'OpenAI',
+ solutions: ['Observability', 'Security', 'Search'],
+ },
+ [ServiceProviderKeys['alibabacloud-ai-search']]: {
+ icon: alibabaCloudIcon,
+ name: 'AlibabaCloud AI Search',
+ solutions: ['Search'],
+ },
+ [ServiceProviderKeys.watsonxai]: {
+ icon: ibmWatsonxIcon,
+ name: 'IBM Watsonx',
+ solutions: ['Search'],
+ },
+};
+
+export const ServiceProviderIcon: React.FC<ServiceProviderProps> = ({ providerKey }) => {
+ const provider = SERVICE_PROVIDERS[providerKey];
+
+ return provider ? (
+
+ ) : null;
+};
+
+export const ServiceProviderName: React.FC<ServiceProviderProps> = ({
+ providerKey,
+ searchValue,
+}) => {
+ const provider = SERVICE_PROVIDERS[providerKey];
+
+ return provider ? (
+ {provider.name}
+ ) : (
+ {providerKey}
+ );
+};
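
A minimal usage sketch for the two rendering helpers above (the wrapper component name is hypothetical):

import React from 'react';
import { ServiceProviderKeys } from '../../../constants';
import { ServiceProviderIcon, ServiceProviderName } from './service_provider';

// Renders the OpenAI logo plus its display name, highlighting the current search term.
export const OpenAIOption: React.FC<{ searchValue?: string }> = ({ searchValue }) => (
  <>
    <ServiceProviderIcon providerKey={ServiceProviderKeys.openai} />
    <ServiceProviderName providerKey={ServiceProviderKeys.openai} searchValue={searchValue} />
  </>
);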
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.tsx
new file mode 100644
index 0000000000000..ab125ce273366
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.tsx
@@ -0,0 +1,133 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import {
+ EuiBadge,
+ EuiFlexGroup,
+ EuiFlexItem,
+ EuiSelectable,
+ EuiSelectableOption,
+ EuiSelectableProps,
+} from '@elastic/eui';
+import React, { useCallback, useMemo } from 'react';
+
+import {
+ ProviderSolution,
+ SERVICE_PROVIDERS,
+ ServiceProviderIcon,
+ ServiceProviderName,
+} from './render_service_provider/service_provider';
+import { ServiceProviderKeys } from '../../constants';
+import { InferenceProvider } from '../../types/types';
+import * as i18n from '../../translations';
+
+interface SelectableProviderProps {
+ providers: InferenceProvider[];
+ onClosePopover: () => void;
+ onProviderChange: (provider?: string) => void;
+}
+
+export const SelectableProvider: React.FC<SelectableProviderProps> = ({
+ providers,
+ onClosePopover,
+ onProviderChange,
+}) => {
+ const renderProviderOption = useCallback>(
+ (option, searchValue) => {
+ const provider = Object.keys(SERVICE_PROVIDERS).includes(option.label)
+ ? SERVICE_PROVIDERS[option.label as ServiceProviderKeys]
+ : undefined;
+
+ const supportedBySolutions = (provider &&
+ provider.solutions.map((solution) => (
+
+ {solution}
+
+ ))) ?? (
+
+ {'Search' as ProviderSolution}
+
+ );
+ return (
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {supportedBySolutions}
+
+
+
+ );
+ },
+ []
+ );
+
+ const EuiSelectableContent = useCallback>(
+ (list, search) => (
+ <>
+ {search}
+ {list}
+ >
+ ),
+ []
+ );
+
+ const searchProps: EuiSelectableProps['searchProps'] = useMemo(
+ () => ({
+ 'data-test-subj': 'provider-super-select-search-box',
+ placeholder: i18n.SEARCHLABEL,
+ incremental: false,
+ compressed: true,
+ fullWidth: true,
+ }),
+ []
+ );
+
+ const handleProviderChange = useCallback>(
+ (options) => {
+ const selectedProvider = options.filter((option) => option.checked === 'on');
+ if (selectedProvider != null && selectedProvider.length > 0) {
+ onProviderChange(selectedProvider[0].label);
+ }
+ onClosePopover();
+ },
+ [onClosePopover, onProviderChange]
+ );
+
+ const getSelectableOptions = useCallback(() => {
+ return providers?.map((p) => ({
+ label: p.service,
+ key: p.service,
+ })) as EuiSelectableOption[];
+ }, [providers]);
+
+ return (
+
+ {EuiSelectableContent}
+
+ );
+};
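
And a sketch of how the selectable list might be wired up by a popover owner (callback bodies are placeholders, not part of the patch):

import React from 'react';
import type { InferenceProvider } from '../../types/types';
import { SelectableProvider } from './selectable';

export const ProviderPicker: React.FC<{ providers: InferenceProvider[] }> = ({ providers }) => (
  <SelectableProvider
    providers={providers}
    onProviderChange={(provider) => {
      // write the chosen service into config.provider in the surrounding form
    }}
    onClosePopover={() => {
      // close the EuiInputPopover that hosts this list
    }}
  />
);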
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/constants.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/src/constants.ts
new file mode 100644
index 0000000000000..1ea2152ba2f49
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/constants.ts
@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+export enum ServiceProviderKeys {
+ amazonbedrock = 'amazonbedrock',
+ azureopenai = 'azureopenai',
+ azureaistudio = 'azureaistudio',
+ cohere = 'cohere',
+ elasticsearch = 'elasticsearch',
+ googleaistudio = 'googleaistudio',
+ googlevertexai = 'googlevertexai',
+ hugging_face = 'hugging_face',
+ mistral = 'mistral',
+ openai = 'openai',
+ anthropic = 'anthropic',
+ watsonxai = 'watsonxai',
+ 'alibabacloud-ai-search' = 'alibabacloud-ai-search',
+}
+
+export const DEFAULT_TASK_TYPE = 'completion';
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/translations.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/src/translations.ts
new file mode 100644
index 0000000000000..6258fc94687fe
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/translations.ts
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { i18n } from '@kbn/i18n';
+
+export const getRequiredMessage = (field: string) => {
+ return i18n.translate('xpack.inferenceEndpointUICommon.components.requiredGenericTextField', {
+ defaultMessage: '{field} is required.',
+ values: { field },
+ });
+};
+
+export const INPUT_INVALID = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.params.error.invalidInputText',
+ {
+ defaultMessage: 'Input does not have a valid Array format.',
+ }
+);
+
+export const INVALID_ACTION = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.invalidActionText',
+ {
+ defaultMessage: 'Invalid action name.',
+ }
+);
+
+export const BODY = i18n.translate('xpack.inferenceEndpointUICommon.components.bodyFieldLabel', {
+ defaultMessage: 'Body',
+});
+
+export const INPUT = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.completionInputLabel',
+ {
+ defaultMessage: 'Input',
+ }
+);
+
+export const INPUT_TYPE = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.completionInputTypeLabel',
+ {
+ defaultMessage: 'Input type',
+ }
+);
+
+export const QUERY = i18n.translate('xpack.inferenceEndpointUICommon.components.rerankQueryLabel', {
+ defaultMessage: 'Query',
+});
+
+export const BODY_DESCRIPTION = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.bodyCodeEditorAriaLabel',
+ {
+ defaultMessage: 'Code editor',
+ }
+);
+
+export const TASK_TYPE = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.taskTypeFieldLabel',
+ {
+ defaultMessage: 'Task type',
+ }
+);
+
+export const PROVIDER = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.providerFieldLabel',
+ {
+ defaultMessage: 'Provider',
+ }
+);
+
+export const PROVIDER_REQUIRED = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.error.requiredProviderText',
+ {
+ defaultMessage: 'Provider is required.',
+ }
+);
+
+export const DOCUMENTATION = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.documentation',
+ {
+ defaultMessage: 'Inference API documentation',
+ }
+);
+
+export const SELECT_PROVIDER = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.selectProvider',
+ {
+ defaultMessage: 'Select a service',
+ }
+);
+
+export const COPY_TOOLTIP = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.copy.tooltip',
+ {
+ defaultMessage: 'Copy to clipboard',
+ }
+);
+
+export const COPIED_TOOLTIP = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.copied.tooltip',
+ {
+ defaultMessage: 'Copied!',
+ }
+);
+
+export const SEARCHLABEL = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.searchLabel',
+ {
+ defaultMessage: 'Search',
+ }
+);
+
+export const OPTIONALTEXT = i18n.translate(
+ 'xpack.inferenceEndpointUICommon.components.optionalText',
+ {
+ defaultMessage: 'Optional',
+ }
+);
+
+export const RE_ENTER_SECRETS = (label: string) => {
+ return i18n.translate('xpack.inferenceEndpointUICommon.components.reEnterSecretsMessage', {
+ defaultMessage:
+ 'You will need to re-enter your {label} each time you edit the inference endpoint',
+ values: { label },
+ });
+};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/types/types.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/src/types/types.ts
new file mode 100644
index 0000000000000..fc1f32b668811
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/types/types.ts
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+import { FieldType } from '@kbn/search-connectors';
+
+export { FieldType } from '@kbn/search-connectors';
+
+export interface ConfigProperties {
+ default_value: string | number | boolean | null;
+ description: string | null;
+ label: string;
+ required: boolean;
+ sensitive: boolean;
+ updatable: boolean;
+ type: FieldType;
+}
+
+interface ConfigEntry extends ConfigProperties {
+ key: string;
+}
+
+export interface ConfigEntryView extends ConfigEntry {
+ isValid: boolean;
+ validationErrors: string[];
+ value: string | number | boolean | null;
+}
+
+export type FieldsConfiguration = Record;
+
+export interface InferenceProvider {
+ service: string;
+ name: string;
+ task_types: string[];
+ logo?: string;
+ configurations: FieldsConfiguration;
+}
+
+export interface Config {
+ taskType: string;
+ taskTypeConfig?: Record;
+ inferenceId: string;
+ provider: string;
+ providerConfig?: Record;
+}
+
+export interface Secrets {
+ providerSecrets?: Record;
+}
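
For readers of the types above, a hypothetical provider object as the UI would consume it; the service and field names are illustrative, only the shape is prescribed by `InferenceProvider` and `ConfigProperties`.

import { FieldType, type InferenceProvider } from '@kbn/inference-endpoint-ui-common';

export const exampleProvider: InferenceProvider = {
  service: 'openai',
  name: 'OpenAI',
  task_types: ['completion', 'text_embedding'],
  configurations: {
    // one non-sensitive, optional integer field
    dimensions: {
      default_value: null,
      description: 'Embedding dimensions',
      label: 'Dimensions',
      required: false,
      sensitive: false,
      updatable: true,
      type: FieldType.INTEGER,
    },
  },
};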
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/utils/helpers.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/src/utils/helpers.ts
new file mode 100644
index 0000000000000..25776e5f71f2a
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/utils/helpers.ts
@@ -0,0 +1,80 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { ValidationFunc } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import { isEmpty } from 'lodash/fp';
+import { Config, ConfigEntryView } from '../types/types';
+import * as i18n from '../translations';
+
+export interface TaskTypeOption {
+ id: string;
+ value: string;
+ label: string;
+}
+
+export const getTaskTypeOptions = (taskTypes: string[]): TaskTypeOption[] =>
+ taskTypes.map((taskType) => ({
+ id: taskType,
+ label: taskType,
+ value: taskType,
+ }));
+
+export const generateInferenceEndpointId = (config: Config) => {
+ const taskTypeSuffix = config.taskType ? `${config.taskType}-` : '';
+ const inferenceEndpointId = `${config.provider}-${taskTypeSuffix}${Math.random()
+ .toString(36)
+ .slice(2)}`;
+ return inferenceEndpointId;
+};
+
+export const getNonEmptyValidator = (
+ schema: ConfigEntryView[],
+ validationEventHandler: (fieldsWithErrors: ConfigEntryView[]) => void,
+ isSubmitting: boolean = false,
+ isSecrets: boolean = false
+) => {
+ return (...args: Parameters<ValidationFunc>): ReturnType<ValidationFunc> => {
+ const [{ value, path }] = args;
+ const newSchema: ConfigEntryView[] = [];
+
+ const configData = (value ?? {}) as Record<string, unknown>;
+ let hasErrors = false;
+ if (schema) {
+ schema
+ .filter((f: ConfigEntryView) => f.required)
+ .forEach((field: ConfigEntryView) => {
+ // validate if submitting or on field edit - value is not default to null
+ if (configData[field.key] !== null || isSubmitting) {
+ // validate secrets fields separately from regular
+ if (isSecrets ? field.sensitive : !field.sensitive) {
+ if (
+ !configData[field.key] ||
+ (typeof configData[field.key] === 'string' && isEmpty(configData[field.key]))
+ ) {
+ field.validationErrors = [i18n.getRequiredMessage(field.label)];
+ field.isValid = false;
+ hasErrors = true;
+ } else {
+ field.validationErrors = [];
+ field.isValid = true;
+ }
+ }
+ }
+ newSchema.push(field);
+ });
+
+ validationEventHandler(newSchema);
+ if (hasErrors) {
+ return {
+ code: 'ERR_FIELD_MISSING',
+ path,
+ message: i18n.getRequiredMessage('Action'),
+ };
+ }
+ }
+ };
+};
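
A quick sketch of the two small helpers above (outputs shown in comments; the random suffix is illustrative):

import { generateInferenceEndpointId, getTaskTypeOptions } from './helpers';

const inferenceId = generateInferenceEndpointId({
  provider: 'openai',
  taskType: 'completion',
  inferenceId: '',
});
// e.g. 'openai-completion-k3j9x2': provider, optional task-type suffix, random base-36 suffix

const options = getTaskTypeOptions(['completion', 'text_embedding']);
// [{ id: 'completion', label: 'completion', value: 'completion' }, { id: 'text_embedding', ... }]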
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/tsconfig.json b/x-pack/packages/kbn-inference-endpoint-ui-common/tsconfig.json
new file mode 100644
index 0000000000000..2e4535d6383c5
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/tsconfig.json
@@ -0,0 +1,26 @@
+{
+ "extends": "../../../tsconfig.base.json",
+ "compilerOptions": {
+ "outDir": "target/types",
+ "types": [
+ "jest",
+ "node",
+ "react",
+ "@kbn/ambient-ui-types"
+ ]
+ },
+ "include": [
+ "**/*.ts",
+ "**/*.tsx"
+ ],
+ "exclude": [
+ "target/**/*"
+ ],
+ "kbn_references": [
+ "@kbn/i18n",
+ "@kbn/i18n-react",
+ "@kbn/search-connectors",
+ "@kbn/es-ui-shared-plugin",
+ "@kbn/triggers-actions-ui-plugin"
+ ]
+}
diff --git a/yarn.lock b/yarn.lock
index 64471fa3dd692..3b161158374fc 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -5945,6 +5945,10 @@
version "0.0.0"
uid ""
+"@kbn/inference-endpoint-ui-common@link:x-pack/packages/kbn-inference-endpoint-ui-common":
+ version "0.0.0"
+ uid ""
+
"@kbn/inference-plugin@link:x-pack/platform/plugins/shared/inference":
version "0.0.0"
uid ""
From 1c508252868cdcd2493b1572bc841116dee3d713 Mon Sep 17 00:00:00 2001
From: Samiul Monir
Date: Mon, 16 Dec 2024 10:19:51 -0500
Subject: [PATCH 02/18] Integrate the packaged library into inference endpoint
 management and add endpoints through a flyout
---
.../common/translations.ts | 7 +
.../common/types.ts | 12 +-
.../search_inference_endpoints/kibana.jsonc | 3 +-
.../add_inference_flyout_wrapper.tsx | 65 ++
.../inference_form.tsx | 60 ++
.../add_inference_endpoints/translations.ts | 36 +
.../public/components/inference_endpoints.tsx | 9 +-
.../components/inference_endpoints_header.tsx | 18 +-
.../public/hooks/translations.ts | 14 +
.../public/hooks/use_add_endpoint.ts | 52 ++
.../public/hooks/user_providers.ts | 689 ++++++++++++++++++
.../server/lib/add_inference_endpoint.ts | 48 ++
.../server/lib/fetch_inference_services.ts | 26 +
.../server/routes.ts | 68 +-
.../server/utils/unflatten_object.ts | 17 +
.../search_inference_endpoints/tsconfig.json | 5 +-
16 files changed, 1120 insertions(+), 9 deletions(-)
create mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
create mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
create mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/server/utils/unflatten_object.ts
diff --git a/x-pack/plugins/search_inference_endpoints/common/translations.ts b/x-pack/plugins/search_inference_endpoints/common/translations.ts
index 9d4238b494e6b..ed7fc1f0d80a1 100644
--- a/x-pack/plugins/search_inference_endpoints/common/translations.ts
+++ b/x-pack/plugins/search_inference_endpoints/common/translations.ts
@@ -40,6 +40,13 @@ export const API_DOCUMENTATION_LINK = i18n.translate(
}
);
+export const ADD_ENDPOINT_LABEL = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addConnectorButtonLabel',
+ {
+ defaultMessage: 'Add endpoint',
+ }
+);
+
export const ERROR_TITLE = i18n.translate('xpack.searchInferenceEndpoints.inferenceId.errorTitle', {
defaultMessage: 'Error adding inference endpoint',
});
diff --git a/x-pack/plugins/search_inference_endpoints/common/types.ts b/x-pack/plugins/search_inference_endpoints/common/types.ts
index 2c23f542ee2bb..0c1da5a59b799 100644
--- a/x-pack/plugins/search_inference_endpoints/common/types.ts
+++ b/x-pack/plugins/search_inference_endpoints/common/types.ts
@@ -5,9 +5,12 @@
* 2.0.
*/
+import type { Config, Secrets } from '@kbn/inference-endpoint-ui-common';
+
export enum APIRoutes {
GET_INFERENCE_ENDPOINTS = '/internal/inference_endpoints/endpoints',
- DELETE_INFERENCE_ENDPOINT = '/internal/inference_endpoint/endpoints/{type}/{id}',
+ INFERENCE_ENDPOINT = '/internal/inference_endpoint/endpoints/{type}/{id}',
+  GET_INFERENCE_SERVICES = '/internal/inference_endpoints/_inference/_services',
}
export interface SearchInferenceEndpointsConfigType {
@@ -22,3 +25,10 @@ export enum TaskTypes {
sparse_embedding = 'sparse_embedding',
text_embedding = 'text_embedding',
}
+
+export type { InferenceProvider } from '@kbn/inference-endpoint-ui-common';
+
+export interface InferenceEndpoint {
+ config: Config;
+ secrets: Secrets;
+}
diff --git a/x-pack/plugins/search_inference_endpoints/kibana.jsonc b/x-pack/plugins/search_inference_endpoints/kibana.jsonc
index dca472a92d437..8f65c2be1b16a 100644
--- a/x-pack/plugins/search_inference_endpoints/kibana.jsonc
+++ b/x-pack/plugins/search_inference_endpoints/kibana.jsonc
@@ -29,7 +29,8 @@
"searchNavigation",
],
"requiredBundles": [
- "kibanaReact"
+ "kibanaReact",
+ "esUiShared"
]
}
}
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
new file mode 100644
index 0000000000000..9a9dba4f11633
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import {
+ EuiButtonEmpty,
+ EuiFlexGroup,
+ EuiFlexItem,
+ EuiFlyout,
+ EuiFlyoutBody,
+ EuiFlyoutFooter,
+ EuiFlyoutHeader,
+ EuiTitle,
+ useGeneratedHtmlId,
+} from '@elastic/eui';
+import React from 'react';
+
+import { InferenceForm } from './inference_form';
+import * as i18n from './translations';
+
+interface AddInferenceFlyoutWrapperProps {
+ setIsAddInferenceFlyoutOpen: (state: boolean) => void;
+}
+
+export const AddInferenceFlyoutWrapper: React.FC<AddInferenceFlyoutWrapperProps> = ({
+ setIsAddInferenceFlyoutOpen,
+}) => {
+ const inferenceCreationFlyoutId = useGeneratedHtmlId({
+ prefix: 'addInferenceFlyoutId',
+ });
+ const closeFlyout = () => setIsAddInferenceFlyoutOpen(false);
+
+  return (
+    <EuiFlyout
+      onClose={() => setIsAddInferenceFlyoutOpen(false)}
+      aria-labelledby={inferenceCreationFlyoutId}
+    >
+      <EuiFlyoutHeader hasBorder>
+        <EuiTitle size="m">
+          <h2 id={inferenceCreationFlyoutId}>{i18n.CREATE_ENDPOINT_TITLE}</h2>
+        </EuiTitle>
+      </EuiFlyoutHeader>
+      <EuiFlyoutBody>
+        <InferenceForm onSubmitSuccess={setIsAddInferenceFlyoutOpen} />
+      </EuiFlyoutBody>
+      <EuiFlyoutFooter>
+        <EuiFlexGroup justifyContent="spaceBetween">
+          <EuiFlexItem grow={false}>
+            <EuiButtonEmpty onClick={closeFlyout} flush="left">
+              {i18n.CANCEL}
+            </EuiButtonEmpty>
+          </EuiFlexItem>
+        </EuiFlexGroup>
+      </EuiFlyoutFooter>
+    </EuiFlyout>
+  );
+};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
new file mode 100644
index 0000000000000..940fe2af1a07b
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import React, { useCallback } from 'react';
+import { InferenceServices } from '@kbn/inference-endpoint-ui-common';
+import { EuiButton, EuiFlexGroup, EuiFlexItem, EuiSpacer } from '@elastic/eui';
+import { useKibana } from '../../hooks/use_kibana';
+import { useProviders } from '../../hooks/user_providers';
+import * as i18n from './translations';
+import { useAddEndpoint } from '../../hooks/use_add_endpoint';
+import { InferenceEndpoint } from '../../types';
+
+interface InferenceFormProps {
+ onSubmitSuccess: (state: boolean) => void;
+}
+export const InferenceForm: React.FC<InferenceFormProps> = ({ onSubmitSuccess }) => {
+ const {
+ services: { http },
+ } = useKibana();
+ const { mutate: addEndpoint } = useAddEndpoint(() => onSubmitSuccess(false));
+ const { data: providers } = useProviders(http);
+ const { form } = useForm();
+ const handleSubmit = useCallback(async () => {
+ const { isValid, data } = await form.submit();
+
+ if (isValid) {
+ addEndpoint({
+ inferenceEndpoint: data as InferenceEndpoint,
+ });
+ return;
+ }
+ }, [addEndpoint, form]);
+
+  return providers ? (
+    <Form form={form}>
+      <InferenceServices providers={providers} />
+      <EuiSpacer size="m" />
+      <EuiFlexGroup>
+        <EuiFlexItem grow={false}>
+          <EuiButton fill onClick={handleSubmit}>
+            {i18n.SAVE}
+          </EuiButton>
+        </EuiFlexItem>
+      </EuiFlexGroup>
+    </Form>
+  ) : null;
+};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
new file mode 100644
index 0000000000000..330a2b434d4da
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
@@ -0,0 +1,36 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { i18n } from '@kbn/i18n';
+
+export const SAVE = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addInferenceEndpoint.saveBtnLabel',
+ {
+ defaultMessage: 'Save',
+ }
+);
+
+export const SAVE_TEST = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addInferenceEndpoint.saveAndTestBtnLabel',
+ {
+ defaultMessage: 'Save and test',
+ }
+);
+
+export const CANCEL = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addInferenceEndpoint.cancelBtnLabel',
+ {
+ defaultMessage: 'Cancel',
+ }
+);
+
+export const CREATE_ENDPOINT_TITLE = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addInferenceEndpoint.createEndpointTitle',
+ {
+ defaultMessage: 'Create Inference Endpoint',
+ }
+);
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
index c39bc69fc300b..6ff09ea6ece71 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
@@ -5,25 +5,30 @@
* 2.0.
*/
-import React from 'react';
+import React, { useState } from 'react';
import { EuiPageTemplate } from '@elastic/eui';
import { useQueryInferenceEndpoints } from '../hooks/use_inference_endpoints';
import { TabularPage } from './all_inference_endpoints/tabular_page';
import { InferenceEndpointsHeader } from './inference_endpoints_header';
+import { AddInferenceFlyoutWrapper } from './add_inference_endpoints/add_inference_flyout_wrapper';
export const InferenceEndpoints: React.FC = () => {
const { data } = useQueryInferenceEndpoints();
+ const [isAddInferenceFlyoutOpen, setIsAddInferenceFlyoutOpen] = useState(false);
const inferenceEndpoints = data || [];
return (
<>
-      <InferenceEndpointsHeader />
+      <InferenceEndpointsHeader setIsAddInferenceFlyoutOpen={setIsAddInferenceFlyoutOpen} />
+      {isAddInferenceFlyoutOpen && (
+        <AddInferenceFlyoutWrapper setIsAddInferenceFlyoutOpen={setIsAddInferenceFlyoutOpen} />
+      )}
     </>
);
};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
index acb7e82db13b2..9d4d002400aad 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
@@ -5,13 +5,18 @@
* 2.0.
*/
-import { EuiPageTemplate, EuiButtonEmpty } from '@elastic/eui';
+import { EuiPageTemplate, EuiButtonEmpty, EuiButton } from '@elastic/eui';
import React from 'react';
import * as i18n from '../../common/translations';
import { docLinks } from '../../common/doc_links';
import { useTrainedModelPageUrl } from '../hooks/use_trained_model_page_url';
-export const InferenceEndpointsHeader: React.FC = () => {
+interface InferenceEndpointsHeaderProps {
+ setIsAddInferenceFlyoutOpen: (state: boolean) => void;
+}
+export const InferenceEndpointsHeader: React.FC<InferenceEndpointsHeaderProps> = ({
+ setIsAddInferenceFlyoutOpen,
+}) => {
const trainedModelPageUrl = useTrainedModelPageUrl();
return (
@@ -21,6 +26,15 @@ export const InferenceEndpointsHeader: React.FC = () => {
description={i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
bottomBorder={true}
rightSideItems={[
+        <EuiButton
+          onClick={() => setIsAddInferenceFlyoutOpen(true)}
+        >
+          {i18n.ADD_ENDPOINT_LABEL}
+        </EuiButton>,
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
new file mode 100644
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
+export const useAddEndpoint = (onSuccess?: () => void) => {
+ const queryClient = useQueryClient();
+ const { services } = useKibana();
+ const toasts = services.notifications?.toasts;
+
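+  // Calls the internal create-endpoint route; on success the cached endpoints list
+  // is invalidated so the inference endpoints table refreshes.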
+ return useMutation(
+ async ({ inferenceEndpoint }: MutationArgs) => {
+ return await services.http.put<{}>(
+ `/internal/inference_endpoint/endpoints/${inferenceEndpoint.config.taskType}/${inferenceEndpoint.config.inferenceId}`,
+ {
+ body: JSON.stringify(inferenceEndpoint),
+ }
+ );
+ },
+ {
+ onSuccess: () => {
+ queryClient.invalidateQueries([INFERENCE_ENDPOINTS_QUERY_KEY]);
+ toasts?.addSuccess({
+ title: i18n.DELETE_SUCCESS,
+ });
+ if (onSuccess) {
+ onSuccess();
+ }
+ },
+ onError: (error: { body: KibanaServerError }) => {
+ toasts?.addError(new Error(error.body.message), {
+ title: i18n.ENDPOINT_CREATION_FAILED,
+ toastMessage: error.body.message,
+ });
+ },
+ }
+ );
+};
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts b/x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts
new file mode 100644
index 0000000000000..4d59dca4fef96
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts
@@ -0,0 +1,689 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import type { HttpSetup } from '@kbn/core-http-browser';
+import { useQuery } from '@tanstack/react-query';
+import { FieldType, InferenceProvider } from '@kbn/inference-endpoint-ui-common';
+import { KibanaServerError } from '@kbn/kibana-utils-plugin/common';
+import { useKibana } from './use_kibana';
+import * as i18n from './translations';
+
+/* FIX ME: Currently we use hard-coded values; these will be removed once
+  the endpoint is ready to use.
+*/
+// const getProviders = async (http: HttpSetup): Promise<InferenceProvider[]> => {
+// return await http.get(APIRoutes.GET_INFERENCE_SERVICES);
+// };
+
+const getProviders = (http: HttpSetup): Promise<InferenceProvider[]> => {
+ const providers: InferenceProvider[] = [
+ {
+ service: 'cohere',
+ name: 'Cohere',
+ task_types: ['text_embedding', 'rerank', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'elastic',
+ name: 'Elastic',
+ task_types: ['sparse_embedding'],
+ configurations: {
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'watsonxai',
+ name: 'IBM Watsonx',
+ task_types: ['text_embedding'],
+ configurations: {
+ project_id: {
+ default_value: null,
+ description: '',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The IBM Watsonx API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: null,
+ description: '',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureaistudio',
+ name: 'Azure AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ endpoint_type: {
+ default_value: null,
+ description: 'Specifies the type of endpoint that is used in your model deployment.',
+ label: 'Endpoint Type',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ target: {
+ default_value: null,
+ description: 'The target URL of your Azure AI Studio model deployment.',
+ label: 'Target',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'hugging_face',
+ name: 'Hugging Face',
+ task_types: ['text_embedding', 'sparse_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/embeddings',
+ description: 'The URL endpoint to use for the requests.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'amazonbedrock',
+ name: 'Amazon Bedrock',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ secret_key: {
+ default_value: null,
+ description: 'A valid AWS secret key that is paired with the access_key.',
+ label: 'Secret Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ access_key: {
+ default_value: null,
+ description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
+ label: 'Access Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description:
+ 'The base model ID or an ARN to a custom model based on a foundational model.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ region: {
+ default_value: null,
+ description: 'The region that your model or ARN is deployed in.',
+ label: 'Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'anthropic',
+ name: 'Anthropic',
+ task_types: ['completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'googleaistudio',
+ name: 'Google AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: "ID of the LLM you're using.",
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'elasticsearch',
+ name: 'Elasticsearch',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
+ configurations: {
+ num_allocations: {
+ default_value: 1,
+ description:
+ 'The total number of allocations this model is assigned across machine learning nodes.',
+ label: 'Number Allocations',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ num_threads: {
+ default_value: 2,
+ description: 'Sets the number of threads used by each model allocation during inference.',
+ label: 'Number Threads',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: '.multilingual-e5-small',
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'openai',
+ name: 'OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description:
+ 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ organization_id: {
+ default_value: null,
+ description: 'The unique identifier of your organization.',
+ label: 'Organization ID',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/chat/completions',
+ description:
+ 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureopenai',
+ name: 'Azure OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ entra_id: {
+ default_value: null,
+ description: 'You must provide either an API key or an Entra ID.',
+ label: 'Entra ID',
+ required: false,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ deployment_id: {
+ default_value: null,
+ description: 'The deployment name of your deployed models.',
+ label: 'Deployment ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ resource_name: {
+ default_value: null,
+ description: 'The name of your Azure OpenAI resource.',
+ label: 'Resource Name',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The Azure API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'mistral',
+ name: 'Mistral',
+ task_types: ['text_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description:
+ 'Refer to the Mistral models documentation for the list of available text embedding models.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'googlevertexai',
+ name: 'Google Vertex AI',
+ task_types: ['text_embedding', 'rerank'],
+ configurations: {
+ service_account_json: {
+ default_value: null,
+ description: "API Key for the provider you're connecting to.",
+ label: 'Credentials JSON',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ project_id: {
+ default_value: null,
+ description:
+ 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Project',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ location: {
+ default_value: null,
+ description:
+ 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: `ID of the LLM you're using.`,
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'alibabacloud-ai-search',
+ name: 'AlibabaCloud AI Search',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
+ configurations: {
+ workspace: {
+ default_value: null,
+ description: 'The name of the workspace used for the {infer} task.',
+ label: 'Workspace',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `A valid API key for the AlibabaCloud AI Search API.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ service_id: {
+ default_value: null,
+ description: 'The name of the model service to use for the {infer} task.',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ host: {
+ default_value: null,
+ description:
+ 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
+ label: 'Host',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ http_schema: {
+ default_value: null,
+ description: '',
+ label: 'HTTP Schema',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ ];
+ return Promise.resolve(
+ providers.sort((a, b) => (a.service > b.service ? 1 : b.service > a.service ? -1 : 0))
+ );
+};
+
+/* FIX ME: ToastsStart */
+export const useProviders = (http: HttpSetup) => {
+ const { services } = useKibana();
+ const toasts = services.notifications?.toasts;
+ const onErrorFn = (error: { body: KibanaServerError }) => {
+ toasts?.addError(new Error(error.body.message), {
+ title: i18n.GET_PROVIDERS_FAILED,
+ toastMessage: error.body.message,
+ });
+ };
+
+ const query = useQuery(['user-profile'], {
+ queryFn: () => getProviders(http),
+ staleTime: Infinity,
+ refetchOnWindowFocus: false,
+ onError: onErrorFn,
+ });
+ return query;
+};
diff --git a/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
new file mode 100644
index 0000000000000..afc174ac41ce8
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
@@ -0,0 +1,48 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { InferenceTaskType } from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
+import { ElasticsearchClient } from '@kbn/core/server';
+import type { Config, Secrets } from '@kbn/inference-endpoint-ui-common';
+import type { Logger } from '@kbn/logging';
+import { unflattenObject } from '../utils/unflatten_object';
+
+export const addInferenceEndpoint = async (
+ esClient: ElasticsearchClient,
+ type: string,
+ id: string,
+ config: Config,
+ secrets: Secrets,
+ logger: Logger
+) => {
+ try {
+ const taskSettings = config?.taskTypeConfig
+ ? {
+ ...unflattenObject(config?.taskTypeConfig),
+ }
+ : {};
+ const serviceSettings = {
+ ...unflattenObject(config?.providerConfig ?? {}),
+ ...unflattenObject(secrets?.providerSecrets ?? {}),
+ };
+
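+    // Provider config and secrets are merged into service_settings; task-specific
+    // settings are sent separately as task_settings.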
+ return await esClient?.inference.put({
+ inference_id: config?.inferenceId ?? '',
+ task_type: config?.taskType as InferenceTaskType,
+ inference_config: {
+ service: config!.provider,
+ service_settings: serviceSettings,
+ task_settings: taskSettings,
+ },
+ });
+ } catch (e) {
+ logger.warn(
+ `Failed to create inference endpoint for task type "${config?.taskType}" and inference id ${config?.inferenceId}. Error: ${e.message}`
+ );
+ throw e;
+ }
+};
diff --git a/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.ts b/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.ts
new file mode 100644
index 0000000000000..d9d4d2e4c071d
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.ts
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { ElasticsearchClient } from '@kbn/core/server';
+import { InferenceProvider } from '../types';
+
+export const fetchInferenceServices = async (
+ client: ElasticsearchClient
+): Promise<{
+ services: InferenceProvider[];
+}> => {
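+  // The services catalogue comes from the Elasticsearch inference API (GET /_inference/_services).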
+ const { services } = await client.transport.request<{
+ services: InferenceProvider[];
+ }>({
+ method: 'GET',
+ path: `/_inference/_services`,
+ });
+
+ return {
+ services,
+ };
+};
diff --git a/x-pack/plugins/search_inference_endpoints/server/routes.ts b/x-pack/plugins/search_inference_endpoints/server/routes.ts
index 80d7a15ab99c4..6d6a5dfbb26e8 100644
--- a/x-pack/plugins/search_inference_endpoints/server/routes.ts
+++ b/x-pack/plugins/search_inference_endpoints/server/routes.ts
@@ -9,9 +9,11 @@ import { IRouter } from '@kbn/core/server';
import { schema } from '@kbn/config-schema';
import type { Logger } from '@kbn/logging';
import { fetchInferenceEndpoints } from './lib/fetch_inference_endpoints';
-import { APIRoutes } from './types';
+import { APIRoutes, InferenceEndpoint } from './types';
import { errorHandler } from './utils/error_handler';
import { deleteInferenceEndpoint } from './lib/delete_inference_endpoint';
+// import { fetchInferenceServices } from './lib/fetch_inference_services';
+import { addInferenceEndpoint } from './lib/add_inference_endpoint';
export function defineRoutes({ logger, router }: { logger: Logger; router: IRouter }) {
router.get(
@@ -35,9 +37,71 @@ export function defineRoutes({ logger, router }: { logger: Logger; router: IRout
})
);
+  /* FIX ME: Currently we use hard-coded values; these will be removed once
+    the endpoint is ready to use.
+ */
+
+ // router.get(
+ // {
+ // path: APIRoutes.GET_INFERENCE_SERVICES,
+ // validate: {},
+ // },
+ // errorHandler(logger)(async (context, request, response) => {
+ // const {
+ // client: { asCurrentUser },
+ // } = (await context.core).elasticsearch;
+
+ // const services = await fetchInferenceServices(asCurrentUser);
+
+ // return response.ok({
+ // body: {
+ // inference_services: services,
+ // },
+ // headers: { 'content-type': 'application/json' },
+ // });
+ // })
+ // );
+
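+  // Creates an inference endpoint for the given task type and id using the submitted config and secrets.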
+ router.put(
+ {
+ path: APIRoutes.INFERENCE_ENDPOINT,
+ validate: {
+ params: schema.object({
+ type: schema.string(),
+ id: schema.string(),
+ }),
+ body: schema.object({
+ config: schema.object({
+ inferenceId: schema.string(),
+ provider: schema.string(),
+ taskType: schema.string(),
+ providerConfig: schema.any(),
+ }),
+ secrets: schema.object({
+ providerSecrets: schema.any(),
+ }),
+ }),
+ },
+ },
+ errorHandler(logger)(async (context, request, response) => {
+ const {
+ client: { asCurrentUser },
+ } = (await context.core).elasticsearch;
+
+ const { type, id } = request.params;
+ const { config, secrets }: InferenceEndpoint = request.body;
+ const result = await addInferenceEndpoint(asCurrentUser, type, id, config, secrets, logger);
+
+ return response.ok({
+ body: result,
+ headers: { 'content-type': 'application/json' },
+ });
+ })
+ );
+
router.delete(
{
- path: APIRoutes.DELETE_INFERENCE_ENDPOINT,
+ path: APIRoutes.INFERENCE_ENDPOINT,
validate: {
params: schema.object({
type: schema.string(),
diff --git a/x-pack/plugins/search_inference_endpoints/server/utils/unflatten_object.ts b/x-pack/plugins/search_inference_endpoints/server/utils/unflatten_object.ts
new file mode 100644
index 0000000000000..625f43bb4f8f6
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/server/utils/unflatten_object.ts
@@ -0,0 +1,17 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+import { set } from '@kbn/safer-lodash-set';
+
+interface GenericObject {
+ [key: string]: any;
+}
+
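+// Expands dot-delimited keys (for example 'rate_limit.requests_per_minute') into nested objects.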
+export const unflattenObject = <T = GenericObject>(object: object): T =>
+ Object.entries(object).reduce((acc, [key, value]) => {
+ set(acc, key, value);
+ return acc;
+ }, {} as T);
diff --git a/x-pack/plugins/search_inference_endpoints/tsconfig.json b/x-pack/plugins/search_inference_endpoints/tsconfig.json
index f448d36c7f463..dc1441f88227d 100644
--- a/x-pack/plugins/search_inference_endpoints/tsconfig.json
+++ b/x-pack/plugins/search_inference_endpoints/tsconfig.json
@@ -35,7 +35,10 @@
"@kbn/utility-types",
"@kbn/search-navigation",
"@kbn/shared-ux-page-kibana-template",
- "@kbn/licensing-plugin"
+ "@kbn/licensing-plugin",
+ "@kbn/triggers-actions-ui-plugin",
+ "@kbn/inference-endpoint-ui-common",
+ "@kbn/es-ui-shared-plugin"
],
"exclude": [
"target/**/*",
From eab4a6888b3915f943c9eb3f419bf265f8472af7 Mon Sep 17 00:00:00 2001
From: Samiul Monir
Date: Wed, 18 Dec 2024 11:42:40 -0500
Subject: [PATCH 03/18] Adding test subjects, comments and unit tests
---
.github/CODEOWNERS | 2 +-
.../jest.config.js | 2 +-
.../setup_tests.ts | 9 +
.../components/additional_options_fields.tsx | 3 +-
.../components/inference_services.test.tsx | 188 +++++
.../service_provider.test.tsx | 42 +
.../components/providers/selectable.test.tsx | 75 ++
.../add_inference_flyout_wrapper.test.tsx | 730 ++++++++++++++++++
.../add_inference_flyout_wrapper.tsx | 15 +-
.../inference_form.tsx | 8 +-
.../public/components/inference_endpoints.tsx | 2 +-
.../public/hooks/use_add_endpoint.test.tsx | 87 +++
.../public/hooks/use_add_endpoint.ts | 2 +-
.../{user_providers.ts => use_providers.ts} | 5 +-
.../server/lib/add_inference_endpoint.test.ts | 55 ++
.../server/lib/add_inference_endpoint.ts | 9 +-
.../lib/fetch_inference_services.test.ts | 681 ++++++++++++++++
17 files changed, 1889 insertions(+), 26 deletions(-)
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/setup_tests.ts
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.test.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.test.tsx
create mode 100644 x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.test.tsx
create mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
create mode 100644 x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.test.tsx
rename x-pack/plugins/search_inference_endpoints/public/hooks/{user_providers.ts => use_providers.ts} (99%)
create mode 100644 x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.test.ts
create mode 100644 x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.test.ts
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 1e2b72acdcc1f..8dce022a2140e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -764,7 +764,7 @@ x-pack/packages/ai-infra/product-doc-artifact-builder @elastic/appex-ai-infra
x-pack/packages/kbn-ai-assistant @elastic/search-kibana
x-pack/packages/kbn-alerting-comparators @elastic/response-ops
x-pack/packages/kbn-alerting-state-types @elastic/response-ops
-x-pack/packages/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai
+x-pack/packages/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai @elastic/search-kibana
x-pack/packages/kbn-random-sampling @elastic/kibana-visualizations
x-pack/packages/kbn-synthetics-private-location @elastic/obs-ux-management-team
x-pack/packages/maps/vector_tile_utils @elastic/kibana-presentation
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js b/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
index 36c8ed71a7b2f..e38bac1241a32 100644
--- a/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/jest.config.js
@@ -7,6 +7,6 @@
module.exports = {
preset: '@kbn/test',
- rootDir: '../..',
+ rootDir: '../../..',
roots: ['/x-pack/packages/kbn-inference-endpoint-ui-common'],
};
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/setup_tests.ts b/x-pack/packages/kbn-inference-endpoint-ui-common/setup_tests.ts
new file mode 100644
index 0000000000000..72e0edd0d07f7
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/setup_tests.ts
@@ -0,0 +1,9 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+// eslint-disable-next-line import/no-extraneous-dependencies
+import '@testing-library/jest-dom';
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
index c0d2e13de6435..8387b337474dd 100644
--- a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
@@ -153,7 +153,7 @@ export const AdditionalOptionsFields: React.FC = (
return (
= (
}
>
{
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.test.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.test.tsx
new file mode 100644
index 0000000000000..0205dc51a9cb8
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/inference_services.test.tsx
@@ -0,0 +1,188 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { InferenceServices } from './inference_services';
+import { FieldType, InferenceProvider } from '../types/types';
+import React from 'react';
+import { render, screen } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
+import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import { I18nProvider } from '@kbn/i18n-react';
+
+const providers = [
+ {
+ service: 'hugging_face',
+ name: 'Hugging Face',
+ task_types: ['text_embedding', 'sparse_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/embeddings',
+ description: 'The URL endpoint to use for the requests.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'cohere',
+ name: 'Cohere',
+ task_types: ['text_embedding', 'rerank', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'anthropic',
+ name: 'Anthropic',
+ task_types: ['completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+] as InferenceProvider[];
+
+const MockFormProvider = ({ children }: { children: React.ReactElement }) => {
+ const { form } = useForm();
+
+  return (
+    <I18nProvider>
+      <Form form={form}>{children}</Form>
+    </I18nProvider>
+  );
+};
+
+describe('Inference Services', () => {
+ it('renders', () => {
+    render(
+      <MockFormProvider>
+        <InferenceServices providers={providers} />
+      </MockFormProvider>
+    );
+
+ expect(screen.getByTestId('provider-select')).toBeInTheDocument();
+ });
+
+ it('renders Selectable', async () => {
+    render(
+      <MockFormProvider>
+        <InferenceServices providers={providers} />
+      </MockFormProvider>
+    );
+
+ await userEvent.click(screen.getByTestId('provider-select'));
+ expect(screen.getByTestId('euiSelectableList')).toBeInTheDocument();
+ });
+
+ it('renders selected provider fields - hugging_face', async () => {
+    render(
+      <MockFormProvider>
+        <InferenceServices providers={providers} />
+      </MockFormProvider>
+    );
+
+ await userEvent.click(screen.getByTestId('provider-select'));
+ await userEvent.click(screen.getByText('Hugging Face'));
+
+ expect(screen.getByTestId('provider-select')).toHaveValue('Hugging Face');
+ expect(screen.getByTestId('api_key-password')).toBeInTheDocument();
+ expect(screen.getByTestId('url-input')).toBeInTheDocument();
+ expect(screen.getByTestId('taskTypeSelect')).toBeInTheDocument();
+ expect(screen.getByTestId('inference-endpoint-input-field')).toBeInTheDocument();
+ expect(screen.queryByTestId('inference-endpoint-input-field')).toHaveDisplayValue(
+ /hugging_face-text_embedding/
+ );
+ });
+
+ it('re-renders fields when selected to anthropic from hugging_face', async () => {
+    render(
+      <MockFormProvider>
+        <InferenceServices providers={providers} />
+      </MockFormProvider>
+    );
+
+ await userEvent.click(screen.getByTestId('provider-select'));
+ await userEvent.click(screen.getByText('Hugging Face'));
+ expect(screen.getByTestId('provider-select')).toHaveValue('Hugging Face');
+
+ await userEvent.click(screen.getByTestId('provider-select'));
+ await userEvent.click(screen.getByText('Anthropic'));
+
+ expect(screen.getByTestId('provider-select')).toHaveValue('Anthropic');
+ expect(screen.getByTestId('api_key-password')).toBeInTheDocument();
+ expect(screen.getByTestId('model_id-input')).toBeInTheDocument();
+ expect(screen.getByTestId('taskTypeSelectSingle')).toBeInTheDocument();
+ expect(screen.getByTestId('inference-endpoint-input-field')).toBeInTheDocument();
+ expect(screen.queryByTestId('inference-endpoint-input-field')).toHaveDisplayValue(
+ /anthropic-completion/
+ );
+ });
+});
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.test.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.test.tsx
new file mode 100644
index 0000000000000..bd4591c31a73a
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/render_service_provider/service_provider.test.tsx
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { render, screen } from '@testing-library/react';
+import React from 'react';
+import { ServiceProviderIcon, ServiceProviderName } from './service_provider';
+import { ServiceProviderKeys } from '../../../constants';
+
+jest.mock('../assets/images/elastic.svg', () => 'elasticIcon.svg');
+jest.mock('../assets/images/hugging_face.svg', () => 'huggingFaceIcon.svg');
+jest.mock('../assets/images/cohere.svg', () => 'cohereIcon.svg');
+jest.mock('../assets/images/open_ai.svg', () => 'openAIIcon.svg');
+
+describe('ServiceProviderIcon component', () => {
+ it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
+    render(<ServiceProviderIcon providerKey={ServiceProviderKeys.hugging_face} />);
+ const icon = screen.getByTestId('icon-service-provider-hugging_face');
+ expect(icon).toBeInTheDocument();
+ });
+
+ it('renders Open AI icon and name when providerKey is openai', () => {
+    render(<ServiceProviderIcon providerKey={ServiceProviderKeys.openai} />);
+ const icon = screen.getByTestId('icon-service-provider-openai');
+ expect(icon).toBeInTheDocument();
+ });
+});
+
+describe('ServiceProviderName component', () => {
+ it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
+    render(<ServiceProviderName providerKey={ServiceProviderKeys.hugging_face} />);
+ expect(screen.getByText('Hugging Face')).toBeInTheDocument();
+ });
+
+ it('renders Open AI icon and name when providerKey is openai', () => {
+    render(<ServiceProviderName providerKey={ServiceProviderKeys.openai} />);
+ expect(screen.getByText('OpenAI')).toBeInTheDocument();
+ });
+});
diff --git a/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.test.tsx b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.test.tsx
new file mode 100644
index 0000000000000..6e2bedbcf4516
--- /dev/null
+++ b/x-pack/packages/kbn-inference-endpoint-ui-common/src/components/providers/selectable.test.tsx
@@ -0,0 +1,75 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { render, screen } from '@testing-library/react';
+import { FieldType } from '../../types/types';
+import { SelectableProvider } from './selectable';
+
+const providers = [
+ {
+ service: 'hugging_face',
+ name: 'Hugging Face',
+ task_types: ['text_embedding', 'sparse_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/embeddings',
+ description: 'The URL endpoint to use for the requests.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+];
+
+describe('SelectableProvider', () => {
+ const props = {
+ providers,
+ onClosePopover: jest.fn(),
+ onProviderChange: jest.fn(),
+ };
+ describe('should render', () => {
+ describe('provider', () => {
+ afterAll(() => {
+ jest.clearAllMocks();
+ });
+
+ test('render placeholder', async () => {
+      render(<SelectableProvider {...props} />);
+ const searchInput = screen.getByTestId('provider-super-select-search-box');
+ expect(searchInput).toHaveAttribute('placeholder', 'Search');
+ });
+
+ test('render list of providers', async () => {
+      render(<SelectableProvider {...props} />);
+ const listOfProviders = screen.queryAllByRole('option');
+ expect(listOfProviders).toHaveLength(1);
+ });
+ });
+ });
+});
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
new file mode 100644
index 0000000000000..784170b5857c3
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
@@ -0,0 +1,730 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { render, screen } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
+import { FieldType } from '@kbn/search-connectors/types';
+import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
+import React from 'react';
+import { I18nProvider } from '@kbn/i18n-react';
+
+import { AddInferenceFlyoutWrapper } from './add_inference_flyout_wrapper';
+
+const mockProviders = [
+ {
+ service: 'cohere',
+ name: 'Cohere',
+ task_types: ['text_embedding', 'rerank', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'elastic',
+ name: 'Elastic',
+ task_types: ['sparse_embedding'],
+ configurations: {
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'watsonxai',
+ name: 'IBM Watsonx',
+ task_types: ['text_embedding'],
+ configurations: {
+ project_id: {
+ default_value: null,
+ description: '',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The IBM Watsonx API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: null,
+ description: '',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureaistudio',
+ name: 'Azure AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ endpoint_type: {
+ default_value: null,
+ description: 'Specifies the type of endpoint that is used in your model deployment.',
+ label: 'Endpoint Type',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ target: {
+ default_value: null,
+ description: 'The target URL of your Azure AI Studio model deployment.',
+ label: 'Target',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'hugging_face',
+ name: 'Hugging Face',
+ task_types: ['text_embedding', 'sparse_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/embeddings',
+ description: 'The URL endpoint to use for the requests.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'amazonbedrock',
+ name: 'Amazon Bedrock',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ secret_key: {
+ default_value: null,
+ description: 'A valid AWS secret key that is paired with the access_key.',
+ label: 'Secret Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ access_key: {
+ default_value: null,
+ description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
+ label: 'Access Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description: 'The base model ID or an ARN to a custom model based on a foundational model.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ region: {
+ default_value: null,
+ description: 'The region that your model or ARN is deployed in.',
+ label: 'Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'anthropic',
+ name: 'Anthropic',
+ task_types: ['completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'googleaistudio',
+ name: 'Google AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: "ID of the LLM you're using.",
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'elasticsearch',
+ name: 'Elasticsearch',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
+ configurations: {
+ num_allocations: {
+ default_value: 1,
+ description:
+ 'The total number of allocations this model is assigned across machine learning nodes.',
+ label: 'Number Allocations',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ num_threads: {
+ default_value: 2,
+ description: 'Sets the number of threads used by each model allocation during inference.',
+ label: 'Number Threads',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: '.multilingual-e5-small',
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'openai',
+ name: 'OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description:
+ 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ organization_id: {
+ default_value: null,
+ description: 'The unique identifier of your organization.',
+ label: 'Organization ID',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/chat/completions',
+ description:
+ 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureopenai',
+ name: 'Azure OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ entra_id: {
+ default_value: null,
+ description: 'You must provide either an API key or an Entra ID.',
+ label: 'Entra ID',
+ required: false,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ deployment_id: {
+ default_value: null,
+ description: 'The deployment name of your deployed models.',
+ label: 'Deployment ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ resource_name: {
+ default_value: null,
+ description: 'The name of your Azure OpenAI resource.',
+ label: 'Resource Name',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The Azure API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'mistral',
+ name: 'Mistral',
+ task_types: ['text_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description:
+ 'Refer to the Mistral models documentation for the list of available text embedding models.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'googlevertexai',
+ name: 'Google Vertex AI',
+ task_types: ['text_embedding', 'rerank'],
+ configurations: {
+ service_account_json: {
+ default_value: null,
+ description: "API Key for the provider you're connecting to.",
+ label: 'Credentials JSON',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ project_id: {
+ default_value: null,
+ description:
+ 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Project',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ location: {
+ default_value: null,
+ description:
+ 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: `ID of the LLM you're using.`,
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'alibabacloud-ai-search',
+ name: 'AlibabaCloud AI Search',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
+ configurations: {
+ workspace: {
+ default_value: null,
+ description: 'The name of the workspace used for the {infer} task.',
+ label: 'Workspace',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `A valid API key for the AlibabaCloud AI Search API.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ service_id: {
+ default_value: null,
+ description: 'The name of the model service to use for the {infer} task.',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ host: {
+ default_value: null,
+ description:
+ 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
+ label: 'Host',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ http_schema: {
+ default_value: null,
+ description: '',
+ label: 'HTTP Schema',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+];
+
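+// Mock the add-endpoint mutation and the providers hook so the flyout under test
+// renders against the static mockProviders list defined above.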
+const mockAddEndpoint = jest.fn();
+const onClose = jest.fn();
+jest.mock('../../hooks/use_add_endpoint', () => ({
+ useAddEndpoint: () => ({
+ mutate: mockAddEndpoint.mockImplementation(() => Promise.resolve()), // Mock implementation of the mutate function
+ }),
+}));
+
+jest.mock('../../hooks/use_providers', () => ({
+ useProviders: jest.fn(() => ({
+ data: mockProviders,
+ })),
+}));
+
+const MockFormProvider = ({ children }: { children: React.ReactElement }) => {
+ const { form } = useForm();
+  return <Form form={form}>{children}</Form>;
+};
+
+describe('AddInferenceFlyout', () => {
+ it('renders', () => {
+    render(
+      <MockFormProvider>
+        <AddInferenceFlyoutWrapper onClose={onClose} />
+      </MockFormProvider>
+    );
+
+ expect(screen.getByTestId('create-inference-flyout')).toBeInTheDocument();
+ expect(screen.getByTestId('create-inference-flyout-header')).toBeInTheDocument();
+ expect(screen.getByTestId('provider-select')).toBeInTheDocument();
+ expect(screen.getByTestId('add-inference-endpoint-submit-button')).toBeInTheDocument();
+ expect(screen.getByTestId('create-inference-flyout-close-button')).toBeInTheDocument();
+ });
+
+ it('invalidates form if no provider is selected', async () => {
+    render(
+      <MockFormProvider>
+        <AddInferenceFlyoutWrapper onClose={onClose} />
+      </MockFormProvider>
+    );
+
+ await userEvent.click(screen.getByTestId('add-inference-endpoint-submit-button'));
+ expect(screen.getByText('Provider is required.')).toBeInTheDocument();
+ expect(mockAddEndpoint).not.toHaveBeenCalled();
+ expect(screen.getByTestId('add-inference-endpoint-submit-button')).toBeDisabled();
+ });
+
+ it('valid submission', async () => {
+    render(
+      <MockFormProvider>
+        <AddInferenceFlyoutWrapper onClose={onClose} />
+      </MockFormProvider>
+    );
+
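+    // Select Anthropic and fill its required fields (API key and model ID) before submitting.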
+ await userEvent.click(screen.getByTestId('provider-select'));
+ await userEvent.click(screen.getByText('Anthropic'));
+ await userEvent.type(await screen.findByTestId('api_key-password'), 'test api passcode');
+ await userEvent.type(
+ await screen.findByTestId('model_id-input'),
+ 'sample model name from Anthropic'
+ );
+
+ await userEvent.click(screen.getByTestId('add-inference-endpoint-submit-button'));
+ expect(mockAddEndpoint).toHaveBeenCalled();
+ });
+});
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
index 9a9dba4f11633..4e6be5764bf44 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
@@ -22,36 +22,37 @@ import { InferenceForm } from './inference_form';
import * as i18n from './translations';
interface AddInferenceFlyoutWrapperProps {
- setIsAddInferenceFlyoutOpen: (state: boolean) => void;
+ onClose: (state: boolean) => void;
}
export const AddInferenceFlyoutWrapper: React.FC<AddInferenceFlyoutWrapperProps> = ({
- setIsAddInferenceFlyoutOpen,
+ onClose,
}) => {
const inferenceCreationFlyoutId = useGeneratedHtmlId({
prefix: 'addInferenceFlyoutId',
});
- const closeFlyout = () => setIsAddInferenceFlyoutOpen(false);
+ const closeFlyout = () => onClose(false);
return (
setIsAddInferenceFlyoutOpen(false)}
+ onClose={() => onClose(false)}
aria-labelledby={inferenceCreationFlyoutId}
+ data-test-subj="create-inference-flyout"
>
-
+
{i18n.CREATE_ENDPOINT_TITLE}
-
+
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
index 940fe2af1a07b..e26fb765f06c6 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
@@ -9,8 +9,7 @@ import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_l
import React, { useCallback } from 'react';
import { InferenceServices } from '@kbn/inference-endpoint-ui-common';
import { EuiButton, EuiFlexGroup, EuiFlexItem, EuiSpacer } from '@elastic/eui';
-import { useKibana } from '../../hooks/use_kibana';
-import { useProviders } from '../../hooks/user_providers';
+import { useProviders } from '../../hooks/use_providers';
import * as i18n from './translations';
import { useAddEndpoint } from '../../hooks/use_add_endpoint';
import { InferenceEndpoint } from '../../types';
@@ -19,11 +18,8 @@ interface InferenceFormProps {
onSubmitSuccess: (state: boolean) => void;
}
export const InferenceForm: React.FC = ({ onSubmitSuccess }) => {
- const {
- services: { http },
- } = useKibana();
const { mutate: addEndpoint } = useAddEndpoint(() => onSubmitSuccess(false));
- const { data: providers } = useProviders(http);
+ const { data: providers } = useProviders();
const { form } = useForm();
const handleSubmit = useCallback(async () => {
const { isValid, data } = await form.submit();
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
index 6ff09ea6ece71..d8d696fee1e9e 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints.tsx
@@ -27,7 +27,7 @@ export const InferenceEndpoints: React.FC = () => {
{isAddInferenceFlyoutOpen && (
-
+
)}
>
);
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.test.tsx b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.test.tsx
new file mode 100644
index 0000000000000..488c85a4d47b0
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.test.tsx
@@ -0,0 +1,87 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { renderHook, waitFor } from '@testing-library/react';
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
+import React from 'react';
+
+import { useAddEndpoint } from './use_add_endpoint';
+import * as i18n from './translations';
+import { useKibana } from './use_kibana';
+
+const wrapper = ({ children }: { children: React.ReactNode }) => {
+ const queryClient = new QueryClient();
+  return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>;
+};
+
+const mockConfig: any = {
+ provider: 'elasticsearch',
+ taskType: 'text_embedding',
+ inferenceId: 'es-endpoint-1',
+ providerConfig: {
+ num_allocations: 1,
+ num_threads: 2,
+ model_id: '.multilingual-e5-small',
+ },
+};
+const mockSecrets: any = { providerSecrets: {} };
+
+const mockInferenceEndpoint = {
+ config: mockConfig,
+ secrets: mockSecrets,
+};
+
+jest.mock('./use_kibana');
+
+const mockUseKibana = useKibana as jest.Mock;
+const mockAdd = jest.fn();
+const mockAddSuccess = jest.fn();
+const mockAddError = jest.fn();
+
+describe('useAddEndpoint', () => {
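+  // Stub the Kibana services used by the hook: http.put for the create request and
+  // toast notifications for success/error feedback.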
+ mockUseKibana.mockReturnValue({
+ services: {
+ http: {
+ put: mockAdd,
+ },
+ notifications: {
+ toasts: {
+ addSuccess: mockAddSuccess,
+ addError: mockAddError,
+ },
+ },
+ },
+ });
+
+  it('should call add inference endpoint and show success toast', async () => {
+ const { result } = renderHook(() => useAddEndpoint(), { wrapper });
+
+ result.current.mutate({ inferenceEndpoint: mockInferenceEndpoint });
+
+ await waitFor(() =>
+ expect(mockAdd).toHaveBeenCalledWith(
+ '/internal/inference_endpoint/endpoints/text_embedding/es-endpoint-1',
+ {
+ body: JSON.stringify(mockInferenceEndpoint),
+ }
+ )
+ );
+ expect(mockAddSuccess).toHaveBeenCalledWith({
+ title: i18n.ENDPOINT_ADDED_SUCCESS,
+ });
+ });
+
+ it('should show error toast on failure', async () => {
+ const error = { body: { message: 'error' } };
+ mockAdd.mockRejectedValue(error);
+ const { result } = renderHook(() => useAddEndpoint(), { wrapper });
+
+ result.current.mutate({ inferenceEndpoint: mockInferenceEndpoint });
+
+ await waitFor(() => expect(mockAddError).toHaveBeenCalled());
+ });
+});
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
index 1b1e0dde3eacc..1f3f453d1b66d 100644
--- a/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_add_endpoint.ts
@@ -35,7 +35,7 @@ export const useAddEndpoint = (onSuccess?: () => void) => {
onSuccess: () => {
queryClient.invalidateQueries([INFERENCE_ENDPOINTS_QUERY_KEY]);
toasts?.addSuccess({
- title: i18n.DELETE_SUCCESS,
+ title: i18n.ENDPOINT_ADDED_SUCCESS,
});
if (onSuccess) {
onSuccess();
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts b/x-pack/plugins/search_inference_endpoints/public/hooks/use_providers.ts
similarity index 99%
rename from x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts
rename to x-pack/plugins/search_inference_endpoints/public/hooks/use_providers.ts
index 4d59dca4fef96..acb44c50af45b 100644
--- a/x-pack/plugins/search_inference_endpoints/public/hooks/user_providers.ts
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_providers.ts
@@ -668,8 +668,7 @@ const getProviders = (http: HttpSetup): Promise => {
);
};
-/* FIX ME: ToastsStart */
-export const useProviders = (http: HttpSetup) => {
+export const useProviders = () => {
const { services } = useKibana();
const toasts = services.notifications?.toasts;
const onErrorFn = (error: { body: KibanaServerError }) => {
@@ -680,7 +679,7 @@ export const useProviders = (http: HttpSetup) => {
};
const query = useQuery(['user-profile'], {
- queryFn: () => getProviders(http),
+ queryFn: () => getProviders(services.http),
staleTime: Infinity,
refetchOnWindowFocus: false,
onError: onErrorFn,
diff --git a/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.test.ts b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.test.ts
new file mode 100644
index 0000000000000..5f906aa1fb8cf
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.test.ts
@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { loggingSystemMock } from '@kbn/core/server/mocks';
+
+import { addInferenceEndpoint } from './add_inference_endpoint';
+
+describe('addInferenceEndpoint', () => {
+ const mockClient: any = {
+ inference: {
+ put: jest.fn(),
+ },
+ };
+
+ const type = 'text_embedding';
+ const id = 'es-endpoint-1';
+ const config: any = {
+ provider: 'elasticsearch',
+ taskType: 'text_embedding',
+ inferenceId: 'es-endpoint-1',
+ providerConfig: {
+ num_allocations: 1,
+ num_threads: 2,
+ model_id: '.multilingual-e5-small',
+ },
+ };
+ const secrets: any = { providerSecrets: {} };
+ const mockLogger = loggingSystemMock.createLogger();
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('should call the ES client with correct PUT request', async () => {
+ await addInferenceEndpoint(mockClient, type, id, config, secrets, mockLogger);
+
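+    // providerConfig (and any providerSecrets) are unflattened into service_settings,
+    // while task_settings is sent as an empty object.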
+ expect(mockClient.inference.put).toHaveBeenCalledWith({
+ inference_id: id,
+ task_type: type,
+ inference_config: {
+ service: 'elasticsearch',
+ service_settings: {
+ num_allocations: 1,
+ num_threads: 2,
+ model_id: '.multilingual-e5-small',
+ },
+ task_settings: {},
+ },
+ });
+ });
+});
diff --git a/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
index afc174ac41ce8..e1170708483c8 100644
--- a/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
+++ b/x-pack/plugins/search_inference_endpoints/server/lib/add_inference_endpoint.ts
@@ -20,11 +20,10 @@ export const addInferenceEndpoint = async (
logger: Logger
) => {
try {
- const taskSettings = config?.taskTypeConfig
- ? {
- ...unflattenObject(config?.taskTypeConfig),
- }
- : {};
+    /* The task_settings property is required in the API call
+       but is not needed for inference or connector creation.
+     */
+ const taskSettings = {};
const serviceSettings = {
...unflattenObject(config?.providerConfig ?? {}),
...unflattenObject(secrets?.providerSecrets ?? {}),
diff --git a/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.test.ts b/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.test.ts
new file mode 100644
index 0000000000000..b5676429d54ab
--- /dev/null
+++ b/x-pack/plugins/search_inference_endpoints/server/lib/fetch_inference_services.test.ts
@@ -0,0 +1,681 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { FieldType } from '@kbn/search-connectors/types';
+import { fetchInferenceServices } from './fetch_inference_services';
+import { ElasticsearchClient } from '@kbn/core/server';
+
+describe('fetch inference services', () => {
+ const mockInferenceServicesResponse = [
+ {
+ service: 'cohere',
+ name: 'Cohere',
+ task_types: ['text_embedding', 'rerank', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'elastic',
+ name: 'Elastic',
+ task_types: ['sparse_embedding'],
+ configurations: {
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'watsonxai',
+ name: 'IBM Watsonx',
+ task_types: ['text_embedding'],
+ configurations: {
+ project_id: {
+ default_value: null,
+ description: '',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The IBM Watsonx API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: null,
+ description: '',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureaistudio',
+ name: 'Azure AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ endpoint_type: {
+ default_value: null,
+ description: 'Specifies the type of endpoint that is used in your model deployment.',
+ label: 'Endpoint Type',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ target: {
+ default_value: null,
+ description: 'The target URL of your Azure AI Studio model deployment.',
+ label: 'Target',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'hugging_face',
+ name: 'Hugging Face',
+ task_types: ['text_embedding', 'sparse_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/embeddings',
+ description: 'The URL endpoint to use for the requests.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'amazonbedrock',
+ name: 'Amazon Bedrock',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ secret_key: {
+ default_value: null,
+ description: 'A valid AWS secret key that is paired with the access_key.',
+ label: 'Secret Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ provider: {
+ default_value: null,
+ description: 'The model provider for your deployment.',
+ label: 'Provider',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ access_key: {
+ default_value: null,
+ description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
+ label: 'Access Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description:
+ 'The base model ID or an ARN to a custom model based on a foundational model.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ region: {
+ default_value: null,
+ description: 'The region that your model or ARN is deployed in.',
+ label: 'Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'anthropic',
+ name: 'Anthropic',
+ task_types: ['completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'googleaistudio',
+ name: 'Google AI Studio',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: "ID of the LLM you're using.",
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'elasticsearch',
+ name: 'Elasticsearch',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
+ configurations: {
+ num_allocations: {
+ default_value: 1,
+ description:
+ 'The total number of allocations this model is assigned across machine learning nodes.',
+ label: 'Number Allocations',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ num_threads: {
+ default_value: 2,
+ description: 'Sets the number of threads used by each model allocation during inference.',
+ label: 'Number Threads',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: '.multilingual-e5-small',
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'openai',
+ name: 'OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description:
+ 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ organization_id: {
+ default_value: null,
+ description: 'The unique identifier of your organization.',
+ label: 'Organization ID',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: 'The name of the model to use for the inference task.',
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ url: {
+ default_value: 'https://api.openai.com/v1/chat/completions',
+ description:
+ 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
+ label: 'URL',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'azureopenai',
+ name: 'Azure OpenAI',
+ task_types: ['text_embedding', 'completion'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ entra_id: {
+ default_value: null,
+ description: 'You must provide either an API key or an Entra ID.',
+ label: 'Entra ID',
+ required: false,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description:
+ 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ deployment_id: {
+ default_value: null,
+ description: 'The deployment name of your deployed models.',
+ label: 'Deployment ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ resource_name: {
+ default_value: null,
+ description: 'The name of your Azure OpenAI resource.',
+ label: 'Resource Name',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_version: {
+ default_value: null,
+ description: 'The Azure API version ID to use.',
+ label: 'API Version',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'mistral',
+ name: 'Mistral',
+ task_types: ['text_embedding'],
+ configurations: {
+ api_key: {
+ default_value: null,
+ description: `API Key for the provider you're connecting to.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ model: {
+ default_value: null,
+ description:
+ 'Refer to the Mistral models documentation for the list of available text embedding models.',
+ label: 'Model',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ max_input_tokens: {
+ default_value: null,
+ description: 'Allows you to specify the maximum number of tokens per input.',
+ label: 'Maximum Input Tokens',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ },
+ },
+ {
+ service: 'googlevertexai',
+ name: 'Google Vertex AI',
+ task_types: ['text_embedding', 'rerank'],
+ configurations: {
+ service_account_json: {
+ default_value: null,
+ description: "API Key for the provider you're connecting to.",
+ label: 'Credentials JSON',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ project_id: {
+ default_value: null,
+ description:
+ 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Project',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ location: {
+ default_value: null,
+ description:
+ 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
+ label: 'GCP Region',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ model_id: {
+ default_value: null,
+ description: `ID of the LLM you're using.`,
+ label: 'Model ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ {
+ service: 'alibabacloud-ai-search',
+ name: 'AlibabaCloud AI Search',
+ task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
+ configurations: {
+ workspace: {
+ default_value: null,
+ description: 'The name of the workspace used for the {infer} task.',
+ label: 'Workspace',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ api_key: {
+ default_value: null,
+ description: `A valid API key for the AlibabaCloud AI Search API.`,
+ label: 'API Key',
+ required: true,
+ sensitive: true,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ service_id: {
+ default_value: null,
+ description: 'The name of the model service to use for the {infer} task.',
+ label: 'Project ID',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ host: {
+ default_value: null,
+ description:
+ 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
+ label: 'Host',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ 'rate_limit.requests_per_minute': {
+ default_value: null,
+ description: 'Minimize the number of rate limit errors.',
+ label: 'Rate Limit',
+ required: false,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.INTEGER,
+ },
+ http_schema: {
+ default_value: null,
+ description: '',
+ label: 'HTTP Schema',
+ required: true,
+ sensitive: false,
+ updatable: true,
+ type: FieldType.STRING,
+ },
+ },
+ },
+ ];
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
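+  // Minimal scoped cluster client stub exposing only the transport.request method
+  // used by fetchInferenceServices.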
+ const mockClient = {
+ asCurrentUser: {
+ transport: {
+ request: jest.fn(),
+ },
+ },
+ };
+ it('returns all inference services', async () => {
+ mockClient.asCurrentUser.transport.request.mockImplementationOnce(() => {
+ return Promise.resolve({ services: mockInferenceServicesResponse });
+ });
+
+ const services = await fetchInferenceServices(
+ mockClient.asCurrentUser as unknown as ElasticsearchClient
+ );
+
+ expect(services).toEqual({
+ services: mockInferenceServicesResponse,
+ });
+ });
+});
From dc0465b97771d5d54b19e3a0b338f784bc9fed4f Mon Sep 17 00:00:00 2001
From: Samiul Monir
Date: Wed, 18 Dec 2024 16:59:19 -0500
Subject: [PATCH 04/18] Adding FTR tests for adding inference management ui
---
.../components/inference_endpoints_header.tsx | 2 +-
.../svl_search_inference_management_page.ts | 22 +++++++++++++++++++
.../search/inference_management.ts | 6 +++++
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
index 9d4d002400aad..ce94c4c89194b 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
@@ -30,7 +30,7 @@ export const InferenceEndpointsHeader: React.FC =
iconType="plusInCircle"
fill
iconSize="m"
- data-test-subj="add-ai-connector"
+ data-test-subj="add-inference-endpoint-header-button"
onClick={() => setIsAddInferenceFlyoutOpen(true)}
>
{i18n.ADD_ENDPOINT_LABEL}
diff --git a/x-pack/test_serverless/functional/page_objects/svl_search_inference_management_page.ts b/x-pack/test_serverless/functional/page_objects/svl_search_inference_management_page.ts
index 4e4c6147e8f77..0cfc7a5447bb9 100644
--- a/x-pack/test_serverless/functional/page_objects/svl_search_inference_management_page.ts
+++ b/x-pack/test_serverless/functional/page_objects/svl_search_inference_management_page.ts
@@ -18,6 +18,7 @@ export function SvlSearchInferenceManagementPageProvider({ getService }: FtrProv
await testSubjects.existOrFail('allInferenceEndpointsPage');
await testSubjects.existOrFail('api-documentation');
await testSubjects.existOrFail('view-your-models');
+ await testSubjects.existOrFail('add-inference-endpoint-header-button');
},
async expectTabularViewToBeLoaded() {
@@ -95,5 +96,26 @@ export function SvlSearchInferenceManagementPageProvider({ getService }: FtrProv
expect((await browser.getClipboardValue()).includes('.elser-2-elasticsearch')).to.be(true);
},
},
+
+ AddInferenceFlyout: {
+ async expectInferenceEndpointToBeVisible() {
+ await testSubjects.click('add-inference-endpoint-header-button');
+ await testSubjects.existOrFail('create-inference-flyout');
+
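+        // Search for and select the Cohere provider in the provider dropdown.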
+ await testSubjects.click('provider-select');
+ await testSubjects.setValue('provider-super-select-search-box', 'Cohere');
+ await testSubjects.click('provider');
+
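+        // The API key field should now be visible; selecting the completion task type
+        // should generate an endpoint id containing 'cohere-completion'.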
+ await testSubjects.existOrFail('api_key-password');
+ await testSubjects.click('completion');
+ await testSubjects.existOrFail('inference-endpoint-input-field');
+        expect(
+          (await testSubjects.getVisibleText('inference-endpoint-input-field')).includes(
+            'cohere-completion'
+          )
+        ).to.be(true);
+
+ await testSubjects.click('add-inference-endpoint-submit-button');
+ expect(await testSubjects.isEnabled('add-inference-endpoint-submit-button')).to.be(false);
+ },
+ },
};
}
diff --git a/x-pack/test_serverless/functional/test_suites/search/inference_management.ts b/x-pack/test_serverless/functional/test_suites/search/inference_management.ts
index fce97e3bbf475..73eb8098886b0 100644
--- a/x-pack/test_serverless/functional/test_suites/search/inference_management.ts
+++ b/x-pack/test_serverless/functional/test_suites/search/inference_management.ts
@@ -97,6 +97,12 @@ export default function ({ getPageObjects, getService }: FtrProviderContext) {
});
});
+ describe('create inference flyout', () => {
+ it('renders successfully', async () => {
+ await pageObjects.svlSearchInferenceManagementPage.AddInferenceFlyout.expectInferenceEndpointToBeVisible();
+ });
+ });
+
it('has embedded dev console', async () => {
await testHasEmbeddedConsole(pageObjects);
});
From 4d16097f04f005247e74f1b3b6ea63ee0027c837 Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Wed, 18 Dec 2024 22:22:05 +0000
Subject: [PATCH 05/18] [CI] Auto-commit changed files from 'node
scripts/generate codeowners'
---
.github/CODEOWNERS | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 8dce022a2140e..1e2b72acdcc1f 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -764,7 +764,7 @@ x-pack/packages/ai-infra/product-doc-artifact-builder @elastic/appex-ai-infra
x-pack/packages/kbn-ai-assistant @elastic/search-kibana
x-pack/packages/kbn-alerting-comparators @elastic/response-ops
x-pack/packages/kbn-alerting-state-types @elastic/response-ops
-x-pack/packages/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai @elastic/search-kibana
+x-pack/packages/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai
x-pack/packages/kbn-random-sampling @elastic/kibana-visualizations
x-pack/packages/kbn-synthetics-private-location @elastic/obs-ux-management-team
x-pack/packages/maps/vector_tile_utils @elastic/kibana-presentation
From 8ed537aefa7a80418e479398ba52402eaf7870b8 Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Wed, 18 Dec 2024 22:31:37 +0000
Subject: [PATCH 06/18] [CI] Auto-commit changed files from 'node
scripts/lint_ts_projects --fix'
---
x-pack/plugins/search_inference_endpoints/tsconfig.json | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/x-pack/plugins/search_inference_endpoints/tsconfig.json b/x-pack/plugins/search_inference_endpoints/tsconfig.json
index dc1441f88227d..da162552248b4 100644
--- a/x-pack/plugins/search_inference_endpoints/tsconfig.json
+++ b/x-pack/plugins/search_inference_endpoints/tsconfig.json
@@ -36,9 +36,11 @@
"@kbn/search-navigation",
"@kbn/shared-ux-page-kibana-template",
"@kbn/licensing-plugin",
- "@kbn/triggers-actions-ui-plugin",
"@kbn/inference-endpoint-ui-common",
- "@kbn/es-ui-shared-plugin"
+ "@kbn/es-ui-shared-plugin",
+ "@kbn/search-connectors",
+ "@kbn/core-http-browser",
+ "@kbn/safer-lodash-set"
],
"exclude": [
"target/**/*",
From 93d448b6f1d2fe8fa58c9be5447a8892692be0a8 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Wed, 18 Dec 2024 20:21:42 -0800
Subject: [PATCH 07/18] [AI Connector] Migrates AI inference Connector to use a
shared components from '@kbn/inference-endpoint-ui-common'
---
.../inference/additional_options_fields.tsx | 315 ----------------
.../connector_types/inference/connector.tsx | 355 +-----------------
.../providers/assets/images/alibaba_cloud.svg | 3 -
.../assets/images/amazon_bedrock.svg | 11 -
.../providers/assets/images/anthropic.svg | 3 -
.../assets/images/azure_ai_studio.svg | 44 ---
.../providers/assets/images/azure_open_ai.svg | 9 -
.../providers/assets/images/cohere.svg | 9 -
.../providers/assets/images/elastic.svg | 16 -
.../assets/images/google_ai_studio.svg | 6 -
.../providers/assets/images/hugging_face.svg | 10 -
.../providers/assets/images/ibm_watsonx.svg | 3 -
.../providers/assets/images/mistral.svg | 34 --
.../providers/assets/images/open_ai.svg | 3 -
.../providers/get_providers.test.tsx | 52 ---
.../service_provider.test.tsx | 42 ---
.../service_provider.tsx | 124 ------
.../providers/selectable/index.test.tsx | 60 ---
.../inference/providers/selectable/index.tsx | 145 -------
.../connector_configuration_field.tsx | 237 ------------
.../connector_configuration_form_items.tsx | 97 -----
.../connector_configuration_utils.ts | 51 ---
22 files changed, 6 insertions(+), 1623 deletions(-)
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/additional_options_fields.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/alibaba_cloud.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/amazon_bedrock.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/anthropic.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_ai_studio.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_open_ai.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/cohere.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/elastic.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/google_ai_studio.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/hugging_face.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/ibm_watsonx.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/mistral.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/open_ai.svg
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.test.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.test.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
delete mode 100644 x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/additional_options_fields.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/additional_options_fields.tsx
deleted file mode 100644
index 5862389e6ab80..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/additional_options_fields.tsx
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React, { useMemo } from 'react';
-import { css } from '@emotion/react';
-
-import {
- EuiFormRow,
- EuiSpacer,
- EuiTitle,
- EuiAccordion,
- EuiFieldText,
- useEuiTheme,
- EuiTextColor,
- EuiButtonGroup,
- EuiPanel,
- EuiButtonEmpty,
- EuiCopy,
- EuiButton,
- useEuiFontSize,
-} from '@elastic/eui';
-import {
- getFieldValidityAndErrorMessage,
- UseField,
- useFormContext,
-} from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
-import { FormattedMessage } from '@kbn/i18n-react';
-
-import { fieldValidators } from '@kbn/es-ui-shared-plugin/static/forms/helpers';
-import { ConfigEntryView } from '../../../common/dynamic_config/types';
-import { ConnectorConfigurationFormItems } from '../lib/dynamic_config/connector_configuration_form_items';
-import * as i18n from './translations';
-import { DEFAULT_TASK_TYPE } from './constants';
-import { Config } from './types';
-import { TaskTypeOption } from './helpers';
-
-// Custom trigger button CSS
-const buttonCss = css`
- &:hover {
- text-decoration: none;
- }
-`;
-
-interface AdditionalOptionsConnectorFieldsProps {
- config: Config;
- readOnly: boolean;
- isEdit: boolean;
- optionalProviderFormFields: ConfigEntryView[];
- onSetProviderConfigEntry: (key: string, value: unknown) => Promise;
- onTaskTypeOptionsSelect: (taskType: string, provider?: string) => void;
- selectedTaskType?: string;
- taskTypeFormFields: ConfigEntryView[];
- taskTypeOptions: TaskTypeOption[];
-}
-
-export const AdditionalOptionsConnectorFields: React.FC = ({
- config,
- readOnly,
- isEdit,
- taskTypeOptions,
- optionalProviderFormFields,
- selectedTaskType,
- onSetProviderConfigEntry,
- onTaskTypeOptionsSelect,
-}) => {
- const xsFontSize = useEuiFontSize('xs').fontSize;
- const { euiTheme } = useEuiTheme();
- const { setFieldValue } = useFormContext();
-
- const taskTypeSettings = useMemo(
- () =>
- selectedTaskType || config.taskType?.length ? (
- <>
-
-
-
-
-
-
-
-
-
-
-
- {(field) => {
- const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
-
- return (
-
- {isEdit || readOnly ? (
-
- {config.taskType}
-
- ) : taskTypeOptions.length === 1 ? (
- onTaskTypeOptionsSelect(config.taskType)}
- >
- {config.taskType}
-
- ) : (
- onTaskTypeOptionsSelect(id)}
- options={taskTypeOptions}
- color="text"
- type="single"
- />
- )}
-
- );
- }}
-
- >
- ) : null,
- [
- selectedTaskType,
- config.taskType,
- xsFontSize,
- euiTheme.colors,
- isEdit,
- readOnly,
- taskTypeOptions,
- onTaskTypeOptionsSelect,
- ]
- );
-
- const inferenceUri = useMemo(() => `_inference/${selectedTaskType}/`, [selectedTaskType]);
-
- return (
-
-
-
- }
- initialIsOpen={true}
- >
-
-
- {optionalProviderFormFields.length > 0 ? (
- <>
-
-
-
-
-
-
-
-
-
-
-
-
- >
- ) : null}
-
- {taskTypeSettings}
-
-
-
-
-
-
-
-
-
-
-
-
-
- {(field) => {
- const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
-
- return (
-
- }
- >
- {
- setFieldValue('config.inferenceId', e.target.value);
- }}
- prepend={inferenceUri}
- append={
-
- {(copy) => (
-
-
-
- )}
-
- }
- />
-
- );
- }}
-
-
-
- );
-};
-
-// eslint-disable-next-line import/no-default-export
-export { AdditionalOptionsConnectorFields as default };
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/connector.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/connector.tsx
index 51fc2d2a3e9f7..b54f2af6c0792 100644
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/connector.tsx
+++ b/x-pack/plugins/stack_connectors/public/connector_types/inference/connector.tsx
@@ -5,44 +5,12 @@
* 2.0.
*/
-import React, { useState, useEffect, useCallback, useMemo } from 'react';
-import {
- EuiFormRow,
- EuiSpacer,
- EuiInputPopover,
- EuiFieldText,
- EuiFieldTextProps,
- EuiSelectableOption,
- EuiFormControlLayout,
- keys,
- EuiHorizontalRule,
-} from '@elastic/eui';
-import {
- getFieldValidityAndErrorMessage,
- UseField,
- useFormContext,
- useFormData,
-} from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
-import { FormattedMessage } from '@kbn/i18n-react';
-import {
- ConnectorFormSchema,
- type ActionConnectorFieldsProps,
-} from '@kbn/triggers-actions-ui-plugin/public';
+import React from 'react';
+import { InferenceServices } from '@kbn/inference-endpoint-ui-common';
+import { type ActionConnectorFieldsProps } from '@kbn/triggers-actions-ui-plugin/public';
import { useKibana } from '@kbn/triggers-actions-ui-plugin/public';
-import { fieldValidators } from '@kbn/es-ui-shared-plugin/static/forms/helpers';
-import { ConfigEntryView } from '../../../common/dynamic_config/types';
-import { ServiceProviderKeys } from '../../../common/inference/constants';
-import { ConnectorConfigurationFormItems } from '../lib/dynamic_config/connector_configuration_form_items';
-import * as i18n from './translations';
-import { DEFAULT_TASK_TYPE } from './constants';
-import { SelectableProvider } from './providers/selectable';
-import { Config, Secrets } from './types';
-import { generateInferenceEndpointId, getTaskTypeOptions, TaskTypeOption } from './helpers';
import { useProviders } from './providers/get_providers';
-import { SERVICE_PROVIDERS } from './providers/render_service_provider/service_provider';
-import { AdditionalOptionsConnectorFields } from './additional_options_fields';
-import { getProviderConfigHiddenField, getProviderSecretsHiddenField } from './hidden_fields';
const InferenceAPIConnectorFields: React.FunctionComponent = ({
readOnly,
@@ -53,123 +21,9 @@ const InferenceAPIConnectorFields: React.FunctionComponent>({
- watch: [
- 'secrets.providerSecrets',
- 'config.taskType',
- 'config.inferenceId',
- 'config.provider',
- 'config.providerConfig',
- ],
- });
-
const { data: providers, isLoading } = useProviders(http, toasts);
- const [isProviderPopoverOpen, setProviderPopoverOpen] = useState(false);
-
- const [providerSchema, setProviderSchema] = useState([]);
- const [optionalProviderFormFields, setOptionalProviderFormFields] = useState(
- []
- );
- const [requiredProviderFormFields, setRequiredProviderFormFields] = useState(
- []
- );
-
- const [taskTypeOptions, setTaskTypeOptions] = useState([]);
- const [selectedTaskType, setSelectedTaskType] = useState(DEFAULT_TASK_TYPE);
- const [taskTypeFormFields] = useState([]);
-
- const handleProviderClosePopover = useCallback(() => {
- setProviderPopoverOpen(false);
- }, []);
-
- const handleProviderPopover = useCallback(() => {
- setProviderPopoverOpen((isOpen) => !isOpen);
- }, []);
-
- const handleProviderKeyboardOpen: EuiFieldTextProps['onKeyDown'] = useCallback((event: any) => {
- if (event.key === keys.ENTER) {
- setProviderPopoverOpen(true);
- }
- }, []);
-
- useEffect(() => {
- if (!isEdit && config && !config.inferenceId) {
- generateInferenceEndpointId(config, setFieldValue);
- }
- }, [isEdit, setFieldValue, config]);
-
- useEffect(() => {
- if (isSubmitting) {
- validateFields(['config.providerConfig']);
- validateFields(['secrets.providerSecrets']);
- }
- }, [isSubmitting, config, validateFields]);
-
- const onTaskTypeOptionsSelect = useCallback(
- (taskType: string) => {
- // Get task type settings
- setSelectedTaskType(taskType);
-
- updateFieldValues({
- config: {
- taskType,
- },
- });
- generateInferenceEndpointId({ ...config, taskType }, setFieldValue);
- },
- [config, setFieldValue, updateFieldValues]
- );
-
- const onProviderChange = useCallback(
- (provider?: string) => {
- const newProvider = providers?.find((p) => p.service === provider);
-
- // Update task types list available for the selected provider
- setTaskTypeOptions(getTaskTypeOptions(newProvider?.task_types ?? []));
- if (newProvider?.task_types && newProvider?.task_types.length > 0) {
- onTaskTypeOptionsSelect(newProvider?.task_types[0]);
- }
-
- // Update connector providerSchema
- const newProviderSchema = Object.keys(newProvider?.configurations ?? {}).map((k) => ({
- key: k,
- isValid: true,
- ...newProvider?.configurations[k],
- })) as ConfigEntryView[];
-
- setProviderSchema(newProviderSchema);
-
- const defaultProviderConfig: Record = {};
- const defaultProviderSecrets: Record = {};
-
- Object.keys(newProvider?.configurations ?? {}).forEach((k) => {
- if (!newProvider?.configurations[k].sensitive) {
- if (newProvider?.configurations[k] && !!newProvider?.configurations[k].default_value) {
- defaultProviderConfig[k] = newProvider.configurations[k].default_value;
- } else {
- defaultProviderConfig[k] = null;
- }
- } else {
- defaultProviderSecrets[k] = null;
- }
- });
-
- updateFieldValues({
- config: {
- provider: newProvider?.service,
- providerConfig: defaultProviderConfig,
- },
- secrets: {
- providerSecrets: defaultProviderSecrets,
- },
- });
- },
- [onTaskTypeOptionsSelect, providers, updateFieldValues]
- );
-
- useEffect(() => {
+ /* useEffect(() => {
if (config?.provider && isEdit) {
const newProvider = providers?.find((p) => p.service === config.provider);
// Update connector providerSchema
@@ -183,206 +37,9 @@ const InferenceAPIConnectorFields: React.FunctionComponent {
- // Set values from the provider secrets and config to the schema
- const existingConfiguration = providerSchema
- ? providerSchema.map((item: ConfigEntryView) => {
- const itemValue = item;
- itemValue.isValid = true;
- if (item.sensitive && secrets?.providerSecrets) {
- itemValue.value = secrets?.providerSecrets[item.key] as any;
- } else if (config?.providerConfig) {
- itemValue.value = config?.providerConfig[item.key] as any;
- }
- return itemValue;
- })
- : [];
-
- setOptionalProviderFormFields(existingConfiguration.filter((p) => !p.required && !p.sensitive));
- setRequiredProviderFormFields(existingConfiguration.filter((p) => p.required || p.sensitive));
- }, [config?.providerConfig, providerSchema, secrets]);
-
- const getProviderOptions = useCallback(() => {
- return providers?.map((p) => ({
- label: p.service,
- key: p.service,
- })) as EuiSelectableOption[];
- }, [providers]);
-
- const onSetProviderConfigEntry = useCallback(
- async (key: string, value: unknown) => {
- const entry: ConfigEntryView | undefined = providerSchema.find(
- (p: ConfigEntryView) => p.key === key
- );
- if (entry) {
- if (entry.sensitive) {
- if (!secrets.providerSecrets) {
- secrets.providerSecrets = {};
- }
- const newSecrets = { ...secrets.providerSecrets };
- newSecrets[key] = value;
- setFieldValue('secrets.providerSecrets', newSecrets);
- await validateFields(['secrets.providerSecrets']);
- } else {
- if (!config.providerConfig) {
- config.providerConfig = {};
- }
- const newConfig = { ...config.providerConfig };
- newConfig[key] = value;
- setFieldValue('config.providerConfig', newConfig);
- await validateFields(['config.providerConfig']);
- }
- }
- },
- [config, providerSchema, secrets, setFieldValue, validateFields]
- );
-
- const onClearProvider = useCallback(() => {
- onProviderChange();
- setFieldValue('config.taskType', '');
- setFieldValue('config.provider', '');
- }, [onProviderChange, setFieldValue]);
-
- const providerIcon = useMemo(
- () =>
- Object.keys(SERVICE_PROVIDERS).includes(config?.provider)
- ? SERVICE_PROVIDERS[config?.provider as ServiceProviderKeys].icon
- : undefined,
- [config?.provider]
- );
-
- const providerName = useMemo(
- () =>
- Object.keys(SERVICE_PROVIDERS).includes(config?.provider)
- ? SERVICE_PROVIDERS[config?.provider as ServiceProviderKeys].name
- : config?.provider,
- [config?.provider]
- );
-
- const providerSuperSelect = useCallback(
- (isInvalid: boolean) => (
-
-
-
- ),
- [
- isEdit,
- readOnly,
- onClearProvider,
- config?.provider,
- providerIcon,
- handleProviderPopover,
- handleProviderKeyboardOpen,
- providerName,
- isProviderPopoverOpen,
- ]
- );
+*/
- return (
- <>
-
- {(field) => {
- const { isInvalid, errorMessage } = getFieldValidityAndErrorMessage(field);
- const selectInput = providerSuperSelect(isInvalid);
- return (
-
- }
- isInvalid={isInvalid}
- error={errorMessage}
- >
-
-
-
-
- );
- }}
-
- {config?.provider ? (
- <>
-
-
-
-
-
-
- {getProviderSecretsHiddenField(
- providerSchema,
- setRequiredProviderFormFields,
- isSubmitting
- )}
- {getProviderConfigHiddenField(
- providerSchema,
- setRequiredProviderFormFields,
- isSubmitting
- )}
- >
- ) : null}
- >
- );
+ return !isLoading ? : null;
};
// eslint-disable-next-line import/no-default-export
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/alibaba_cloud.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/alibaba_cloud.svg
deleted file mode 100644
index 1ae552d509c3a..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/alibaba_cloud.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/amazon_bedrock.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/amazon_bedrock.svg
deleted file mode 100644
index f8815d4f75ec5..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/amazon_bedrock.svg
+++ /dev/null
@@ -1,11 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/anthropic.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/anthropic.svg
deleted file mode 100644
index c361cda86a7df..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/anthropic.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_ai_studio.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_ai_studio.svg
deleted file mode 100644
index 405e182a10394..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_ai_studio.svg
+++ /dev/null
@@ -1,44 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_open_ai.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_open_ai.svg
deleted file mode 100644
index 122c0c65af13c..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/azure_open_ai.svg
+++ /dev/null
@@ -1,9 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/cohere.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/cohere.svg
deleted file mode 100644
index 69953809fec35..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/cohere.svg
+++ /dev/null
@@ -1,9 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/elastic.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/elastic.svg
deleted file mode 100644
index e763c2e2f2ab6..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/elastic.svg
+++ /dev/null
@@ -1,16 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/google_ai_studio.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/google_ai_studio.svg
deleted file mode 100644
index b6e34ae15c9e4..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/google_ai_studio.svg
+++ /dev/null
@@ -1,6 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/hugging_face.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/hugging_face.svg
deleted file mode 100644
index 87ac70c5a18f4..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/hugging_face.svg
+++ /dev/null
@@ -1,10 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/ibm_watsonx.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/ibm_watsonx.svg
deleted file mode 100644
index 5883eff3884d6..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/ibm_watsonx.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/mistral.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/mistral.svg
deleted file mode 100644
index f62258a327594..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/mistral.svg
+++ /dev/null
@@ -1,34 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/open_ai.svg b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/open_ai.svg
deleted file mode 100644
index 9ddc8f8fd63b8..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/assets/images/open_ai.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
deleted file mode 100644
index 7ab81ee062638..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-import React from 'react';
-import * as ReactQuery from '@tanstack/react-query';
-import { waitFor, renderHook } from '@testing-library/react';
-import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
-import { httpServiceMock, notificationServiceMock } from '@kbn/core/public/mocks';
-import { useProviders } from './get_providers';
-
-const http = httpServiceMock.createStartContract();
-const toasts = notificationServiceMock.createStartContract();
-const useQuerySpy = jest.spyOn(ReactQuery, 'useQuery');
-
-beforeEach(() => jest.resetAllMocks());
-
-const { getProviders } = jest.requireMock('./get_providers');
-
-const queryClient = new QueryClient();
-
-const wrapper = ({ children }: { children: React.ReactNode }) => (
- {children}
-);
-
-describe('useProviders', () => {
- beforeEach(() => {
- jest.clearAllMocks();
- });
-
- it('should call useQuery', async () => {
- renderHook(() => useProviders(http, toasts.toasts), {
- wrapper,
- });
-
- await waitFor(() => {
- return expect(useQuerySpy).toBeCalled();
- });
- });
-
- it('should return isError = true if api fails', async () => {
- getProviders.mockResolvedValue('This is an error.');
-
- renderHook(() => useProviders(http, toasts.toasts), {
- wrapper,
- });
-
- await waitFor(() => expect(useQuerySpy).toHaveBeenCalled());
- });
-});
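For orientation, the deleted test above exercises the `useProviders` hook that the refactored connector.tsx still imports. The sketch below shows roughly how such a hook is consumed; it is illustrative only (the component name, file placement, and prop typing via `Parameters<typeof useProviders>` are assumptions, not part of the patch), and, as the deleted test wrapper shows, callers must sit under a react-query `QueryClientProvider`.

import React from 'react';
import { useProviders } from './get_providers';

type ProvidersArgs = Parameters<typeof useProviders>;

// Illustrative consumer: lists the available inference services once the request resolves.
// The real caller (connector.tsx in this patch) takes `http` and `toasts` from useKibana().services.
export const ProviderList: React.FC<{ http: ProvidersArgs[0]; toasts: ProvidersArgs[1] }> = ({
  http,
  toasts,
}) => {
  const { data: providers, isLoading } = useProviders(http, toasts);

  if (isLoading || !providers) {
    return null;
  }

  return (
    <ul>
      {providers.map((p) => (
        <li key={p.service}>{p.service}</li>
      ))}
    </ul>
  );
};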
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.test.tsx
deleted file mode 100644
index 84a32286b7532..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.test.tsx
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { render, screen } from '@testing-library/react';
-import React from 'react';
-import { ServiceProviderIcon, ServiceProviderName } from './service_provider';
-import { ServiceProviderKeys } from '../../../../../common/inference/constants';
-
-jest.mock('../assets/images/elastic.svg', () => 'elasticIcon.svg');
-jest.mock('../assets/images/hugging_face.svg', () => 'huggingFaceIcon.svg');
-jest.mock('../assets/images/cohere.svg', () => 'cohereIcon.svg');
-jest.mock('../assets/images/open_ai.svg', () => 'openAIIcon.svg');
-
-describe('ServiceProviderIcon component', () => {
- it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
- render();
- const icon = screen.getByTestId('icon-service-provider-hugging_face');
- expect(icon).toBeInTheDocument();
- });
-
- it('renders Open AI icon and name when providerKey is openai', () => {
- render();
- const icon = screen.getByTestId('icon-service-provider-openai');
- expect(icon).toBeInTheDocument();
- });
-});
-
-describe('ServiceProviderName component', () => {
- it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
- render();
- expect(screen.getByText('Hugging Face')).toBeInTheDocument();
- });
-
- it('renders Open AI icon and name when providerKey is openai', () => {
- render();
- expect(screen.getByText('OpenAI')).toBeInTheDocument();
- });
-});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.tsx
deleted file mode 100644
index 5eb8518a5ea15..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/render_service_provider/service_provider.tsx
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { EuiHighlight, EuiIcon } from '@elastic/eui';
-import React from 'react';
-import { ServiceProviderKeys } from '../../../../../common/inference/constants';
-import elasticIcon from '../assets/images/elastic.svg';
-import huggingFaceIcon from '../assets/images/hugging_face.svg';
-import cohereIcon from '../assets/images/cohere.svg';
-import openAIIcon from '../assets/images/open_ai.svg';
-import azureAIStudioIcon from '../assets/images/azure_ai_studio.svg';
-import azureOpenAIIcon from '../assets/images/azure_open_ai.svg';
-import googleAIStudioIcon from '../assets/images/google_ai_studio.svg';
-import mistralIcon from '../assets/images/mistral.svg';
-import amazonBedrockIcon from '../assets/images/amazon_bedrock.svg';
-import anthropicIcon from '../assets/images/anthropic.svg';
-import alibabaCloudIcon from '../assets/images/alibaba_cloud.svg';
-import ibmWatsonxIcon from '../assets/images/ibm_watsonx.svg';
-
-interface ServiceProviderProps {
- providerKey: ServiceProviderKeys;
- searchValue?: string;
-}
-
-export type ProviderSolution = 'Observability' | 'Security' | 'Search';
-
-interface ServiceProviderRecord {
- icon: string;
- name: string;
- solutions: ProviderSolution[];
-}
-
-export const SERVICE_PROVIDERS: Record = {
- [ServiceProviderKeys.amazonbedrock]: {
- icon: amazonBedrockIcon,
- name: 'Amazon Bedrock',
- solutions: ['Observability', 'Security', 'Search'],
- },
- [ServiceProviderKeys.azureaistudio]: {
- icon: azureAIStudioIcon,
- name: 'Azure AI Studio',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.azureopenai]: {
- icon: azureOpenAIIcon,
- name: 'Azure OpenAI',
- solutions: ['Observability', 'Security', 'Search'],
- },
- [ServiceProviderKeys.anthropic]: {
- icon: anthropicIcon,
- name: 'Anthropic',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.cohere]: {
- icon: cohereIcon,
- name: 'Cohere',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.elasticsearch]: {
- icon: elasticIcon,
- name: 'Elasticsearch',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.googleaistudio]: {
- icon: googleAIStudioIcon,
- name: 'Google AI Studio',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.googlevertexai]: {
- icon: googleAIStudioIcon,
- name: 'Google Vertex AI',
- solutions: ['Observability', 'Security', 'Search'],
- },
- [ServiceProviderKeys.hugging_face]: {
- icon: huggingFaceIcon,
- name: 'Hugging Face',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.mistral]: {
- icon: mistralIcon,
- name: 'Mistral',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.openai]: {
- icon: openAIIcon,
- name: 'OpenAI',
- solutions: ['Observability', 'Security', 'Search'],
- },
- [ServiceProviderKeys['alibabacloud-ai-search']]: {
- icon: alibabaCloudIcon,
- name: 'AlibabaCloud AI Search',
- solutions: ['Search'],
- },
- [ServiceProviderKeys.watsonxai]: {
- icon: ibmWatsonxIcon,
- name: 'IBM Watsonx',
- solutions: ['Search'],
- },
-};
-
-export const ServiceProviderIcon: React.FC = ({ providerKey }) => {
- const provider = SERVICE_PROVIDERS[providerKey];
-
- return provider ? (
-
- ) : null;
-};
-
-export const ServiceProviderName: React.FC = ({
- providerKey,
- searchValue,
-}) => {
- const provider = SERVICE_PROVIDERS[providerKey];
-
- return provider ? (
- {provider.name}
- ) : (
- {providerKey}
- );
-};
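For reference, the SERVICE_PROVIDERS record and the two render helpers deleted above are consumed together elsewhere in this patch (for example by the selectable list below). A minimal illustrative sketch follows; the component name and its placement as a sibling of service_provider.tsx are assumptions, while the import paths mirror the ones visible in the deleted file.

import React from 'react';
import { ServiceProviderKeys } from '../../../../../common/inference/constants';
import { SERVICE_PROVIDERS, ServiceProviderIcon, ServiceProviderName } from './service_provider';

// Illustrative row combining the provider icon, display name, and supported-solution badges
// for one provider key, mirroring how the selectable option renderer uses the record.
export const ProviderSummary: React.FC<{ providerKey: ServiceProviderKeys }> = ({ providerKey }) => {
  const provider = SERVICE_PROVIDERS[providerKey];

  return (
    <div>
      <ServiceProviderIcon providerKey={providerKey} />
      <ServiceProviderName providerKey={providerKey} />
      {/* e.g. 'Observability, Security, Search' for openai */}
      <span>{provider.solutions.join(', ')}</span>
    </div>
  );
};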
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.test.tsx
deleted file mode 100644
index f83d4bcd9ea4c..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.test.tsx
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import type { EuiSelectableProps } from '@elastic/eui';
-import React from 'react';
-import type { ShallowWrapper } from 'enzyme';
-import { shallow } from 'enzyme';
-
-import { SelectableProvider } from '.';
-
-describe('SelectableProvider', () => {
- const props = {
- isLoading: false,
- onClosePopover: jest.fn(),
- onProviderChange: jest.fn(),
- getSelectableOptions: jest.fn().mockReturnValue([]),
- };
-
- describe('should render', () => {
- let wrapper: ShallowWrapper;
-
- describe('provider', () => {
- beforeAll(() => {
- wrapper = shallow();
- });
-
- afterAll(() => {
- jest.clearAllMocks();
- });
-
- test('render placeholder', () => {
- const searchProps: EuiSelectableProps['searchProps'] = wrapper
- .find('[data-test-subj="selectable-provider-input"]')
- .prop('searchProps');
- expect(searchProps?.placeholder).toEqual('Search');
- });
- });
-
- describe('template', () => {
- beforeAll(() => {
- wrapper = shallow();
- });
-
- afterAll(() => {
- jest.clearAllMocks();
- });
-
- test('render placeholder', () => {
- const searchProps: EuiSelectableProps['searchProps'] = wrapper
- .find('[data-test-subj="selectable-provider-input"]')
- .prop('searchProps');
- expect(searchProps?.placeholder).toEqual('Search');
- });
- });
- });
-});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.tsx b/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.tsx
deleted file mode 100644
index fc31c9dd6c4f7..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/inference/providers/selectable/index.tsx
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import type { EuiSelectableOption, EuiSelectableProps } from '@elastic/eui';
-import { EuiSelectable, EuiFlexGroup, EuiFlexItem, EuiBadge } from '@elastic/eui';
-import React, { memo, useCallback, useMemo, useState } from 'react';
-import { i18n } from '@kbn/i18n';
-import { ServiceProviderKeys } from '../../../../../common/inference/constants';
-import {
- ProviderSolution,
- SERVICE_PROVIDERS,
- ServiceProviderIcon,
- ServiceProviderName,
-} from '../render_service_provider/service_provider';
-
-/**
- * Modifies options by creating a new `providerTitle` property (copying the value of `title`) and setting `title` to undefined.
- * This prevents the default browser tooltip from appearing on option hover (the `title` attribute would otherwise be rendered on the li element).
- *
- * @param {EuiSelectableOption[]} options
- * @returns {EuiSelectableOption[]} modified options
- */
-
-export interface SelectableProviderProps {
- isLoading: boolean;
- getSelectableOptions: (searchProviderValue?: string) => EuiSelectableOption[];
- onClosePopover: () => void;
- onProviderChange: (provider?: string) => void;
-}
-
-const SelectableProviderComponent: React.FC = ({
- isLoading,
- getSelectableOptions,
- onClosePopover,
- onProviderChange,
-}) => {
- const [searchProviderValue, setSearchProviderValue] = useState('');
- const onSearchProvider = useCallback(
- (val: string) => {
- setSearchProviderValue(val);
- },
- [setSearchProviderValue]
- );
-
- const renderProviderOption = useCallback>(
- (option, searchValue) => {
- const provider = Object.keys(SERVICE_PROVIDERS).includes(option.label)
- ? SERVICE_PROVIDERS[option.label as ServiceProviderKeys]
- : undefined;
-
- const supportedBySolutions = (provider &&
- provider.solutions.map((solution) => (
-
- {solution}
-
- ))) ?? (
-
- {'Search' as ProviderSolution}
-
- );
- return (
-
-
-
-
-
-
-
-
-
-
-
-
-
- {supportedBySolutions}
-
-
-
- );
- },
- []
- );
-
- const handleProviderChange = useCallback>(
- (options) => {
- const selectedProvider = options.filter((option) => option.checked === 'on');
- if (selectedProvider != null && selectedProvider.length > 0) {
- onProviderChange(selectedProvider[0].label);
- }
- onClosePopover();
- },
- [onClosePopover, onProviderChange]
- );
-
- const EuiSelectableContent = useCallback>(
- (list, search) => (
- <>
- {search}
- {list}
- >
- ),
- []
- );
-
- const searchProps: EuiSelectableProps['searchProps'] = useMemo(
- () => ({
- 'data-test-subj': 'provider-super-select-search-box',
- placeholder: i18n.translate(
- 'xpack.stackConnectors.components.inference.selectable.providerSearch',
- {
- defaultMessage: 'Search',
- }
- ),
- onSearch: onSearchProvider,
- incremental: false,
- compressed: true,
- fullWidth: true,
- }),
- [onSearchProvider]
- );
-
- return (
-
- {EuiSelectableContent}
-
- );
-};
-
-export const SelectableProvider = memo(SelectableProviderComponent);
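The deleted SelectableProvider component above exposes the props declared in SelectableProviderProps (isLoading, getSelectableOptions, onClosePopover, onProviderChange). A short illustrative caller follows; the wrapper component, its props, and the substring-based filtering are assumptions for the sketch, while the option shape ({ label, key }) matches the getProviderOptions mapping removed from connector.tsx earlier in this patch.

import React, { useCallback } from 'react';
import type { EuiSelectableOption } from '@elastic/eui';
import { SelectableProvider } from '.';

// Illustrative caller: builds selectable options from a list of service names and forwards
// the selection. The real caller (connector.tsx) wires this into an EuiInputPopover and
// derives the list from the useProviders() response.
export const ProviderPicker: React.FC<{
  services: string[];
  onChange: (provider?: string) => void;
  onClose: () => void;
}> = ({ services, onChange, onClose }) => {
  const getSelectableOptions = useCallback(
    (search?: string): EuiSelectableOption[] =>
      services
        .filter((s) => (search ? s.includes(search) : true))
        .map((s) => ({ label: s, key: s })),
    [services]
  );

  return (
    <SelectableProvider
      isLoading={false}
      getSelectableOptions={getSelectableOptions}
      onClosePopover={onClose}
      onProviderChange={onChange}
    />
  );
};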
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx b/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
deleted file mode 100644
index b6dc0972492bf..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React, { useEffect, useState } from 'react';
-
-import {
- EuiAccordion,
- EuiFieldText,
- EuiFieldPassword,
- EuiSwitch,
- EuiTextArea,
- EuiFieldNumber,
-} from '@elastic/eui';
-
-import { isEmpty } from 'lodash/fp';
-import { ConfigEntryView, FieldType } from '../../../../common/dynamic_config/types';
-import {
- ensureBooleanType,
- ensureCorrectTyping,
- ensureStringType,
-} from './connector_configuration_utils';
-
-interface ConnectorConfigurationFieldProps {
- configEntry: ConfigEntryView;
- isLoading: boolean;
- setConfigValue: (value: number | string | boolean | null) => void;
-}
-
-interface ConfigInputFieldProps {
- configEntry: ConfigEntryView;
- isLoading: boolean;
- validateAndSetConfigValue: (value: string | boolean) => void;
-}
-export const ConfigInputField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(
- !value || value.toString().length === 0 ? default_value : value
- );
-
- useEffect(() => {
- setInnerValue(!value || value.toString().length === 0 ? default_value : value);
- }, [default_value, value]);
- return (
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- );
-};
-
-export const ConfigSwitchField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { label, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(value ?? default_value);
- }, [default_value, value]);
- return (
- {label}}
- onChange={(event) => {
- setInnerValue(event.target.checked);
- validateAndSetConfigValue(event.target.checked);
- }}
- />
- );
-};
-
-export const ConfigInputTextArea: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(value ?? default_value);
- }, [default_value, value]);
- return (
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- );
-};
-
-export const ConfigNumberField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(!value || value.toString().length === 0 ? default_value : value);
- }, [default_value, value]);
- return (
- {
- const newValue = isEmpty(event.target.value) ? '0' : event.target.value;
- setInnerValue(newValue);
- validateAndSetConfigValue(newValue);
- }}
- />
- );
-};
-
-export const ConfigSensitiveTextArea: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- const { key, label } = configEntry;
- return (
- {label}}>
-
-
- );
-};
-
-export const ConfigInputPassword: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- const { value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? null);
- useEffect(() => {
- setInnerValue(value ?? null);
- }, [value]);
- return (
- <>
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- >
- );
-};
-
-export const ConnectorConfigurationField: React.FC = ({
- configEntry,
- isLoading,
- setConfigValue,
-}) => {
- const validateAndSetConfigValue = (value: number | string | boolean) => {
- setConfigValue(ensureCorrectTyping(configEntry.type, value));
- };
-
- const { key, type, sensitive } = configEntry;
-
- switch (type) {
- case FieldType.INTEGER:
- return (
-
- );
-
- case FieldType.BOOLEAN:
- return (
-
- );
-
- default:
- return sensitive ? (
-
- ) : (
-
- );
- }
-};
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx b/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
deleted file mode 100644
index ed564c247394c..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React from 'react';
-
-import {
- EuiCallOut,
- EuiFlexGroup,
- EuiFlexItem,
- EuiFormRow,
- EuiSpacer,
- EuiText,
-} from '@elastic/eui';
-
-import { i18n } from '@kbn/i18n';
-import { ConfigEntryView } from '../../../../common/dynamic_config/types';
-import { ConnectorConfigurationField } from './connector_configuration_field';
-
-interface ConnectorConfigurationFormItemsProps {
- isLoading: boolean;
- items: ConfigEntryView[];
- setConfigEntry: (key: string, value: string | number | boolean | null) => void;
- direction?: 'column' | 'row' | 'rowReverse' | 'columnReverse' | undefined;
- itemsGrow?: boolean;
-}
-
-export const ConnectorConfigurationFormItems: React.FC = ({
- isLoading,
- items,
- setConfigEntry,
- direction,
-}) => {
- return (
-
- {items.map((configEntry) => {
- const { key, isValid, label, sensitive, description, validationErrors, required } =
- configEntry;
-
- const helpText = description;
- // toggle and sensitive textarea labels go next to the element, not in the row
- const rowLabel = description ? (
-
-
- {label}
-
-
- ) : (
- {label}
- );
-
- const optionalLabel = !required ? (
-
- {i18n.translate('xpack.stackConnectors.components.inference.config.optionalValue', {
- defaultMessage: 'Optional',
- })}
-
- ) : undefined;
-
- return (
-
-
- {
- setConfigEntry(key, value);
- }}
- />
-
- {sensitive ? (
- <>
-
-
- >
- ) : null}
-
- );
- })}
-
- );
-};
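The form component deleted above renders one row per ConfigEntryView item, and its callers build those items from a provider's `configurations` map (see the onProviderChange handler removed from connector.tsx earlier in this patch). A minimal sketch of that wiring follows; the helper and component names are illustrative, and the `Omit<ConfigEntryView, 'key' | 'isValid'>` typing is an assumption used to keep the sketch self-contained.

import React from 'react';
import { ConfigEntryView } from '../../../../common/dynamic_config/types';
import { ConnectorConfigurationFormItems } from './connector_configuration_form_items';

// Mirrors the mapping removed from connector.tsx: every configuration entry becomes a
// ConfigEntryView carrying its key and an initial isValid flag.
const toConfigEntryViews = (
  configurations: Record<string, Omit<ConfigEntryView, 'key' | 'isValid'>>
): ConfigEntryView[] =>
  Object.keys(configurations).map((key) => ({
    key,
    isValid: true,
    ...configurations[key],
  }));

export const ProviderConfigForm: React.FC<{
  configurations: Record<string, Omit<ConfigEntryView, 'key' | 'isValid'>>;
  onConfigEntryChange: (key: string, value: string | number | boolean | null) => void;
}> = ({ configurations, onConfigEntryChange }) => (
  <ConnectorConfigurationFormItems
    isLoading={false}
    items={toConfigEntryViews(configurations)}
    setConfigEntry={onConfigEntryChange}
    direction="column"
  />
);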
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts b/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
deleted file mode 100644
index 5e78903746aee..0000000000000
--- a/x-pack/plugins/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { ConfigProperties, FieldType } from '../../../../common/dynamic_config/types';
-
-export type ConnectorConfigEntry = ConfigProperties & { key: string };
-
-export const validIntInput = (value: string | number | boolean | null): boolean => {
- // reject non integers (including x.0 floats), but don't validate if empty
- return (value !== null || value !== '') &&
- (isNaN(Number(value)) ||
- !Number.isSafeInteger(Number(value)) ||
- ensureStringType(value).indexOf('.') >= 0)
- ? false
- : true;
-};
-
-export const ensureCorrectTyping = (
- type: FieldType,
- value: string | number | boolean | null
-): string | number | boolean | null => {
- switch (type) {
- case FieldType.INTEGER:
- return validIntInput(value) ? ensureIntType(value) : value;
- case FieldType.BOOLEAN:
- return ensureBooleanType(value);
- default:
- return ensureStringType(value);
- }
-};
-
-export const ensureStringType = (value: string | number | boolean | null): string => {
- return value !== null ? String(value) : '';
-};
-
-export const ensureIntType = (value: string | number | boolean | null): number | null => {
- // int is null-safe to prevent empty values from becoming zeroes
- if (value === null || value === '') {
- return null;
- }
-
- return parseInt(String(value), 10);
-};
-
-export const ensureBooleanType = (value: string | number | boolean | null): boolean => {
- return Boolean(value);
-};
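The coercion helpers removed in this file drive how the dynamic form normalizes user input per FieldType: integer fields parse valid integer strings, keep empty input as null (so clearing a field does not silently become 0), and pass invalid input through unchanged so validation can flag it; boolean fields coerce truthiness; everything else becomes a string. A short illustrative usage (not part of the patch), assuming the sibling import paths shown in the file header above:

import { FieldType } from '../../../../common/dynamic_config/types';
import {
  ensureBooleanType,
  ensureCorrectTyping,
} from './connector_configuration_utils';

// INTEGER fields
console.log(ensureCorrectTyping(FieldType.INTEGER, '42')); // 42
console.log(ensureCorrectTyping(FieldType.INTEGER, '')); // null (empty stays null via ensureIntType)
console.log(ensureCorrectTyping(FieldType.INTEGER, '1.5')); // '1.5' (rejected by validIntInput, left as-is)

// BOOLEAN and default (string) fields
console.log(ensureBooleanType('true')); // true
console.log(ensureCorrectTyping(FieldType.STRING, 7)); // '7'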
From 6499af2a384a24d5f001dcf6aaf67a62fd6700b7 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Mon, 30 Dec 2024 12:27:11 -0800
Subject: [PATCH 08/18] cleanup
---
.../connector_configuration_field.tsx | 232 ------
.../connector_configuration_form_items.tsx | 90 ---
.../connector_configuration_utils.ts | 49 --
.../add_inference_flyout_wrapper.test.tsx | 730 ------------------
.../add_inference_flyout_wrapper.tsx | 66 --
.../inference_form.tsx | 56 --
.../add_inference_endpoints/translations.ts | 36 -
7 files changed, 1259 deletions(-)
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
delete mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
delete mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
delete mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
delete mode 100644 x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
deleted file mode 100644
index fb278b826146d..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_field.tsx
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React, { useEffect, useState } from 'react';
-
-import {
- EuiAccordion,
- EuiFieldText,
- EuiFieldPassword,
- EuiSwitch,
- EuiTextArea,
- EuiFieldNumber,
-} from '@elastic/eui';
-
-import { isEmpty } from 'lodash/fp';
-import { ConfigEntryView, FieldType } from '../../types/types';
-import { ensureBooleanType, ensureCorrectTyping, ensureStringType } from './configuration_utils';
-
-interface ConfigurationFieldProps {
- configEntry: ConfigEntryView;
- isLoading: boolean;
- setConfigValue: (value: number | string | boolean | null) => void;
-}
-
-interface ConfigInputFieldProps {
- configEntry: ConfigEntryView;
- isLoading: boolean;
- validateAndSetConfigValue: (value: string | boolean) => void;
-}
-export const ConfigInputField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(
- !value || value.toString().length === 0 ? default_value : value
- );
-
- useEffect(() => {
- setInnerValue(!value || value.toString().length === 0 ? default_value : value);
- }, [default_value, value]);
- return (
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- );
-};
-
-export const ConfigSwitchField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { label, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(value ?? default_value);
- }, [default_value, value]);
- return (
- {label}}
- onChange={(event) => {
- setInnerValue(event.target.checked);
- validateAndSetConfigValue(event.target.checked);
- }}
- />
- );
-};
-
-export const ConfigInputTextArea: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(value ?? '');
- }, [default_value, value]);
- return (
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- );
-};
-
-export const ConfigNumberField: React.FC = ({
- configEntry,
- isLoading,
- validateAndSetConfigValue,
-}) => {
- // eslint-disable-next-line @typescript-eslint/naming-convention
- const { isValid, value, default_value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? default_value);
- useEffect(() => {
- setInnerValue(!value || value.toString().length === 0 ? default_value : value);
- }, [default_value, value]);
- return (
- {
- const newValue = isEmpty(event.target.value) ? '0' : event.target.value;
- setInnerValue(newValue);
- validateAndSetConfigValue(newValue);
- }}
- />
- );
-};
-
-export const ConfigSensitiveTextArea: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- const { key, label } = configEntry;
- return (
- {label}}>
-
-
- );
-};
-
-export const ConfigInputPassword: React.FC = ({
- isLoading,
- configEntry,
- validateAndSetConfigValue,
-}) => {
- const { value, key } = configEntry;
- const [innerValue, setInnerValue] = useState(value ?? null);
- useEffect(() => {
- setInnerValue(value ?? null);
- }, [value]);
- return (
- <>
- {
- setInnerValue(event.target.value);
- validateAndSetConfigValue(event.target.value);
- }}
- />
- >
- );
-};
-
-export const ConfigurationField: React.FC = ({
- configEntry,
- isLoading,
- setConfigValue,
-}) => {
- const validateAndSetConfigValue = (value: number | string | boolean) => {
- setConfigValue(ensureCorrectTyping(configEntry.type, value));
- };
-
- const { key, type, sensitive } = configEntry;
-
- switch (type) {
- case FieldType.INTEGER:
- return (
-
- );
-
- case FieldType.BOOLEAN:
- return (
-
- );
-
- default:
- return sensitive ? (
-
- ) : (
-
- );
- }
-};
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
deleted file mode 100644
index 850f574c2fe2b..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_form_items.tsx
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React from 'react';
-
-import {
- EuiCallOut,
- EuiFlexGroup,
- EuiFlexItem,
- EuiFormRow,
- EuiSpacer,
- EuiText,
-} from '@elastic/eui';
-
-import { ConfigEntryView } from '../../types/types';
-import { ConfigurationField } from './configuration_field';
-import * as i18n from '../../translations';
-
-interface ConfigurationFormItemsProps {
- isLoading: boolean;
- items: ConfigEntryView[];
- setConfigEntry: (key: string, value: string | number | boolean | null) => void;
- direction?: 'column' | 'row' | 'rowReverse' | 'columnReverse' | undefined;
-}
-
-export const ConfigurationFormItems: React.FC = ({
- isLoading,
- items,
- setConfigEntry,
- direction,
-}) => {
- return (
-
- {items.map((configEntry) => {
- const { key, isValid, label, sensitive, description, validationErrors, required } =
- configEntry;
-
- const helpText = description;
- // toggle and sensitive textarea labels go next to the element, not in the row
- const rowLabel = description ? (
-
-
- {label}
-
-
- ) : (
- {label}
- );
-
- const optionalLabel = !required ? (
-
- {i18n.OPTIONALTEXT}
-
- ) : undefined;
-
- return (
-
-
- {
- setConfigEntry(key, value);
- }}
- />
-
- {sensitive ? (
- <>
-
-
- >
- ) : null}
-
- );
- })}
-
- );
-};
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
deleted file mode 100644
index 45e886b368443..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/lib/dynamic_config/connector_configuration_utils.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { FieldType } from '../../types/types';
-
-export const validIntInput = (value: string | number | boolean | null): boolean => {
- // reject non integers (including x.0 floats), but don't validate if empty
- return (value !== null || value !== '') &&
- (isNaN(Number(value)) ||
- !Number.isSafeInteger(Number(value)) ||
- ensureStringType(value).indexOf('.') >= 0)
- ? false
- : true;
-};
-
-export const ensureCorrectTyping = (
- type: FieldType,
- value: string | number | boolean | null
-): string | number | boolean | null => {
- switch (type) {
- case FieldType.INTEGER:
- return validIntInput(value) ? ensureIntType(value) : value;
- case FieldType.BOOLEAN:
- return ensureBooleanType(value);
- default:
- return ensureStringType(value);
- }
-};
-
-export const ensureStringType = (value: string | number | boolean | null): string => {
- return value !== null ? String(value) : '';
-};
-
-export const ensureIntType = (value: string | number | boolean | null): number | null => {
- // int is null-safe to prevent empty values from becoming zeroes
- if (value === null || value === '') {
- return null;
- }
-
- return parseInt(String(value), 10);
-};
-
-export const ensureBooleanType = (value: string | number | boolean | null): boolean => {
- return Boolean(value);
-};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
deleted file mode 100644
index 784170b5857c3..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { render, screen } from '@testing-library/react';
-import userEvent from '@testing-library/user-event';
-import { FieldType } from '@kbn/search-connectors/types';
-import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
-import React from 'react';
-import { I18nProvider } from '@kbn/i18n-react';
-
-import { AddInferenceFlyoutWrapper } from './add_inference_flyout_wrapper';
-
-const mockProviders = [
- {
- service: 'cohere',
- name: 'Cohere',
- task_types: ['text_embedding', 'rerank', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'elastic',
- name: 'Elastic',
- task_types: ['sparse_embedding'],
- configurations: {
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'watsonxai',
- name: 'IBM Watsonx',
- task_types: ['text_embedding'],
- configurations: {
- project_id: {
- default_value: null,
- description: '',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The IBM Watsonx API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: null,
- description: '',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureaistudio',
- name: 'Azure AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- endpoint_type: {
- default_value: null,
- description: 'Specifies the type of endpoint that is used in your model deployment.',
- label: 'Endpoint Type',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- target: {
- default_value: null,
- description: 'The target URL of your Azure AI Studio model deployment.',
- label: 'Target',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'hugging_face',
- name: 'Hugging Face',
- task_types: ['text_embedding', 'sparse_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: 'https://api.openai.com/v1/embeddings',
- description: 'The URL endpoint to use for the requests.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'amazonbedrock',
- name: 'Amazon Bedrock',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- secret_key: {
- default_value: null,
- description: 'A valid AWS secret key that is paired with the access_key.',
- label: 'Secret Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- access_key: {
- default_value: null,
- description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
- label: 'Access Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description: 'The base model ID or an ARN to a custom model based on a foundational model.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- region: {
- default_value: null,
- description: 'The region that your model or ARN is deployed in.',
- label: 'Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'anthropic',
- name: 'Anthropic',
- task_types: ['completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'googleaistudio',
- name: 'Google AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: "ID of the LLM you're using.",
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'elasticsearch',
- name: 'Elasticsearch',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
- configurations: {
- num_allocations: {
- default_value: 1,
- description:
- 'The total number of allocations this model is assigned across machine learning nodes.',
- label: 'Number Allocations',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- num_threads: {
- default_value: 2,
- description: 'Sets the number of threads used by each model allocation during inference.',
- label: 'Number Threads',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: '.multilingual-e5-small',
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'openai',
- name: 'OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description:
- 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- organization_id: {
- default_value: null,
- description: 'The unique identifier of your organization.',
- label: 'Organization ID',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- url: {
- default_value: 'https://api.openai.com/v1/chat/completions',
- description:
- 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureopenai',
- name: 'Azure OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- entra_id: {
- default_value: null,
- description: 'You must provide either an API key or an Entra ID.',
- label: 'Entra ID',
- required: false,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- deployment_id: {
- default_value: null,
- description: 'The deployment name of your deployed models.',
- label: 'Deployment ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- resource_name: {
- default_value: null,
- description: 'The name of your Azure OpenAI resource.',
- label: 'Resource Name',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The Azure API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'mistral',
- name: 'Mistral',
- task_types: ['text_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description:
- 'Refer to the Mistral models documentation for the list of available text embedding models.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'googlevertexai',
- name: 'Google Vertex AI',
- task_types: ['text_embedding', 'rerank'],
- configurations: {
- service_account_json: {
- default_value: null,
- description: "API Key for the provider you're connecting to.",
- label: 'Credentials JSON',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- project_id: {
- default_value: null,
- description:
- 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Project',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- location: {
- default_value: null,
- description:
- 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: `ID of the LLM you're using.`,
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'alibabacloud-ai-search',
- name: 'AlibabaCloud AI Search',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
- configurations: {
- workspace: {
- default_value: null,
- description: 'The name of the workspace used for the {infer} task.',
- label: 'Workspace',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `A valid API key for the AlibabaCloud AI Search API.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- service_id: {
- default_value: null,
- description: 'The name of the model service to use for the {infer} task.',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- host: {
- default_value: null,
- description:
- 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
- label: 'Host',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- http_schema: {
- default_value: null,
- description: '',
- label: 'HTTP Schema',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
-];
-
-const mockAddEndpoint = jest.fn();
-const onClose = jest.fn();
-jest.mock('../../hooks/use_add_endpoint', () => ({
- useAddEndpoint: () => ({
- mutate: mockAddEndpoint.mockImplementation(() => Promise.resolve()), // Mock implementation of the mutate function
- }),
-}));
-
-jest.mock('../../hooks/use_providers', () => ({
- useProviders: jest.fn(() => ({
- data: mockProviders,
- })),
-}));
-
-const MockFormProvider = ({ children }: { children: React.ReactElement }) => {
- const { form } = useForm();
- return (
-
-
-
- );
-};
-
-describe('AddInferenceFlyout', () => {
- it('renders', () => {
- render(
-
-
-
- );
-
- expect(screen.getByTestId('create-inference-flyout')).toBeInTheDocument();
- expect(screen.getByTestId('create-inference-flyout-header')).toBeInTheDocument();
- expect(screen.getByTestId('create-inference-flyout-header')).toBeInTheDocument();
- expect(screen.getByTestId('provider-select')).toBeInTheDocument();
- expect(screen.getByTestId('add-inference-endpoint-submit-button')).toBeInTheDocument();
- expect(screen.getByTestId('create-inference-flyout-close-button')).toBeInTheDocument();
- });
-
- it('invalidates form if no provider is selected', async () => {
- render(
-
-
-
- );
-
- await userEvent.click(screen.getByTestId('add-inference-endpoint-submit-button'));
- expect(screen.getByText('Provider is required.')).toBeInTheDocument();
- expect(mockAddEndpoint).not.toHaveBeenCalled();
- expect(screen.getByTestId('add-inference-endpoint-submit-button')).toBeDisabled();
- });
-
- it('valid submission', async () => {
- render(
-
-
-
- );
-
- await userEvent.click(screen.getByTestId('provider-select'));
- await userEvent.click(screen.getByText('Anthropic'));
- await userEvent.type(await screen.findByTestId('api_key-password'), 'test api passcode');
- await userEvent.type(
- await screen.findByTestId('model_id-input'),
- 'sample model name from Anthropic'
- );
-
- await userEvent.click(screen.getByTestId('add-inference-endpoint-submit-button'));
- expect(mockAddEndpoint).toHaveBeenCalled();
- });
-});
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
deleted file mode 100644
index 4e6be5764bf44..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.tsx
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import {
- EuiButtonEmpty,
- EuiFlexGroup,
- EuiFlexItem,
- EuiFlyout,
- EuiFlyoutBody,
- EuiFlyoutFooter,
- EuiFlyoutHeader,
- EuiTitle,
- useGeneratedHtmlId,
-} from '@elastic/eui';
-import React from 'react';
-
-import { InferenceForm } from './inference_form';
-import * as i18n from './translations';
-
-interface AddInferenceFlyoutWrapperProps {
- onClose: (state: boolean) => void;
-}
-
-export const AddInferenceFlyoutWrapper: React.FC<AddInferenceFlyoutWrapperProps> = ({
- onClose,
-}) => {
- const inferenceCreationFlyoutId = useGeneratedHtmlId({
- prefix: 'addInferenceFlyoutId',
- });
- const closeFlyout = () => onClose(false);
-
- return (
- onClose(false)}
- aria-labelledby={inferenceCreationFlyoutId}
- data-test-subj="create-inference-flyout"
- >
-
-
- {i18n.CREATE_ENDPOINT_TITLE}
-
-
-
-
-
-
-
-
-
- {i18n.CANCEL}
-
-
-
-
-
- );
-};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
deleted file mode 100644
index e26fb765f06c6..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
-import React, { useCallback } from 'react';
-import { InferenceServices } from '@kbn/inference-endpoint-ui-common';
-import { EuiButton, EuiFlexGroup, EuiFlexItem, EuiSpacer } from '@elastic/eui';
-import { useProviders } from '../../hooks/use_providers';
-import * as i18n from './translations';
-import { useAddEndpoint } from '../../hooks/use_add_endpoint';
-import { InferenceEndpoint } from '../../types';
-
-interface InferenceFormProps {
- onSubmitSuccess: (state: boolean) => void;
-}
-export const InferenceForm: React.FC<InferenceFormProps> = ({ onSubmitSuccess }) => {
- const { mutate: addEndpoint } = useAddEndpoint(() => onSubmitSuccess(false));
- const { data: providers } = useProviders();
- const { form } = useForm();
- const handleSubmit = useCallback(async () => {
- const { isValid, data } = await form.submit();
-
- if (isValid) {
- addEndpoint({
- inferenceEndpoint: data as InferenceEndpoint,
- });
- return;
- }
- }, [addEndpoint, form]);
-
- return providers ? (
-
- ) : null;
-};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts b/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
deleted file mode 100644
index 330a2b434d4da..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/add_inference_endpoints/translations.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { i18n } from '@kbn/i18n';
-
-export const SAVE = i18n.translate(
- 'xpack.searchInferenceEndpoints.addInferenceEndpoint.saveBtnLabel',
- {
- defaultMessage: 'Save',
- }
-);
-
-export const SAVE_TEST = i18n.translate(
- 'xpack.searchInferenceEndpoints.addInferenceEndpoint.saveAndTestBtnLabel',
- {
- defaultMessage: 'Save and test',
- }
-);
-
-export const CANCEL = i18n.translate(
- 'xpack.searchInferenceEndpoints.addInferenceEndpoint.cancelBtnLabel',
- {
- defaultMessage: 'Cancel',
- }
-);
-
-export const CREATE_ENDPOINT_TITLE = i18n.translate(
- 'xpack.searchInferenceEndpoints.addInferenceEndpoint.createEndpointTitle',
- {
- defaultMessage: 'Create Inference Endpoint',
- }
-);
From be6ea81483c04604d4b9e995a5af0241bd819852 Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Mon, 30 Dec 2024 20:41:01 +0000
Subject: [PATCH 09/18] [CI] Auto-commit changed files from 'node
scripts/yarn_deduplicate'
---
x-pack/platform/plugins/shared/stack_connectors/tsconfig.json | 1 +
1 file changed, 1 insertion(+)
diff --git a/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json b/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
index 222fca24ad5fa..b4a565bf0ec93 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
+++ b/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
@@ -44,6 +44,7 @@
"@kbn/alerting-types",
"@kbn/core-notifications-browser",
"@kbn/response-ops-rule-form",
+ "@kbn/inference-endpoint-ui-common",
],
"exclude": [
"target/**/*",
From c201d795bef66ef15327526152fa62368e744240 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Mon, 30 Dec 2024 14:53:49 -0800
Subject: [PATCH 10/18] removed labels
---
.../private/translations/translations/fr-FR.json | 12 ------------
.../private/translations/translations/ja-JP.json | 12 ------------
.../private/translations/translations/zh-CN.json | 12 ------------
3 files changed, 36 deletions(-)
diff --git a/x-pack/platform/plugins/private/translations/translations/fr-FR.json b/x-pack/platform/plugins/private/translations/translations/fr-FR.json
index d1d7c38033ed5..f11889749d1b9 100644
--- a/x-pack/platform/plugins/private/translations/translations/fr-FR.json
+++ b/x-pack/platform/plugins/private/translations/translations/fr-FR.json
@@ -45005,34 +45005,22 @@
"xpack.stackConnectors.components.index.preconfiguredIndexHelpText": "Les documents sont indexés dans l'index {alertHistoryIndex}.",
"xpack.stackConnectors.components.index.resetDefaultIndexLabel": "Réinitialiser l'index par défaut",
"xpack.stackConnectors.components.index.selectMessageText": "Indexez les données dans Elasticsearch.",
- "xpack.stackConnectors.components.inference.additionalOptionsLabel": "Options supplémentaires",
"xpack.stackConnectors.components.inference.bodyCodeEditorAriaLabel": "Éditeur de code",
"xpack.stackConnectors.components.inference.bodyFieldLabel": "Corps",
"xpack.stackConnectors.components.inference.completionInputLabel": "Entrée",
"xpack.stackConnectors.components.inference.completionInputTypeLabel": "Type d'entrée",
- "xpack.stackConnectors.components.inference.config.optionalValue": "Facultatif",
"xpack.stackConnectors.components.inference.connectorTypeTitle": "Connecteur IA",
"xpack.stackConnectors.components.inference.copied.tooltip": "Copié !",
"xpack.stackConnectors.components.inference.copy.tooltip": "Copier dans le presse-papiers",
- "xpack.stackConnectors.components.inference.copyLabel": "Copier",
"xpack.stackConnectors.components.inference.documentation": "Documentation de l'API d'inférence",
"xpack.stackConnectors.components.inference.error.requiredProviderText": "Le fournisseur est requis.",
- "xpack.stackConnectors.components.inference.inferenceEndpointHelpLabel": "Les points de terminaison d'inférence fournissent une méthode simplifiée pour utiliser cette configuration, en particulier à partir de l'API",
- "xpack.stackConnectors.components.inference.inferenceEndpointLabel": "Point de terminaison d'inférence",
- "xpack.stackConnectors.components.inference.inferenceIdHelpLabel": "Cet identifiant ne peut pas être modifié une fois créé.",
"xpack.stackConnectors.components.inference.invalidActionText": "Nom d'action non valide.",
"xpack.stackConnectors.components.inference.providerFieldLabel": "Fournisseur",
- "xpack.stackConnectors.components.inference.providerLabel": "Service",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsHelpLabel": "Configurer le fournisseur d'inférence. Ces paramètres sont des paramètres de fournisseur facultatifs.",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsLabel": "Paramètres de service",
"xpack.stackConnectors.components.inference.requiredGenericTextField": "{field} est obligatoire.",
"xpack.stackConnectors.components.inference.rerankQueryLabel": "Recherche",
- "xpack.stackConnectors.components.inference.selectable.providerSearch": "Recherche",
"xpack.stackConnectors.components.inference.selectMessageText": "Envoyez des demandes aux fournisseurs d'IA tels qu'Amazon Bedrock, OpenAI et bien d'autres.",
"xpack.stackConnectors.components.inference.selectProvider": "Sélectionner un service",
- "xpack.stackConnectors.components.inference.taskTypeDetailsLabel": "Paramètres des tâches",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "Type de tâche",
- "xpack.stackConnectors.components.inference.taskTypeHelpLabel": "Configurer la tâche d'inférence. Ces paramètres sont spécifiques au service et au modèle sélectionnés.",
"xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "Impossible de trouver des fournisseurs",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "Token d'API",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
diff --git a/x-pack/platform/plugins/private/translations/translations/ja-JP.json b/x-pack/platform/plugins/private/translations/translations/ja-JP.json
index f5ac3a85279a6..6791420a698d4 100644
--- a/x-pack/platform/plugins/private/translations/translations/ja-JP.json
+++ b/x-pack/platform/plugins/private/translations/translations/ja-JP.json
@@ -44855,34 +44855,22 @@
"xpack.stackConnectors.components.index.preconfiguredIndexHelpText": "ドキュメントは{alertHistoryIndex}インデックスにインデックスされます。",
"xpack.stackConnectors.components.index.resetDefaultIndexLabel": "デフォルトのインデックスをリセット",
"xpack.stackConnectors.components.index.selectMessageText": "データを Elasticsearch にインデックスしてください。",
- "xpack.stackConnectors.components.inference.additionalOptionsLabel": "その他のオプション",
"xpack.stackConnectors.components.inference.bodyCodeEditorAriaLabel": "コードエディター",
"xpack.stackConnectors.components.inference.bodyFieldLabel": "本文",
"xpack.stackConnectors.components.inference.completionInputLabel": "インプット",
"xpack.stackConnectors.components.inference.completionInputTypeLabel": "入力タイプ",
- "xpack.stackConnectors.components.inference.config.optionalValue": "オプション",
"xpack.stackConnectors.components.inference.connectorTypeTitle": "AIコネクター",
"xpack.stackConnectors.components.inference.copied.tooltip": "コピー完了",
"xpack.stackConnectors.components.inference.copy.tooltip": "クリップボードにコピー",
- "xpack.stackConnectors.components.inference.copyLabel": "コピー",
"xpack.stackConnectors.components.inference.documentation": "推論APIドキュメント",
"xpack.stackConnectors.components.inference.error.requiredProviderText": "プロバイダーは必須です。",
- "xpack.stackConnectors.components.inference.inferenceEndpointHelpLabel": "推論エンドポイントは、特にAPIから、この構成を簡単に利用できる方法を提供します。",
- "xpack.stackConnectors.components.inference.inferenceEndpointLabel": "推論エンドポイント",
- "xpack.stackConnectors.components.inference.inferenceIdHelpLabel": "このIDは、作成すると、変更できません。",
"xpack.stackConnectors.components.inference.invalidActionText": "無効なアクション名です。",
"xpack.stackConnectors.components.inference.providerFieldLabel": "プロバイダー",
- "xpack.stackConnectors.components.inference.providerLabel": "サービス",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsHelpLabel": "推論プロバイダーを構成します。これらの設定はオプションのプロバイダー設定です。",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsLabel": "サービス設定",
"xpack.stackConnectors.components.inference.requiredGenericTextField": "{field}は必須です。",
"xpack.stackConnectors.components.inference.rerankQueryLabel": "クエリー",
- "xpack.stackConnectors.components.inference.selectable.providerSearch": "検索",
"xpack.stackConnectors.components.inference.selectMessageText": "Amazon Bedrock、OpenAIなどのAIプロバイダーに要求を送信します。",
"xpack.stackConnectors.components.inference.selectProvider": "サービスを選択",
- "xpack.stackConnectors.components.inference.taskTypeDetailsLabel": "タスク設定",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "タスクタイプ",
- "xpack.stackConnectors.components.inference.taskTypeHelpLabel": "推論タスクを構成します。これらの設定は、選択したサービスおよびモデルに固有です。",
"xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "プロバイダーが見つかりません",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "APIトークン",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
diff --git a/x-pack/platform/plugins/private/translations/translations/zh-CN.json b/x-pack/platform/plugins/private/translations/translations/zh-CN.json
index 998dd0392f5a2..4d4a68cfcd046 100644
--- a/x-pack/platform/plugins/private/translations/translations/zh-CN.json
+++ b/x-pack/platform/plugins/private/translations/translations/zh-CN.json
@@ -44191,34 +44191,22 @@
"xpack.stackConnectors.components.index.preconfiguredIndexHelpText": "文档已索引到 {alertHistoryIndex} 索引中。",
"xpack.stackConnectors.components.index.resetDefaultIndexLabel": "重置默认索引",
"xpack.stackConnectors.components.index.selectMessageText": "将数据索引到 Elasticsearch 中。",
- "xpack.stackConnectors.components.inference.additionalOptionsLabel": "其他选项",
"xpack.stackConnectors.components.inference.bodyCodeEditorAriaLabel": "代码编辑器",
"xpack.stackConnectors.components.inference.bodyFieldLabel": "正文",
"xpack.stackConnectors.components.inference.completionInputLabel": "输入",
"xpack.stackConnectors.components.inference.completionInputTypeLabel": "输入类型",
- "xpack.stackConnectors.components.inference.config.optionalValue": "可选",
"xpack.stackConnectors.components.inference.connectorTypeTitle": "AI 连接器",
"xpack.stackConnectors.components.inference.copied.tooltip": "已复制!",
"xpack.stackConnectors.components.inference.copy.tooltip": "复制到剪贴板",
- "xpack.stackConnectors.components.inference.copyLabel": "复制",
"xpack.stackConnectors.components.inference.documentation": "推理 API 文档",
"xpack.stackConnectors.components.inference.error.requiredProviderText": "'提供商'必填。",
- "xpack.stackConnectors.components.inference.inferenceEndpointHelpLabel": "推理终端提供了使用此配置(特别是通过 API 使用)的简化方法",
- "xpack.stackConnectors.components.inference.inferenceEndpointLabel": "推理终端",
- "xpack.stackConnectors.components.inference.inferenceIdHelpLabel": "此 ID 一旦创建,将无法更改。",
"xpack.stackConnectors.components.inference.invalidActionText": "操作名称无效。",
"xpack.stackConnectors.components.inference.providerFieldLabel": "提供商",
- "xpack.stackConnectors.components.inference.providerLabel": "服务",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsHelpLabel": "配置推理提供商。这些设置是可选的提供商设置。",
- "xpack.stackConnectors.components.inference.providerOptionalSettingsLabel": "服务设置",
"xpack.stackConnectors.components.inference.requiredGenericTextField": "{field} 必填。",
"xpack.stackConnectors.components.inference.rerankQueryLabel": "查询",
- "xpack.stackConnectors.components.inference.selectable.providerSearch": "搜索",
"xpack.stackConnectors.components.inference.selectMessageText": "发送请求至 Amazon Bedrock、OpenAI 等 AI 提供商。",
"xpack.stackConnectors.components.inference.selectProvider": "选择服务",
- "xpack.stackConnectors.components.inference.taskTypeDetailsLabel": "任务设置",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "任务类型",
- "xpack.stackConnectors.components.inference.taskTypeHelpLabel": "配置推理任务。这些设置特定于选定服务和模型。",
"xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "找不到提供商",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "API 令牌",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
From 05c1685e64dd61ab3b1f5a69a4605fc75e100a17 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Thu, 2 Jan 2025 11:07:32 -0800
Subject: [PATCH 11/18] fixed task type on edit
---
.../components/additional_options_fields.tsx | 21 ++++++-
.../inference_service_form_fields.tsx | 1 +
.../inference/connector.test.tsx | 60 +++++++++----------
3 files changed, 48 insertions(+), 34 deletions(-)
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
index 381b15b78020f..e7fa0afce90e0 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/additional_options_fields.tsx
@@ -52,6 +52,7 @@ interface AdditionalOptionsFieldsProps {
onTaskTypeOptionsSelect: (taskType: string, provider?: string) => void;
selectedTaskType?: string;
taskTypeOptions: TaskTypeOption[];
+ isEdit?: boolean;
}
export const AdditionalOptionsFields: React.FC = ({
@@ -61,6 +62,7 @@ export const AdditionalOptionsFields: React.FC = (
selectedTaskType,
onSetProviderConfigEntry,
onTaskTypeOptionsSelect,
+ isEdit,
}) => {
const xsFontSize = useEuiFontSize('xs').fontSize;
const { euiTheme } = useEuiTheme();
@@ -106,7 +108,18 @@ export const AdditionalOptionsFields: React.FC = (
return (
- {taskTypeOptions.length === 1 ? (
+ {isEdit ? (
+
+ {config.taskType}
+
+ ) : taskTypeOptions.length === 1 ? (
= (
selectedTaskType,
config.taskType,
xsFontSize,
- euiTheme.colors,
+ euiTheme.colors.textSubdued,
+ euiTheme.colors.disabled,
+ euiTheme.colors.lightestShade,
+ euiTheme.colors.darkShade,
+ isEdit,
taskTypeOptions,
onTaskTypeOptionsSelect,
]
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
index ca2a932cd6642..17e532fed4f72 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
@@ -376,6 +376,7 @@ export const InferenceServiceFormFields: React.FC = ({
onTaskTypeOptionsSelect={onTaskTypeOptionsSelect}
taskTypeOptions={taskTypeOptions}
selectedTaskType={selectedTaskType}
+ isEdit={isEdit}
/>
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
index 5d20ff9595483..83d1d75f63458 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
+++ b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
@@ -9,7 +9,7 @@ import React from 'react';
import ConnectorFields from './connector';
import { ConnectorFormTestProvider } from '../lib/test_utils';
-import { render, waitFor } from '@testing-library/react';
+import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { createStartServicesMock } from '@kbn/triggers-actions-ui-plugin/public/common/lib/kibana/kibana_react.mock';
import { useProviders } from './providers/get_providers';
@@ -731,7 +731,7 @@ describe('ConnectorFields renders', () => {
data: providersSchemas,
});
});
- test('openai provider fields are rendered', async () => {
+ test('openai provider fields are rendered', () => {
const { getAllByTestId } = render(
{}} />
@@ -746,7 +746,7 @@ describe('ConnectorFields renders', () => {
expect(getAllByTestId('taskTypeSelectDisabled')[0]).toHaveTextContent('completion');
});
- test('googleaistudio provider fields are rendered', async () => {
+ test('googleaistudio provider fields are rendered', () => {
const { getAllByTestId } = render(
{}} />
@@ -771,32 +771,37 @@ describe('ConnectorFields renders', () => {
});
it('connector validation succeeds when connector config is valid', async () => {
- const { getByTestId } = render(
+ render(
- {}} />
+ {}} />
);
- await userEvent.click(getByTestId('form-test-provide-submit'));
-
- await waitFor(async () => {
- expect(onSubmit).toHaveBeenCalled();
- });
+ await userEvent.type(
+ screen.getByTestId('api_key-password'),
+ '{selectall}{backspace}goodpassword'
+ );
+ await userEvent.click(screen.getByTestId('form-test-provide-submit'));
+ expect(onSubmit).toHaveBeenCalled();
expect(onSubmit).toBeCalledWith({
data: {
config: {
- inferenceId: 'openai-completion-4fzzzxjylrx',
...openAiConnector.config,
},
actionTypeId: openAiConnector.actionTypeId,
name: openAiConnector.name,
id: openAiConnector.id,
isDeprecated: openAiConnector.isDeprecated,
+ secrets: {
+ providerSecrets: {
+ api_key: 'goodpassword',
+ },
+ },
},
isValid: true,
});
- });
+ }, 60000);
it('validates correctly if the provider config url is empty', async () => {
const connector = {
@@ -810,29 +815,23 @@ describe('ConnectorFields renders', () => {
},
};
- const res = render(
+ render(
{}} />
);
await userEvent.type(
- res.getByTestId('api_key-password'),
+ screen.getByTestId('api_key-password'),
'{selectall}{backspace}goodpassword'
);
- await userEvent.click(res.getByTestId('form-test-provide-submit'));
- await waitFor(async () => {
- expect(onSubmit).toHaveBeenCalled();
- });
+ await userEvent.click(screen.getByTestId('form-test-provide-submit'));
+ expect(onSubmit).toHaveBeenCalled();
expect(onSubmit).toHaveBeenCalledWith({ data: {}, isValid: false });
- });
+ }, 60000);
- const tests: Array<[string, string]> = [
- ['url-input', ''],
- ['api_key-password', ''],
- ];
- it.each(tests)('validates correctly %p', async (field, value) => {
+ it('validates correctly empty password field', async () => {
const connector = {
...openAiConnector,
config: {
@@ -841,20 +840,17 @@ describe('ConnectorFields renders', () => {
},
};
- const res = render(
+ render(
{}} />
);
- await userEvent.type(res.getByTestId(field), `{selectall}{backspace}${value}`);
-
- await userEvent.click(res.getByTestId('form-test-provide-submit'));
- await waitFor(async () => {
- expect(onSubmit).toHaveBeenCalled();
- });
+ await userEvent.type(screen.getByTestId('api_key-password'), `{selectall}{backspace}`);
+ await userEvent.click(screen.getByTestId('form-test-provide-submit'));
+ expect(onSubmit).toHaveBeenCalled();
expect(onSubmit).toHaveBeenCalledWith({ data: {}, isValid: false });
- });
+ }, 60000);
});
});
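Note on the patch above: the isEdit branch renders the task type as read-only text when an existing endpoint is edited, instead of the selectable options shown on create. The sketch below is an illustration only, since the exact JSX did not survive extraction; the EuiText wrapper, test id, and prop names are assumptions rather than the code in the patch.

    import React from 'react';
    import { EuiText, EuiButtonGroup } from '@elastic/eui';

    interface TaskTypeFieldProps {
      isEdit?: boolean;
      selectedTaskType?: string;
      taskTypeOptions: Array<{ id: string; label: string }>;
      onTaskTypeOptionsSelect: (taskType: string) => void;
    }

    // Sketch: in edit mode the task type is fixed, so it is rendered as plain
    // text; otherwise the selectable button group is shown as before.
    export const TaskTypeField: React.FC<TaskTypeFieldProps> = ({
      isEdit,
      selectedTaskType,
      taskTypeOptions,
      onTaskTypeOptionsSelect,
    }) => {
      if (isEdit) {
        return (
          <EuiText size="s" data-test-subj="taskTypeSelectDisabled">
            {selectedTaskType}
          </EuiText>
        );
      }
      return (
        <EuiButtonGroup
          legend="Task type"
          options={taskTypeOptions}
          idSelected={selectedTaskType ?? ''}
          onChange={(id) => onTaskTypeOptionsSelect(id)}
        />
      );
    };
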
From 1be74073eceed566f9dc21145c07048900b019b3 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Mon, 6 Jan 2025 16:12:15 -0800
Subject: [PATCH 12/18] removed get providers from connectors and uses the
common hook from the package
---
.../src/components}/hooks/use_providers.ts | 12 +-
.../inference_service_form_fields.tsx | 13 +-
.../src/translations.ts | 7 +
.../connector_types/inference/connector.tsx | 8 +-
.../providers/get_providers.test.tsx | 52 --
.../inference/providers/get_providers.ts | 40 -
.../shared/stack_connectors/server/plugin.ts | 7 +-
.../routes/get_inference_services.test.ts | 677 -----------------
.../server/routes/get_inference_services.ts | 703 ------------------
.../stack_connectors/server/routes/index.ts | 1 -
.../inference_form.tsx | 13 +-
.../public/hooks/translations.ts | 7 -
12 files changed, 31 insertions(+), 1509 deletions(-)
rename x-pack/{solutions/search/plugins/search_inference_endpoints/public => platform/packages/shared/kbn-inference-endpoint-ui-common/src/components}/hooks/use_providers.ts (98%)
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.ts
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.test.ts
delete mode 100644 x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.ts
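As a quick orientation before the diffs: after this patch the shared form fields component fetches providers itself via the packaged useProviders(http, toasts) hook, so callers only pass http and toasts. A rough consumption sketch follows; the wrapper component name is hypothetical, while the props and hook signature are taken from the diff below.

    import React from 'react';
    import { useKibana } from '@kbn/triggers-actions-ui-plugin/public';
    import { InferenceServiceFormFields } from '@kbn/inference-endpoint-ui-common';

    // Hypothetical consumer: hands http and toasts to the shared component,
    // which now calls useProviders(http, toasts) internally instead of
    // receiving a pre-fetched providers array.
    export const InferenceFieldsExample: React.FC<{ isEdit?: boolean }> = ({ isEdit }) => {
      const { services } = useKibana();
      return (
        <InferenceServiceFormFields
          http={services.http}
          toasts={services.notifications.toasts}
          isEdit={isEdit}
        />
      );
    };
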
diff --git a/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/use_providers.ts b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts
similarity index 98%
rename from x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/use_providers.ts
rename to x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts
index 4bef4268c798f..2b0dae5c6cfc2 100644
--- a/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/use_providers.ts
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts
@@ -7,10 +7,10 @@
import type { HttpSetup } from '@kbn/core-http-browser';
import { useQuery } from '@tanstack/react-query';
-import { FieldType, InferenceProvider } from '@kbn/inference-endpoint-ui-common';
import { KibanaServerError } from '@kbn/kibana-utils-plugin/common';
-import { useKibana } from './use_kibana';
-import * as i18n from './translations';
+import { IToasts } from '@kbn/core/public';
+import { FieldType, InferenceProvider } from '../../..';
+import * as i18n from '../../translations';
const getProviders = (http: HttpSetup): InferenceProvider[] => {
return [
@@ -624,9 +624,7 @@ const getProviders = (http: HttpSetup): InferenceProvider[] => {
];
};
-export const useProviders = () => {
- const { services } = useKibana();
- const toasts = services.notifications?.toasts;
+export const useProviders = (http: HttpSetup, toasts: IToasts) => {
const onErrorFn = (error: { body: KibanaServerError }) => {
toasts?.addError(new Error(error.body.message), {
title: i18n.GET_PROVIDERS_FAILED,
@@ -635,7 +633,7 @@ export const useProviders = () => {
};
const query = useQuery(['user-profile'], {
- queryFn: () => getProviders(services.http),
+ queryFn: () => getProviders(http),
staleTime: Infinity,
refetchOnWindowFocus: false,
onError: onErrorFn,
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
index 17e532fed4f72..0e6e8f2019514 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
@@ -26,8 +26,9 @@ import {
import { FormattedMessage } from '@kbn/i18n-react';
import { ConnectorFormSchema } from '@kbn/triggers-actions-ui-plugin/public';
+import { HttpSetup, IToasts } from '@kbn/core/public';
import * as LABELS from '../translations';
-import { Config, ConfigEntryView, FieldType, InferenceProvider, Secrets } from '../types/types';
+import { Config, ConfigEntryView, FieldType, Secrets } from '../types/types';
import { SERVICE_PROVIDERS } from './providers/render_service_provider/service_provider';
import { DEFAULT_TASK_TYPE, ServiceProviderKeys } from '../constants';
import { SelectableProvider } from './providers/selectable';
@@ -36,16 +37,20 @@ import { ConfigurationFormItems } from './configuration/configuration_form_items
import { AdditionalOptionsFields } from './additional_options_fields';
import { ProviderSecretHiddenField } from './hidden_fields/provider_secret_hidden_field';
import { ProviderConfigHiddenField } from './hidden_fields/provider_config_hidden_field';
+import { useProviders } from './hooks/use_providers';
interface InferenceServicesProps {
- providers: InferenceProvider[];
+ http: HttpSetup;
+ toasts: IToasts;
isEdit?: boolean;
}
export const InferenceServiceFormFields: React.FC = ({
- providers,
+ http,
+ toasts,
isEdit,
}) => {
+ const { data: providers, isLoading } = useProviders(http, toasts);
const [isProviderPopoverOpen, setProviderPopoverOpen] = useState(false);
const [providerSchema, setProviderSchema] = useState([]);
const [taskTypeOptions, setTaskTypeOptions] = useState([]);
@@ -350,7 +355,7 @@ export const InferenceServiceFormFields: React.FC = ({
className="rightArrowIcon"
>
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
index 6258fc94687fe..48170e9a5ec39 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
@@ -127,3 +127,10 @@ export const RE_ENTER_SECRETS = (label: string) => {
values: { label },
});
};
+
+export const GET_PROVIDERS_FAILED = i18n.translate(
+ 'xpack.searchInferenceEndpoints.addEndpoint.unableToFindProvidersQueryMessage',
+ {
+ defaultMessage: 'Unable to find providers',
+ }
+);
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.tsx
index 6a9f25a999e85..261c2d5a34154 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.tsx
+++ b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.tsx
@@ -10,8 +10,6 @@ import { InferenceServiceFormFields } from '@kbn/inference-endpoint-ui-common';
import { type ActionConnectorFieldsProps } from '@kbn/triggers-actions-ui-plugin/public';
import { useKibana } from '@kbn/triggers-actions-ui-plugin/public';
-import { useProviders } from './providers/get_providers';
-
const InferenceAPIConnectorFields: React.FunctionComponent = ({
isEdit,
}) => {
@@ -20,11 +18,7 @@ const InferenceAPIConnectorFields: React.FunctionComponent
- ) : null;
+ return ;
};
// eslint-disable-next-line import/no-default-export
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
deleted file mode 100644
index 7ab81ee062638..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.test.tsx
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-import React from 'react';
-import * as ReactQuery from '@tanstack/react-query';
-import { waitFor, renderHook } from '@testing-library/react';
-import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
-import { httpServiceMock, notificationServiceMock } from '@kbn/core/public/mocks';
-import { useProviders } from './get_providers';
-
-const http = httpServiceMock.createStartContract();
-const toasts = notificationServiceMock.createStartContract();
-const useQuerySpy = jest.spyOn(ReactQuery, 'useQuery');
-
-beforeEach(() => jest.resetAllMocks());
-
-const { getProviders } = jest.requireMock('./get_providers');
-
-const queryClient = new QueryClient();
-
-const wrapper = ({ children }: { children: React.ReactNode }) => (
- <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
-);
-
-describe('useProviders', () => {
- beforeEach(() => {
- jest.clearAllMocks();
- });
-
- it('should call useQuery', async () => {
- renderHook(() => useProviders(http, toasts.toasts), {
- wrapper,
- });
-
- await waitFor(() => {
- return expect(useQuerySpy).toBeCalled();
- });
- });
-
- it('should return isError = true if api fails', async () => {
- getProviders.mockResolvedValue('This is an error.');
-
- renderHook(() => useProviders(http, toasts.toasts), {
- wrapper,
- });
-
- await waitFor(() => expect(useQuerySpy).toHaveBeenCalled());
- });
-});
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.ts b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.ts
deleted file mode 100644
index badc0cb61030d..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/providers/get_providers.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import type { HttpSetup } from '@kbn/core-http-browser';
-import { i18n } from '@kbn/i18n';
-import { useQuery } from '@tanstack/react-query';
-import type { ToastsStart } from '@kbn/core-notifications-browser';
-import { INTERNAL_BASE_STACK_CONNECTORS_API_PATH } from '../../../../common';
-import { InferenceProvider } from '../../../../common/inference/types';
-
-export const getProviders = async (http: HttpSetup): Promise<InferenceProvider[]> => {
- return await http.get(`${INTERNAL_BASE_STACK_CONNECTORS_API_PATH}/_inference/_services`);
-};
-
-export const useProviders = (http: HttpSetup, toasts: ToastsStart) => {
- const onErrorFn = (error: Error) => {
- if (error) {
- toasts.addDanger(
- i18n.translate(
- 'xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage',
- {
- defaultMessage: 'Unable to find providers',
- }
- )
- );
- }
- };
-
- const query = useQuery(['user-profile'], {
- queryFn: () => getProviders(http),
- staleTime: Infinity,
- refetchOnWindowFocus: false,
- onError: onErrorFn,
- });
- return query;
-};
diff --git a/x-pack/platform/plugins/shared/stack_connectors/server/plugin.ts b/x-pack/platform/plugins/shared/stack_connectors/server/plugin.ts
index b20892938735b..aee84d963043d 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/server/plugin.ts
+++ b/x-pack/platform/plugins/shared/stack_connectors/server/plugin.ts
@@ -8,11 +8,7 @@
import { PluginInitializerContext, Plugin, CoreSetup, Logger } from '@kbn/core/server';
import { PluginSetupContract as ActionsPluginSetupContract } from '@kbn/actions-plugin/server';
import { registerConnectorTypes } from './connector_types';
-import {
- validSlackApiChannelsRoute,
- getWellKnownEmailServiceRoute,
- getInferenceServicesRoute,
-} from './routes';
+import { validSlackApiChannelsRoute, getWellKnownEmailServiceRoute } from './routes';
import {
ExperimentalFeatures,
parseExperimentalConfigValue,
@@ -43,7 +39,6 @@ export class StackConnectorsPlugin implements Plugin {
getWellKnownEmailServiceRoute(router);
validSlackApiChannelsRoute(router, actions.getActionsConfigurationUtilities(), this.logger);
- getInferenceServicesRoute(router);
registerConnectorTypes({
actions,
diff --git a/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.test.ts b/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.test.ts
deleted file mode 100644
index 9e1449a37f7ff..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.test.ts
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { httpServiceMock, httpServerMock } from '@kbn/core/server/mocks';
-import { coreMock } from '@kbn/core/server/mocks';
-import { getInferenceServicesRoute } from './get_inference_services';
-import { FieldType } from '../../common/dynamic_config/types';
-
-describe('getInferenceServicesRoute', () => {
- it('returns available service providers', async () => {
- const router = httpServiceMock.createRouter();
- const core = coreMock.createRequestHandlerContext();
-
- const mockResult = [
- {
- service: 'cohere',
- name: 'Cohere',
- task_types: ['text_embedding', 'rerank', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'elastic',
- name: 'Elastic',
- task_types: ['sparse_embedding'],
- configurations: {
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'watsonxai',
- name: 'IBM Watsonx',
- task_types: ['text_embedding'],
- configurations: {
- project_id: {
- default_value: null,
- description: '',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The IBM Watsonx API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: null,
- description: '',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureaistudio',
- name: 'Azure AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- endpoint_type: {
- default_value: null,
- description: 'Specifies the type of endpoint that is used in your model deployment.',
- label: 'Endpoint Type',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- target: {
- default_value: null,
- description: 'The target URL of your Azure AI Studio model deployment.',
- label: 'Target',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'hugging_face',
- name: 'Hugging Face',
- task_types: ['text_embedding', 'sparse_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: 'https://api.openai.com/v1/embeddings',
- description: 'The URL endpoint to use for the requests.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'amazonbedrock',
- name: 'Amazon Bedrock',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- secret_key: {
- default_value: null,
- description: 'A valid AWS secret key that is paired with the access_key.',
- label: 'Secret Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- access_key: {
- default_value: null,
- description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
- label: 'Access Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description:
- 'The base model ID or an ARN to a custom model based on a foundational model.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- region: {
- default_value: null,
- description: 'The region that your model or ARN is deployed in.',
- label: 'Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'anthropic',
- name: 'Anthropic',
- task_types: ['completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'googleaistudio',
- name: 'Google AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: "ID of the LLM you're using.",
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'elasticsearch',
- name: 'Elasticsearch',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
- configurations: {
- num_allocations: {
- default_value: 1,
- description:
- 'The total number of allocations this model is assigned across machine learning nodes.',
- label: 'Number Allocations',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- num_threads: {
- default_value: 2,
- description:
- 'Sets the number of threads used by each model allocation during inference.',
- label: 'Number Threads',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: '.multilingual-e5-small',
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'openai',
- name: 'OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description:
- 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- organization_id: {
- default_value: null,
- description: 'The unique identifier of your organization.',
- label: 'Organization ID',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- url: {
- default_value: 'https://api.openai.com/v1/chat/completions',
- description:
- 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureopenai',
- name: 'Azure OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- entra_id: {
- default_value: null,
- description: 'You must provide either an API key or an Entra ID.',
- label: 'Entra ID',
- required: false,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- deployment_id: {
- default_value: null,
- description: 'The deployment name of your deployed models.',
- label: 'Deployment ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- resource_name: {
- default_value: null,
- description: 'The name of your Azure OpenAI resource.',
- label: 'Resource Name',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The Azure API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'mistral',
- name: 'Mistral',
- task_types: ['text_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description:
- 'Refer to the Mistral models documentation for the list of available text embedding models.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'googlevertexai',
- name: 'Google Vertex AI',
- task_types: ['text_embedding', 'rerank'],
- configurations: {
- service_account_json: {
- default_value: null,
- description: "API Key for the provider you're connecting to.",
- label: 'Credentials JSON',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- project_id: {
- default_value: null,
- description:
- 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Project',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- location: {
- default_value: null,
- description:
- 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: `ID of the LLM you're using.`,
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'alibabacloud-ai-search',
- name: 'AlibabaCloud AI Search',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
- configurations: {
- workspace: {
- default_value: null,
- description: 'The name of the workspace used for the {infer} task.',
- label: 'Workspace',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `A valid API key for the AlibabaCloud AI Search API.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- service_id: {
- default_value: null,
- description: 'The name of the model service to use for the {infer} task.',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- host: {
- default_value: null,
- description:
- 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
- label: 'Host',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- http_schema: {
- default_value: null,
- description: '',
- label: 'HTTP Schema',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- ];
- core.elasticsearch.client.asInternalUser.transport.request.mockResolvedValue(mockResult);
-
- getInferenceServicesRoute(router);
-
- const [config, handler] = router.get.mock.calls[0];
- expect(config.path).toMatchInlineSnapshot(`"/internal/stack_connectors/_inference/_services"`);
-
- const mockResponse = httpServerMock.createResponseFactory();
- const mockRequest = httpServerMock.createKibanaRequest();
- await handler({ core }, mockRequest, mockResponse);
-
- expect(mockResponse.ok).toHaveBeenCalledWith({
- body: mockResult,
- });
- });
-});
diff --git a/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.ts b/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.ts
deleted file mode 100644
index 3c4a1b3aa1a35..0000000000000
--- a/x-pack/platform/plugins/shared/stack_connectors/server/routes/get_inference_services.ts
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import {
- IRouter,
- RequestHandlerContext,
- KibanaRequest,
- IKibanaResponse,
- KibanaResponseFactory,
-} from '@kbn/core/server';
-import { FieldType } from '../../common/dynamic_config/types';
-import { InferenceProvider } from '../../common/inference/types';
-import { INTERNAL_BASE_STACK_CONNECTORS_API_PATH } from '../../common';
-
-export const getInferenceServicesRoute = (router: IRouter) => {
- router.get(
- {
- path: `${INTERNAL_BASE_STACK_CONNECTORS_API_PATH}/_inference/_services`,
- security: {
- authz: {
- enabled: false,
- reason:
- 'This route is opted out of authorization as it relies on ES authorization instead.',
- },
- },
- options: {
- access: 'internal',
- },
- validate: false,
- },
- handler
- );
-
- async function handler(
- ctx: RequestHandlerContext,
- req: KibanaRequest,
- res: KibanaResponseFactory
- ): Promise<IKibanaResponse> {
- // Temporarily hard-coding the response until the real implementation is ready with the updated response - https://github.com/elastic/ml-team/issues/1428
-
- // const esClient = (await ctx.core).elasticsearch.client.asInternalUser;
-
- // // eslint-disable-next-line @typescript-eslint/no-explicit-any
- // const response = await esClient.transport.request({
- // method: 'GET',
- // path: `/_inference/_services`,
- // });
-
- const response: InferenceProvider[] = [
- {
- service: 'cohere',
- name: 'Cohere',
- task_types: ['text_embedding', 'rerank', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'elastic',
- name: 'Elastic',
- task_types: ['sparse_embedding'],
- configurations: {
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'watsonxai',
- name: 'IBM Watsonx',
- task_types: ['text_embedding'],
- configurations: {
- project_id: {
- default_value: null,
- description: '',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The IBM Watsonx API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: null,
- description: '',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureaistudio',
- name: 'Azure AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- endpoint_type: {
- default_value: null,
- description: 'Specifies the type of endpoint that is used in your model deployment.',
- label: 'Endpoint Type',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- target: {
- default_value: null,
- description: 'The target URL of your Azure AI Studio model deployment.',
- label: 'Target',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'hugging_face',
- name: 'Hugging Face',
- task_types: ['text_embedding', 'sparse_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- url: {
- default_value: 'https://api.openai.com/v1/embeddings',
- description: 'The URL endpoint to use for the requests.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'amazonbedrock',
- name: 'Amazon Bedrock',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- secret_key: {
- default_value: null,
- description: 'A valid AWS secret key that is paired with the access_key.',
- label: 'Secret Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- provider: {
- default_value: null,
- description: 'The model provider for your deployment.',
- label: 'Provider',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- access_key: {
- default_value: null,
- description: 'A valid AWS access key that has permissions to use Amazon Bedrock.',
- label: 'Access Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description:
- 'The base model ID or an ARN to a custom model based on a foundational model.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the amazonbedrock service sets the number of requests allowed per minute to 240.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- region: {
- default_value: null,
- description: 'The region that your model or ARN is deployed in.',
- label: 'Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'anthropic',
- name: 'Anthropic',
- task_types: ['completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'By default, the anthropic service sets the number of requests allowed per minute to 50.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'googleaistudio',
- name: 'Google AI Studio',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: "ID of the LLM you're using.",
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'elasticsearch',
- name: 'Elasticsearch',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank'],
- configurations: {
- num_allocations: {
- default_value: 1,
- description:
- 'The total number of allocations this model is assigned across machine learning nodes.',
- label: 'Number Allocations',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- num_threads: {
- default_value: 2,
- description:
- 'Sets the number of threads used by each model allocation during inference.',
- label: 'Number Threads',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: '.multilingual-e5-small',
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'openai',
- name: 'OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description:
- 'The OpenAI API authentication key. For more details about generating OpenAI API keys, refer to the https://platform.openai.com/account/api-keys.',
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- organization_id: {
- default_value: null,
- description: 'The unique identifier of your organization.',
- label: 'Organization ID',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'Default number of requests allowed per minute. For text_embedding is 3000. For completion is 500.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: 'The name of the model to use for the inference task.',
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- url: {
- default_value: 'https://api.openai.com/v1/chat/completions',
- description:
- 'The OpenAI API endpoint URL. For more information on the URL, refer to the https://platform.openai.com/docs/api-reference.',
- label: 'URL',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'azureopenai',
- name: 'Azure OpenAI',
- task_types: ['text_embedding', 'completion'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- entra_id: {
- default_value: null,
- description: 'You must provide either an API key or an Entra ID.',
- label: 'Entra ID',
- required: false,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description:
- 'The azureopenai service sets a default number of requests allowed per minute depending on the task type.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- deployment_id: {
- default_value: null,
- description: 'The deployment name of your deployed models.',
- label: 'Deployment ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- resource_name: {
- default_value: null,
- description: 'The name of your Azure OpenAI resource.',
- label: 'Resource Name',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_version: {
- default_value: null,
- description: 'The Azure API version ID to use.',
- label: 'API Version',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'mistral',
- name: 'Mistral',
- task_types: ['text_embedding'],
- configurations: {
- api_key: {
- default_value: null,
- description: `API Key for the provider you're connecting to.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- model: {
- default_value: null,
- description:
- 'Refer to the Mistral models documentation for the list of available text embedding models.',
- label: 'Model',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- max_input_tokens: {
- default_value: null,
- description: 'Allows you to specify the maximum number of tokens per input.',
- label: 'Maximum Input Tokens',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- },
- },
- {
- service: 'googlevertexai',
- name: 'Google Vertex AI',
- task_types: ['text_embedding', 'rerank'],
- configurations: {
- service_account_json: {
- default_value: null,
- description: "API Key for the provider you're connecting to.",
- label: 'Credentials JSON',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- project_id: {
- default_value: null,
- description:
- 'The GCP Project ID which has Vertex AI API(s) enabled. For more information on the URL, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Project',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- location: {
- default_value: null,
- description:
- 'Please provide the GCP region where the Vertex AI API(s) is enabled. For more information, refer to the {geminiVertexAIDocs}.',
- label: 'GCP Region',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- model_id: {
- default_value: null,
- description: `ID of the LLM you're using.`,
- label: 'Model ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- {
- service: 'alibabacloud-ai-search',
- name: 'AlibabaCloud AI Search',
- task_types: ['text_embedding', 'sparse_embedding', 'rerank', 'completion'],
- configurations: {
- workspace: {
- default_value: null,
- description: 'The name of the workspace used for the {infer} task.',
- label: 'Workspace',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- api_key: {
- default_value: null,
- description: `A valid API key for the AlibabaCloud AI Search API.`,
- label: 'API Key',
- required: true,
- sensitive: true,
- updatable: true,
- type: FieldType.STRING,
- },
- service_id: {
- default_value: null,
- description: 'The name of the model service to use for the {infer} task.',
- label: 'Project ID',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- host: {
- default_value: null,
- description:
- 'The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation.',
- label: 'Host',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- 'rate_limit.requests_per_minute': {
- default_value: null,
- description: 'Minimize the number of rate limit errors.',
- label: 'Rate Limit',
- required: false,
- sensitive: false,
- updatable: true,
- type: FieldType.INTEGER,
- },
- http_schema: {
- default_value: null,
- description: '',
- label: 'HTTP Schema',
- required: true,
- sensitive: false,
- updatable: true,
- type: FieldType.STRING,
- },
- },
- },
- ];
-
- // TODO: replace transformative map to the real type coming from the _inference/_service
- return res.ok({
- body: response,
- });
- }
-};
diff --git a/x-pack/platform/plugins/shared/stack_connectors/server/routes/index.ts b/x-pack/platform/plugins/shared/stack_connectors/server/routes/index.ts
index e64995e1a50ef..cd9857b2168ed 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/server/routes/index.ts
+++ b/x-pack/platform/plugins/shared/stack_connectors/server/routes/index.ts
@@ -7,4 +7,3 @@
export { getWellKnownEmailServiceRoute } from './get_well_known_email_service';
export { validSlackApiChannelsRoute } from './valid_slack_api_channels';
-export { getInferenceServicesRoute } from './get_inference_services';
diff --git a/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx b/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
index 4ed1a1090edb3..0972a6b5f2fe2 100644
--- a/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
+++ b/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/inference_form.tsx
@@ -9,15 +9,19 @@ import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_l
import React, { useCallback, useState } from 'react';
import { InferenceServiceFormFields } from '@kbn/inference-endpoint-ui-common';
import { EuiButton, EuiFlexGroup, EuiFlexItem, EuiSpacer } from '@elastic/eui';
-import { useProviders } from '../../hooks/use_providers';
import * as i18n from './translations';
import { useAddEndpoint } from '../../hooks/use_add_endpoint';
import { InferenceEndpoint } from '../../types';
+import { useKibana } from '../../hooks/use_kibana';
interface InferenceFormProps {
onSubmitSuccess: (state: boolean) => void;
}
export const InferenceForm: React.FC<InferenceFormProps> = ({ onSubmitSuccess }) => {
+ const {
+ http,
+ notifications: { toasts },
+ } = useKibana().services;
const [isLoading, setIsLoading] = useState(false);
const onSuccess = useCallback(() => {
setIsLoading(false);
@@ -30,7 +34,6 @@ export const InferenceForm: React.FC<InferenceFormProps> = ({ onSubmitSuccess })
() => onSuccess(),
() => onError()
);
- const { data: providers } = useProviders();
const { form } = useForm();
const handleSubmit = useCallback(async () => {
setIsLoading(true);
@@ -45,9 +48,9 @@ export const InferenceForm: React.FC<InferenceFormProps> = ({ onSubmitSuccess })
}
}, [addEndpoint, form]);
- return providers ? (
+ return (
- ) : null;
+ );
};
diff --git a/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/translations.ts b/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/translations.ts
index d7a9680dd1667..d0b9eb66cdd93 100644
--- a/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/translations.ts
+++ b/x-pack/solutions/search/plugins/search_inference_endpoints/public/hooks/translations.ts
@@ -28,10 +28,3 @@ export const ENDPOINT_CREATION_FAILED = i18n.translate(
defaultMessage: 'Endpoint creation failed',
}
);
-
-export const GET_PROVIDERS_FAILED = i18n.translate(
- 'xpack.searchInferenceEndpoints.addEndpoint.unableToFindProvidersQueryMessage',
- {
- defaultMessage: 'Unable to find providers',
- }
-);
From 60df2fa863a0056273af3b176c2eef1de451aa19 Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Tue, 7 Jan 2025 01:04:23 +0000
Subject: [PATCH 13/18] [CI] Auto-commit changed files from 'node
scripts/notice'
---
.../shared/kbn-inference-endpoint-ui-common/tsconfig.json | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
index f306c4703b7a0..9eb10898fba8b 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
@@ -21,6 +21,9 @@
"@kbn/i18n-react",
"@kbn/search-connectors",
"@kbn/es-ui-shared-plugin",
- "@kbn/triggers-actions-ui-plugin"
+ "@kbn/triggers-actions-ui-plugin",
+ "@kbn/core-http-browser",
+ "@kbn/kibana-utils-plugin",
+ "@kbn/core"
]
}
From cadcb90a1fd83ecf6d76559846ce93e0dce83dce Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Tue, 7 Jan 2025 01:04:30 +0000
Subject: [PATCH 14/18] [CI] Auto-commit changed files from 'node
scripts/yarn_deduplicate'
---
x-pack/platform/plugins/shared/stack_connectors/tsconfig.json | 1 -
1 file changed, 1 deletion(-)
diff --git a/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json b/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
index b4a565bf0ec93..4b7b5cdad8bb5 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
+++ b/x-pack/platform/plugins/shared/stack_connectors/tsconfig.json
@@ -42,7 +42,6 @@
"@kbn/utility-types",
"@kbn/task-manager-plugin",
"@kbn/alerting-types",
- "@kbn/core-notifications-browser",
"@kbn/response-ops-rule-form",
"@kbn/inference-endpoint-ui-common",
],
From 747e41d4a64de3cae9ec7525f2cfb836d2df14a0 Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Tue, 7 Jan 2025 01:04:41 +0000
Subject: [PATCH 15/18] [CI] Auto-commit changed files from 'node
scripts/styled_components_mapping'
---
.../search/plugins/search_inference_endpoints/tsconfig.json | 1 -
1 file changed, 1 deletion(-)
diff --git a/x-pack/solutions/search/plugins/search_inference_endpoints/tsconfig.json b/x-pack/solutions/search/plugins/search_inference_endpoints/tsconfig.json
index 7b8da3db70c9e..8909c20100eed 100644
--- a/x-pack/solutions/search/plugins/search_inference_endpoints/tsconfig.json
+++ b/x-pack/solutions/search/plugins/search_inference_endpoints/tsconfig.json
@@ -39,7 +39,6 @@
"@kbn/inference-endpoint-ui-common",
"@kbn/es-ui-shared-plugin",
"@kbn/search-connectors",
- "@kbn/core-http-browser",
"@kbn/safer-lodash-set"
],
"exclude": [
From 8955abd620027b75e9bb94b9a0bbebe994f1beb8 Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Mon, 6 Jan 2025 18:19:17 -0800
Subject: [PATCH 16/18] fixed build fail
---
.../kbn-inference-endpoint-ui-common/index.ts | 1 +
.../inference_service_form_fields.test.tsx | 21 +-
.../inference_service_form_fields.tsx | 6 +-
.../{components => }/hooks/use_providers.ts | 4 +-
.../src/translations.ts | 2 +-
.../inference/connector.test.tsx | 181 +++++++++---------
.../add_inference_flyout_wrapper.test.tsx | 2 +-
7 files changed, 111 insertions(+), 106 deletions(-)
rename x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/{components => }/hooks/use_providers.ts (99%)
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/index.ts b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/index.ts
index a2abc5514bd05..0192b9623755c 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/index.ts
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/index.ts
@@ -6,5 +6,6 @@
*/
export { InferenceServiceFormFields } from './src/components/inference_service_form_fields';
+export { useProviders } from './src/hooks/use_providers';
export * from './src/types/types';
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.test.tsx b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.test.tsx
index c5d19aa26919e..5c20bbecb6f1c 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.test.tsx
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.test.tsx
@@ -12,8 +12,10 @@ import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { Form, useForm } from '@kbn/es-ui-shared-plugin/static/forms/hook_form_lib';
import { I18nProvider } from '@kbn/i18n-react';
+import { httpServiceMock } from '@kbn/core-http-browser-mocks';
+import { notificationServiceMock } from '@kbn/core-notifications-browser-mocks';
-const providers = [
+const mockProviders = [
{
service: 'hugging_face',
name: 'Hugging Face',
@@ -110,6 +112,15 @@ const providers = [
},
] as InferenceProvider[];
+jest.mock('../hooks/use_providers', () => ({
+ useProviders: jest.fn(() => ({
+ data: mockProviders,
+ })),
+}));
+
+const httpMock = httpServiceMock.createStartContract();
+const notificationsMock = notificationServiceMock.createStartContract();
+
const MockFormProvider = ({ children }: { children: React.ReactElement }) => {
const { form } = useForm();
@@ -124,7 +135,7 @@ describe('Inference Services', () => {
it('renders', () => {
render(
-
+
);
@@ -134,7 +145,7 @@ describe('Inference Services', () => {
it('renders Selectable', async () => {
render(
-
+
);
@@ -145,7 +156,7 @@ describe('Inference Services', () => {
it('renders selected provider fields - hugging_face', async () => {
render(
-
+
);
@@ -165,7 +176,7 @@ describe('Inference Services', () => {
it('re-renders fields when selected to anthropic from hugging_face', async () => {
render(
-
+
);
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
index 0e6e8f2019514..22eb4fbadc901 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/inference_service_form_fields.tsx
@@ -37,7 +37,7 @@ import { ConfigurationFormItems } from './configuration/configuration_form_items
import { AdditionalOptionsFields } from './additional_options_fields';
import { ProviderSecretHiddenField } from './hidden_fields/provider_secret_hidden_field';
import { ProviderConfigHiddenField } from './hidden_fields/provider_config_hidden_field';
-import { useProviders } from './hooks/use_providers';
+import { useProviders } from '../hooks/use_providers';
interface InferenceServicesProps {
http: HttpSetup;
@@ -317,7 +317,7 @@ export const InferenceServiceFormFields: React.FC<InferenceServicesProps> = ({
setRequiredProviderFormFields(existingConfiguration.filter((p) => p.required || p.sensitive));
}, [config?.providerConfig, providerSchema, secrets]);
- return (
+ return !isLoading ? (
<>
= ({
>
) : null}
>
- );
+ ) : null;
};
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/hooks/use_providers.ts
similarity index 99%
rename from x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts
rename to x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/hooks/use_providers.ts
index 2b0dae5c6cfc2..260d61a2aade8 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/components/hooks/use_providers.ts
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/hooks/use_providers.ts
@@ -9,8 +9,8 @@ import type { HttpSetup } from '@kbn/core-http-browser';
import { useQuery } from '@tanstack/react-query';
import { KibanaServerError } from '@kbn/kibana-utils-plugin/common';
import { IToasts } from '@kbn/core/public';
-import { FieldType, InferenceProvider } from '../../..';
-import * as i18n from '../../translations';
+import { FieldType, InferenceProvider } from '../..';
+import * as i18n from '../translations';
const getProviders = (http: HttpSetup): InferenceProvider[] => {
return [
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
index 48170e9a5ec39..3c9bab9ecb6fe 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/src/translations.ts
@@ -129,7 +129,7 @@ export const RE_ENTER_SECRETS = (label: string) => {
};
export const GET_PROVIDERS_FAILED = i18n.translate(
- 'xpack.searchInferenceEndpoints.addEndpoint.unableToFindProvidersQueryMessage',
+ 'xpack.inferenceEndpointUICommon.hooks.unableToFindProvidersQueryMessage',
{
defaultMessage: 'Unable to find providers',
}
diff --git a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
index 83d1d75f63458..88889967d1dbf 100644
--- a/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
+++ b/x-pack/platform/plugins/shared/stack_connectors/public/connector_types/inference/connector.test.tsx
@@ -12,34 +12,6 @@ import { ConnectorFormTestProvider } from '../lib/test_utils';
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { createStartServicesMock } from '@kbn/triggers-actions-ui-plugin/public/common/lib/kibana/kibana_react.mock';
-import { useProviders } from './providers/get_providers';
-import { FieldType } from '../../../common/dynamic_config/types';
-
-jest.mock('./providers/get_providers');
-
-const mockUseKibanaReturnValue = createStartServicesMock();
-jest.mock('@kbn/triggers-actions-ui-plugin/public/common/lib/kibana', () => ({
- __esModule: true,
- useKibana: jest.fn(() => ({
- services: mockUseKibanaReturnValue,
- })),
-}));
-
-jest.mock('@faker-js/faker', () => {
- const originalModule = jest.requireActual('@faker-js/faker');
- return {
- ...originalModule,
- faker: {
- ...originalModule.faker,
- string: {
- ...originalModule.faker.string,
- alpha: jest.fn().mockReturnValue('123'),
- },
- },
- };
-});
-
-const mockProviders = useProviders as jest.Mock;
const providersSchemas = [
{
@@ -54,7 +26,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -63,7 +35,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
},
},
@@ -79,7 +51,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: null,
@@ -88,7 +60,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
max_input_tokens: {
default_value: null,
@@ -97,7 +69,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
},
},
@@ -113,7 +85,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
model_id: {
default_value: null,
@@ -122,7 +94,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
api_version: {
default_value: null,
@@ -131,7 +103,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
max_input_tokens: {
default_value: null,
@@ -140,7 +112,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
url: {
default_value: null,
@@ -149,7 +121,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -165,7 +137,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
provider: {
default_value: null,
@@ -174,7 +146,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
api_key: {
default_value: null,
@@ -183,7 +155,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -192,7 +164,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
target: {
default_value: null,
@@ -201,7 +173,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -217,7 +189,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -226,7 +198,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
url: {
default_value: 'https://api.openai.com/v1/embeddings',
@@ -235,7 +207,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -251,7 +223,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
provider: {
default_value: null,
@@ -260,7 +232,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
access_key: {
default_value: null,
@@ -269,7 +241,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
model: {
default_value: null,
@@ -278,7 +250,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -288,7 +260,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
region: {
default_value: null,
@@ -297,7 +269,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -313,7 +285,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -323,7 +295,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: null,
@@ -332,7 +304,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -348,7 +320,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -357,7 +329,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: null,
@@ -366,7 +338,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -383,7 +355,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
num_threads: {
default_value: 2,
@@ -392,7 +364,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: '.multilingual-e5-small',
@@ -401,7 +373,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -418,7 +390,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
organization_id: {
default_value: null,
@@ -427,7 +399,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -437,7 +409,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: null,
@@ -446,7 +418,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
url: {
default_value: 'https://api.openai.com/v1/chat/completions',
@@ -456,7 +428,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -472,7 +444,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
entra_id: {
default_value: null,
@@ -481,7 +453,7 @@ const providersSchemas = [
required: false,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -491,7 +463,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
deployment_id: {
default_value: null,
@@ -500,7 +472,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
resource_name: {
default_value: null,
@@ -509,7 +481,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
api_version: {
default_value: null,
@@ -518,7 +490,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -534,7 +506,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
model: {
default_value: null,
@@ -544,7 +516,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -553,7 +525,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
max_input_tokens: {
default_value: null,
@@ -562,7 +534,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
},
},
@@ -578,7 +550,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
project_id: {
default_value: null,
@@ -588,7 +560,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
location: {
default_value: null,
@@ -598,7 +570,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -607,7 +579,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
model_id: {
default_value: null,
@@ -616,7 +588,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
@@ -632,7 +604,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
api_key: {
default_value: null,
@@ -641,7 +613,7 @@ const providersSchemas = [
required: true,
sensitive: true,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
service_id: {
default_value: null,
@@ -650,7 +622,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
host: {
default_value: null,
@@ -660,7 +632,7 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
'rate_limit.requests_per_minute': {
default_value: null,
@@ -669,7 +641,7 @@ const providersSchemas = [
required: false,
sensitive: false,
updatable: true,
- type: FieldType.INTEGER,
+ type: 'int',
},
http_schema: {
default_value: null,
@@ -678,12 +650,40 @@ const providersSchemas = [
required: true,
sensitive: false,
updatable: true,
- type: FieldType.STRING,
+ type: 'string',
},
},
},
];
+const mockUseKibanaReturnValue = createStartServicesMock();
+jest.mock('@kbn/triggers-actions-ui-plugin/public/common/lib/kibana', () => ({
+ __esModule: true,
+ useKibana: jest.fn(() => ({
+ services: mockUseKibanaReturnValue,
+ })),
+}));
+
+jest.mock('@faker-js/faker', () => {
+ const originalModule = jest.requireActual('@faker-js/faker');
+ return {
+ ...originalModule,
+ faker: {
+ ...originalModule.faker,
+ string: {
+ ...originalModule.faker.string,
+ alpha: jest.fn().mockReturnValue('123'),
+ },
+ },
+ };
+});
+
+jest.mock('@kbn/inference-endpoint-ui-common/src/hooks/use_providers', () => ({
+ useProviders: jest.fn(() => ({
+ data: providersSchemas,
+ })),
+}));
+
const openAiConnector = {
actionTypeId: '.inference',
name: 'AI Connector',
@@ -724,13 +724,6 @@ const googleaistudioConnector = {
};
describe('ConnectorFields renders', () => {
- beforeEach(() => {
- jest.clearAllMocks();
- mockProviders.mockReturnValue({
- isLoading: false,
- data: providersSchemas,
- });
- });
test('openai provider fields are rendered', () => {
const { getAllByTestId } = render(
diff --git a/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx b/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
index e21ea0d95eccb..3031bbe857656 100644
--- a/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
+++ b/x-pack/solutions/search/plugins/search_inference_endpoints/public/components/add_inference_endpoints/add_inference_flyout_wrapper.test.tsx
@@ -22,7 +22,7 @@ jest.mock('../../hooks/use_add_endpoint', () => ({
}),
}));
-jest.mock('../../hooks/use_providers', () => ({
+jest.mock('@kbn/inference-endpoint-ui-common/src/hooks/use_providers', () => ({
useProviders: jest.fn(() => ({
data: mockProviders,
})),
From f7b66f587ba37b0635a8d9c27882262917a3311e Mon Sep 17 00:00:00 2001
From: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Date: Tue, 7 Jan 2025 02:28:43 +0000
Subject: [PATCH 17/18] [CI] Auto-commit changed files from 'node
scripts/notice'
---
.../shared/kbn-inference-endpoint-ui-common/tsconfig.json | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
index 9eb10898fba8b..5c60ee4820e4a 100644
--- a/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
+++ b/x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common/tsconfig.json
@@ -24,6 +24,8 @@
"@kbn/triggers-actions-ui-plugin",
"@kbn/core-http-browser",
"@kbn/kibana-utils-plugin",
- "@kbn/core"
+ "@kbn/core",
+ "@kbn/core-http-browser-mocks",
+ "@kbn/core-notifications-browser-mocks"
]
}
From 713e039a58eb4b108c81820a758eb48090313b5a Mon Sep 17 00:00:00 2001
From: YulNaumenko
Date: Mon, 6 Jan 2025 18:57:09 -0800
Subject: [PATCH 18/18] translations fix
---
.../plugins/private/translations/translations/fr-FR.json | 1 -
.../plugins/private/translations/translations/ja-JP.json | 1 -
.../plugins/private/translations/translations/zh-CN.json | 1 -
3 files changed, 3 deletions(-)
diff --git a/x-pack/platform/plugins/private/translations/translations/fr-FR.json b/x-pack/platform/plugins/private/translations/translations/fr-FR.json
index 8c9561d9b4dd8..5c0f839ea211f 100644
--- a/x-pack/platform/plugins/private/translations/translations/fr-FR.json
+++ b/x-pack/platform/plugins/private/translations/translations/fr-FR.json
@@ -44999,7 +44999,6 @@
"xpack.stackConnectors.components.inference.selectMessageText": "Envoyez des demandes aux fournisseurs d'IA tels qu'Amazon Bedrock, OpenAI et bien d'autres.",
"xpack.stackConnectors.components.inference.selectProvider": "Sélectionner un service",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "Type de tâche",
- "xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "Impossible de trouver des fournisseurs",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "Token d'API",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
"xpack.stackConnectors.components.jira.commentsTextAreaFieldLabel": "Commentaires supplémentaires",
diff --git a/x-pack/platform/plugins/private/translations/translations/ja-JP.json b/x-pack/platform/plugins/private/translations/translations/ja-JP.json
index ac90c30a9f17e..156eea59ff524 100644
--- a/x-pack/platform/plugins/private/translations/translations/ja-JP.json
+++ b/x-pack/platform/plugins/private/translations/translations/ja-JP.json
@@ -44849,7 +44849,6 @@
"xpack.stackConnectors.components.inference.selectMessageText": "Amazon Bedrock、OpenAIなどのAIプロバイダーに要求を送信します。",
"xpack.stackConnectors.components.inference.selectProvider": "サービスを選択",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "タスクタイプ",
- "xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "プロバイダーが見つかりません",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "APIトークン",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
"xpack.stackConnectors.components.jira.commentsTextAreaFieldLabel": "追加のコメント",
diff --git a/x-pack/platform/plugins/private/translations/translations/zh-CN.json b/x-pack/platform/plugins/private/translations/translations/zh-CN.json
index 246d7c738384d..7f71645070f54 100644
--- a/x-pack/platform/plugins/private/translations/translations/zh-CN.json
+++ b/x-pack/platform/plugins/private/translations/translations/zh-CN.json
@@ -44186,7 +44186,6 @@
"xpack.stackConnectors.components.inference.selectMessageText": "发送请求至 Amazon Bedrock、OpenAI 等 AI 提供商。",
"xpack.stackConnectors.components.inference.selectProvider": "选择服务",
"xpack.stackConnectors.components.inference.taskTypeFieldLabel": "任务类型",
- "xpack.stackConnectors.components.inference.unableToFindProvidersQueryMessage": "找不到提供商",
"xpack.stackConnectors.components.jira.apiTokenTextFieldLabel": "API 令牌",
"xpack.stackConnectors.components.jira.apiUrlTextFieldLabel": "URL",
"xpack.stackConnectors.components.jira.commentsTextAreaFieldLabel": "其他注释",