diff --git a/common/global/rhosp_attributes.adoc b/common/global/rhosp_attributes.adoc index a463d3df..8544c71f 100644 --- a/common/global/rhosp_attributes.adoc +++ b/common/global/rhosp_attributes.adoc @@ -15,7 +15,7 @@ :osp_long: Red Hat OpenStack Platform :osp_acro: RHOSP -:osp_curr_ver: 17.1-Beta +:osp_curr_ver: 17.1 :osp_curr_ver_no_beta: 17.1 :osp_z_stream: 0 @@ -29,5 +29,6 @@ :defaultURL: https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{osp_curr_ver}/html :defaultCephURL: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{CephVernum}/html -:setup-tlse: {defaultURL}/hardening_red_hat_openstack_platform/assembly_securing-rhos-with-tls-and-pki_security_and_hardening#proc_implementing-tls-e-with-ansible_encryption-and-key-management[Implementing TLS-e with Ansible] +// Specific links +:setup-tlse: {defaultURL}/hardening_red_hat_openstack_platform/assembly_securing-rhos-with-tls-and-pki_security_and_hardening#proc_implementing-tls-e-with-ansible_encryption-and-key-management[Implementing TLS-e with Ansible] diff --git a/common/global/stf-attributes.adoc b/common/global/stf-attributes.adoc index 18859528..8627eb74 100644 --- a/common/global/stf-attributes.adoc +++ b/common/global/stf-attributes.adoc @@ -28,11 +28,16 @@ ifeval::[{vernum} >= 17.0] :include_when_17: endif::[] +ifeval::[{vernum} == 17.1] +:include_when_17_1: +endif::[] + ifeval::[{ProductVersion} < 1.5] :include_before_stf15: endif::[] ifeval::["{build}" == "upstream"] +:ObservabilityOperator: Observability{nbsp}Operator :OpenShift: OpenShift :OpenShiftShort: OKD :OpenStack: OpenStack @@ -45,17 +50,18 @@ ifeval::["{build}" == "upstream"] :Project: Service{nbsp}Telemetry{nbsp}Framework :ProjectShort: STF :MessageBus: Apache{nbsp}Qpid{nbsp}Dispatch{nbsp}Router -:SupportedOpenShiftVersion: 4.10 -:NextSupportedOpenShiftVersion: 4.12 -:CodeReadyContainersVersion: 2.6.0 +:SupportedOpenShiftVersion: 4.12 +:NextSupportedOpenShiftVersion: 4.14 
+:CodeReadyContainersVersion: 2.19.0 endif::[] ifeval::["{build}" == "downstream"] +:ObservabilityOperator: Cluster{nbsp}Observability{nbsp}Operator :OpenShift: Red{nbsp}Hat{nbsp}OpenShift{nbsp}Container{nbsp}Platform :OpenShiftShort: OCP :OpenStack: Red{nbsp}Hat{nbsp}OpenStack{nbsp}Platform :OpenStackShort: RHOSP -:OpenStackVersion: 17.0 +:OpenStackVersion: 17.1 :OpenStackLong: {OpenStack}{nbsp}{OpenStackVersion} :OpenStackInstaller: director :OVirt: Red{nbsp}Hat{nbsp}Virtualization @@ -63,6 +69,6 @@ ifeval::["{build}" == "downstream"] :Project: Service{nbsp}Telemetry{nbsp}Framework :ProjectShort: STF :MessageBus: AMQ{nbsp}Interconnect -:SupportedOpenShiftVersion: 4.10 -:NextSupportedOpenShiftVersion: 4.12 +:SupportedOpenShiftVersion: 4.12 +:NextSupportedOpenShiftVersion: 4.14 endif::[] diff --git a/doc-Service-Telemetry-Framework/Makefile b/doc-Service-Telemetry-Framework/Makefile index 87dcbc19..e321f39f 100644 --- a/doc-Service-Telemetry-Framework/Makefile +++ b/doc-Service-Telemetry-Framework/Makefile @@ -3,10 +3,11 @@ BUILD_DIR = ../build ROOTDIR = $(realpath .) 
NAME = $(notdir $(ROOTDIR)) DEST_DIR = $(BUILD_DIR)/$(NAME) -DEST_HTML = $(DEST_DIR)/index-1-5-$(BUILD).html -DEST_HTML_170 = $(DEST_DIR)/index-1-5-$(BUILD)-170.html -DEST_HTML_162 = $(DEST_DIR)/index-1-5-$(BUILD)-162.html -DEST_HTML_13 = $(DEST_DIR)/index-1-5-$(BUILD)-13.html +DEST_HTML = $(DEST_DIR)/index-$(BUILD).html +DEST_HTML_171 = $(DEST_DIR)/index-$(BUILD)-171.html +DEST_HTML_170 = $(DEST_DIR)/index-$(BUILD)-170.html +DEST_HTML_162 = $(DEST_DIR)/index-$(BUILD)-162.html +DEST_HTML_13 = $(DEST_DIR)/index-$(BUILD)-13.html DEST_PDF = $(BUILD_DIR)/$(NAME)-$(BUILD).pdf IMAGES_DIR = $(DEST_DIR)/images IMAGES_TS = $(DEST_DIR)/.timestamp-images @@ -23,10 +24,12 @@ endif all: html -html: html-latest html170 html162 html13 +html: html-latest html171 html162 html-latest: prepare $(IMAGES_TS) $(DEST_HTML) +html171: prepare $(IMAGES_TS) $(DEST_HTML_171) + html170: prepare $(IMAGES_TS) $(DEST_HTML_170) html162: prepare $(IMAGES_TS) $(DEST_HTML_162) @@ -53,7 +56,10 @@ $(IMAGES_TS): $(IMAGES) touch $(IMAGES_TS) $(DEST_HTML): $(SOURCES) - asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.0 -b xhtml5 -d book -o $@ $< + asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.1 -b xhtml5 -d book -o $@ $< + +$(DEST_HTML_171): $(SOURCES) + asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.1 -b xhtml5 -d book -o $@ $< $(DEST_HTML_170): $(SOURCES) asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.0 -b xhtml5 -d book -o $@ $< diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc 
b/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc index 13100ea2..28916ec4 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc @@ -23,8 +23,8 @@ endif::include_when_16[] include::../modules/con_dashboards.adoc[leveloffset=+1] include::../modules/proc_setting-up-grafana-to-host-the-dashboard.adoc[leveloffset=+2] ifdef::include_when_16[] -include::../modules/proc_overriding-the-default-grafana-container-image.adoc[leveloffset=+2] -//TODO: Add dashboards back when we have working code for OSP13 +// TODO: either rewrite or drop this procedure. We now provide the preferred downstream RHEL Grafana workload image in the deployment procedure. +//include::../modules/proc_overriding-the-default-grafana-container-image.adoc[leveloffset=+2] include::../modules/proc_importing-dashboards.adoc[leveloffset=+2] endif::include_when_16[] include::../modules/proc_retrieving-and-setting-grafana-login-credentials.adoc[leveloffset=+2] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_completing-the-stf-configuration.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_completing-the-stf-configuration.adoc index db479abe..fb6dd127 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_completing-the-stf-configuration.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_completing-the-stf-configuration.adoc @@ -29,6 +29,7 @@ include::../modules/proc_configuring-red-hat-openstack-platform-overcloud-for-st ifdef::include_when_13,include_when_17[] include::../modules/proc_getting-ca-certificate-from-stf-for-overcloud-configuration.adoc[leveloffset=+2] endif::include_when_13,include_when_17[] +include::../modules/proc_retrieving-the-qdr-password.adoc[leveloffset=+2] include::../modules/proc_retrieving-the-qdr-route-address.adoc[leveloffset=+2] 
include::../modules/proc_creating-the-base-configuration-for-stf.adoc[leveloffset=+2] include::../modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc[leveloffset=+2] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc index 9bcde973..7b084d58 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc @@ -6,14 +6,14 @@ ifdef::context[:parent-context: {context}] :context: assembly-installing-the-core-components-of-stf [role="_abstract"] -You can use Operators to load the {Project} ({ProjectShort}) components and objects. Operators manage each of the following {ProjectShort} core and community components: +You can use Operators to load the {Project} ({ProjectShort}) components and objects. Operators manage each of the following {ProjectShort} core components: -* cert-manager +* Certificate Management * {MessageBus} -* Smart Gateway -* Prometheus and AlertManager -* Elasticsearch -* Grafana +* Smart Gateways +* Prometheus and Alertmanager + +{Project} ({ProjectShort}) uses other supporting Operators as part of the deployment. {ProjectShort} can resolve most dependencies automatically, but you need to pre-install some Operators, such as {ObservabilityOperator}, which provides an instance of Prometheus and Alertmanager, and cert-manager for Red Hat OpenShift, which provides management of certificates. .Prerequisites @@ -23,7 +23,7 @@ endif::[] ifeval::["{SupportedOpenShiftVersion}" != "{NextSupportedOpenShiftVersion}"] * An {OpenShift} version inclusive of {SupportedOpenShiftVersion} through {NextSupportedOpenShiftVersion} is running. 
endif::[] -* You have prepared your {OpenShift} environment and ensured that there is persistent storage and enough resources to run the {ProjectShort} components on top of the {OpenShift} environment. For more information, see https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. +* You have prepared your {OpenShift} environment and ensured that there is persistent storage and enough resources to run the {ProjectShort} components on top of the {OpenShift} environment. For more information about {ProjectShort} performance, see the Red Hat Knowledge Base article https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. * Your environment is fully connected. {ProjectShort} does not work in a {OpenShift}-disconnected environments or network proxy environments. ifeval::["{build}" == "downstream"] @@ -40,10 +40,16 @@ endif::[] * For more information about Operators, see the https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/understanding/olm-what-operators-are.html[_Understanding Operators_] guide. * For more information about Operator catalogs, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/understanding/olm-rh-catalogs.html[_Red Hat-provided Operator catalogs_]. -//* For more information about how to remove {ProjectShort} from the {OpenShift} environment, see xref:assembly-removing-stf-from-the-openshift-environment_{}[]. +* For more information about the cert-manager Operator for Red Hat OpenShift, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/security/cert_manager_operator/index.html[_cert-manager Operator for Red Hat OpenShift overview_].
+* For more information about {ObservabilityOperator}, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/monitoring/cluster_observability_operator/cluster-observability-operator-overview.html[_Cluster Observability Operator Overview_]. + +include::../modules/con_deploying-stf-to-the-openshift-environment.adoc[leveloffset=+1] -include::../modules/proc_deploying-stf-to-the-openshift-environment.adoc[leveloffset=+1] +include::../modules/proc_deploying-observability-operator.adoc[leveloffset=+2] +include::../modules/proc_deploying-certificate-manager-for-openshift-operator.adoc[leveloffset=+2] +include::../modules/proc_deploying-service-telemetry-operator.adoc[leveloffset=+2] include::../modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc[leveloffset=+1] + include::../modules/con_primary-parameters-of-the-servicetelemetry-object.adoc[leveloffset=+2] include::../modules/proc_accessing-uis-for-stf-components.adoc[leveloffset=+1] include::../modules/proc_configuring-observability-strategy.adoc[leveloffset=+1] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_introduction-to-stf.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_introduction-to-stf.adoc index 006a62a5..2178e603 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_introduction-to-stf.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_introduction-to-stf.adoc @@ -49,6 +49,7 @@ include::../modules/con_support-for-stf.adoc[leveloffset=+1] endif::[] include::../modules/con_stf-architecture.adoc[leveloffset=+1] +include::../modules/con_stf-architecture-changes.adoc[leveloffset=+2] include::../modules/con_installation-size-of-ocp.adoc[leveloffset=+1] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc index 0750256d..ea2ef706 100644 --- 
a/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc @@ -11,19 +11,12 @@ To prepare your {OpenShift} environment for {Project} ({ProjectShort}), you must * Ensure that you have persistent storage available in your {OpenShift} cluster for a production-grade deployment. For more information, see <>. * Ensure that enough resources are available to run the Operators and the application containers. For more information, see <>. * Ensure that you have a fully connected network environment. For more information, see xref:con-network-considerations-for-service-telemetry-framework_assembly-preparing-your-ocp-environment-for-stf[]. -ifeval::["{build}" == "upstream"] -* {ProjectShort} uses Elasticsearch to store events, which requires a larger than normal `vm.max_map_count` value. The `vm.max_map_count` value is set by default in {OpenShift}. For more information about how to edit the value of `vm.max_map_count`, see <>. 
-endif::[] include::../modules/con_observability-strategy.adoc[leveloffset=+1] include::../modules/con_persistent-volumes.adoc[leveloffset=+1] include::../modules/con_resource-allocation.adoc[leveloffset=+1] include::../modules/con_network-considerations-for-service-telemetry-framework.adoc[leveloffset=+1] -ifeval::["{build}" == "upstream"] -include::../modules/con_node-tuning-operator.adoc[leveloffset=+1] -endif::[] - //reset the context ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc index 35e17644..b9cde94e 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc @@ -15,12 +15,14 @@ ifeval::["{build}" == "upstream"] . Remove the catalog source. endif::[] . Remove the cert-manager Operator. +. Remove the {ObservabilityOperator}. 
include::../modules/proc_deleting-the-namespace.adoc[leveloffset=+1] ifeval::["{build}" == "upstream"] include::../modules/proc_removing-the-catalogsource.adoc[leveloffset=+1] endif::[] -include::../modules/proc_removing-the-cert-manager-operator.adoc[leveloffset=+1] +include::../modules/ref_removing-the-cert-manager-operator.adoc[leveloffset=+1] +include::../modules/ref_removing-the-observability-operator.adoc[leveloffset=+1] //reset the context ifdef::parent-context[:context: {parent-context}] diff --git a/doc-Service-Telemetry-Framework/docinfo.xml b/doc-Service-Telemetry-Framework/docinfo.xml index bcb78d72..ee11ff6f 100644 --- a/doc-Service-Telemetry-Framework/docinfo.xml +++ b/doc-Service-Telemetry-Framework/docinfo.xml @@ -1,7 +1,7 @@ Service Telemetry Framework 1.5 Installing and deploying Service Telemetry Framework 1.5 Red Hat OpenStack Platform -17.0 +17.1 0 diff --git a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_manually.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_manually.png new file mode 100644 index 00000000..b71b3e3b Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_manually.png differ diff --git a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_prereq.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_prereq.png new file mode 100644 index 00000000..54d7d301 Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_deployment_prereq.png differ diff --git a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology.png new file mode 100644 index 00000000..5a8b4fb4 Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology.png differ diff --git 
a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology_2.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology_2.png new file mode 100644 index 00000000..411d5da3 Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_0923_topology_2.png differ diff --git a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch.png new file mode 100644 index 00000000..30379002 Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch.png differ diff --git a/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch_2.png b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch_2.png new file mode 100644 index 00000000..146ececf Binary files /dev/null and b/doc-Service-Telemetry-Framework/images/363_OpenStack_STF_updates_1223_arch_2.png differ diff --git a/doc-Service-Telemetry-Framework/master.adoc b/doc-Service-Telemetry-Framework/master.adoc index d4d7c6dc..b0d9d717 100644 --- a/doc-Service-Telemetry-Framework/master.adoc +++ b/doc-Service-Telemetry-Framework/master.adoc @@ -1,7 +1,7 @@ = Service Telemetry Framework 1.5 OpenStack Documentation Team :imagesdir: images -:vernum: 17.0 +:vernum: 17.1 :toc: left :toclevels: 3 :icons: font @@ -32,9 +32,9 @@ include::assemblies/assembly_installing-the-core-components-of-stf.adoc[leveloff include::assemblies/assembly_completing-the-stf-configuration.adoc[leveloffset=+1] ifeval::["{build}" == "downstream"] -ifdef::include_when_16_2[] +ifdef::include_when_16_2,include_when_17_1[] include::assemblies/assembly_completing-the-stf-configuration-using-director-operator.adoc[leveloffset=+1] -endif::include_when_16_2[] +endif::include_when_16_2,include_when_17_1[] endif::[] //advanced features @@ -46,7 +46,7 @@ 
include::assemblies/assembly_renewing-the-amq-interconnect-certificate.adoc[leve // removing include::assemblies/assembly_removing-stf-from-the-openshift-environment.adoc[leveloffset=+1] -//collectd plugins - // upgrading to 1.5 -include::assemblies/assembly_upgrading-service-telemetry-framework-to-version-1-5.adoc[leveloffset=+1] +// NOTE: this is no longer being rendered because the expectation is to move from STF 1.4 on OCP 4.8 to STF 1.5 on OCP 4.10, both of which are EOL now. +// if this affects you, please open a customer case to help manage the upgrade, or simply perform a greenfield deployment of STF 1.5 on OCP 4.14. +//include::assemblies/assembly_upgrading-service-telemetry-framework-to-version-1-5.adoc[leveloffset=+1] diff --git a/doc-Service-Telemetry-Framework/modules/con_configuring-multiple-clouds.adoc b/doc-Service-Telemetry-Framework/modules/con_configuring-multiple-clouds.adoc index 60c74b70..518c467e 100644 --- a/doc-Service-Telemetry-Framework/modules/con_configuring-multiple-clouds.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_configuring-multiple-clouds.adoc @@ -4,9 +4,14 @@ [role="_abstract"] You can configure multiple {OpenStack} ({OpenStackShort}) clouds to target a single instance of {Project} ({ProjectShort}). When you configure multiple clouds, every cloud must send metrics and events on their own unique message bus topic. In the {ProjectShort} deployment, Smart Gateway instances listen on these topics to save information to the common data store. Data that is stored by the Smart Gateway in the data storage domain is filtered by using the metadata that each of Smart Gateways creates. +[WARNING] +==== +Ensure that you deploy each cloud with a unique cloud domain configuration. For more information about configuring the domain for your cloud deployment, see xref:setting-a-unique-cloud-domain_assembly-completing-the-stf-configuration[]. 
+==== + [[osp-stf-multiple-clouds]] .Two {OpenStackShort} clouds connect to {ProjectShort} -image::OpenStack_STF_Overview_37_0919_topology.png[An example of two {OpenStackShort} clouds connecting to {ProjectShort}] +image::363_OpenStack_STF_updates_0923_topology_2.png[An example of two {OpenStackShort} clouds connecting to {ProjectShort}] To configure the {OpenStackShort} overcloud for a multiple cloud scenario, complete the following tasks: diff --git a/doc-Service-Telemetry-Framework/modules/con_deploying-stf-to-the-openshift-environment.adoc b/doc-Service-Telemetry-Framework/modules/con_deploying-stf-to-the-openshift-environment.adoc new file mode 100644 index 00000000..3949d54c --- /dev/null +++ b/doc-Service-Telemetry-Framework/modules/con_deploying-stf-to-the-openshift-environment.adoc @@ -0,0 +1,5 @@ +[id="deploying-stf-to-the-openshift-environment_{context}"] += Deploying {Project} to the {OpenShift} environment + +[role="_abstract"] +Deploy {Project} ({ProjectShort}) to collect and store {OpenStack} ({OpenStackShort}) telemetry. diff --git a/doc-Service-Telemetry-Framework/modules/con_development-environment-resource-requirements.adoc b/doc-Service-Telemetry-Framework/modules/con_development-environment-resource-requirements.adoc index 0c2ae8c1..462234a3 100644 --- a/doc-Service-Telemetry-Framework/modules/con_development-environment-resource-requirements.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_development-environment-resource-requirements.adoc @@ -40,14 +40,6 @@ The https://code-ready.github.io/crc/#minimum-system-requirements-hardware_gsg[m .Procedure -. After you complete the installation of CRC, you must enable cluster monitoring in the CRC environment: -+ -[source,bash,options="nowrap"] ----- -$ crc config set enable-cluster-monitoring true -Successfully configured enable-cluster-monitoring to true ----- - . If you have an existing environment, delete it, and recreate it to ensure that the resource requests have an effect. 
Enter the `crc delete` command: + [source,bash] diff --git a/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc b/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc index 3d92fb8d..3db88d62 100644 --- a/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc @@ -1,16 +1,16 @@ - [id="high-availability_{context}"] = High availability [role="_abstract"] -With high availability, {Project} ({ProjectShort}) can rapidly recover from failures in its component services. Although {OpenShift} restarts a failed pod if nodes are available to schedule the workload, this recovery process might take more than one minute, during which time events and metrics are lost. A high availability configuration includes multiple copies of {ProjectShort} components, which reduces recovery time to approximately 2 seconds. To protect against failure of an {OpenShift} node, deploy {ProjectShort} to an {OpenShift} cluster with three or more nodes. - [WARNING] -{ProjectShort} is not yet a fully fault tolerant system. Delivery of metrics and events during the recovery period is not guaranteed. +==== +{ProjectShort} high availability (HA) mode is deprecated and is not supported in production environments. {OpenShift} is a highly-available platform, and you can cause issues and complicate debugging in {ProjectShort} if you enable HA mode. +==== + +With high availability, {Project} ({ProjectShort}) can rapidly recover from failures in its component services. Although {OpenShift} restarts a failed pod if nodes are available to schedule the workload, this recovery process might take more than one minute, during which time events and metrics are lost. A high availability configuration includes multiple copies of {ProjectShort} components, which reduces recovery time to approximately 2 seconds. 
To protect against failure of an {OpenShift} node, deploy {ProjectShort} to an {OpenShift} cluster with three or more nodes. Enabling high availability has the following effects: -* Three Elasticsearch pods run instead of the default one. * The following components run two pods instead of the default one: ** {MessageBus} ** Alertmanager diff --git a/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc b/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc deleted file mode 100644 index 186cb232..00000000 --- a/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// - -// This module can be included from assemblies using the following include statement: -// include::/con_node-tuning-operator.adoc[leveloffset=+1] - -// The file name and the ID are based on the module title. For example: -// * file name: con_my-concept-module-a.adoc -// * ID: [id='con_my-concept-module-a_{context}'] -// * Title: = My concept module A -// -// The ID is used as an anchor for linking to the module. Avoid changing -// it after the module has been published to ensure existing links are not -// broken. -// -// The `context` attribute enables module reuse. Every module's ID includes -// {context}, which ensures that the module has a unique ID even if it is -// reused multiple times in a guide. -// -// In the title, include nouns that are used in the body text. This helps -// readers and search engines find information quickly. -// Do not start the title with a verb. See also _Wording of headings_ -// in _The IBM Style Guide_. -[id="node-tuning-operator_{context}"] -= Node tuning operator - -[role="_abstract"] -{ProjectShort} uses Elasticsearch to store events, which requires a larger than normal `vm.max_map_count`. The `vm.max_map_count` value is set by default in {OpenShift}. 
- -[TIP] -If your host platform is a typical {OpenShift} 4 environment, do not make any adjustments. The default node tuning operator is configured to account for Elasticsearch workloads. - -If you want to edit the value of `vm.max_map_count`, you cannot apply node tuning manually using the `sysctl` command because {OpenShift} manages nodes directly. To configure values and apply them to the infrastructure, you must use the node tuning operator. For more information, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html[Using the Node Tuning Operator]. - -In an {OpenShiftShort} deployment, the default node tuning operator specification provides the required profiles for Elasticsearch workloads or pods scheduled on nodes. To view the default cluster node tuning specification, run the following command: - -[source,bash] ----- -$ oc get Tuned/default -o yaml -n openshift-cluster-node-tuning-operator ----- - -The output of the default specification is documented at https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html#custom-tuning-default-profiles-set_node-tuning-operator[Default profiles set on a cluster]. You can manage the assignment of profiles in the `recommend` section where profiles are applied to a node when certain conditions are met. When scheduling Elasticsearch to a node in {ProjectShort}, one of the following profiles is applied: - -* `openshift-control-plane-es` -* `openshift-node-es` - -When scheduling an Elasticsearch pod, there must be a label present that matches `tuned.openshift.io/elasticsearch`. If the label is present, one of the two profiles is assigned to the pod. No action is required by the administrator if you use the recommended Operator for Elasticsearch. 
If you use a custom-deployed Elasticsearch with {ProjectShort}, ensure that you add the `tuned.openshift.io/elasticsearch` label to all scheduled pods. - -.Additional resources - -* For more information about virtual memory use by Elasticsearch, see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html - -* For more information about how the profiles are applied to nodes, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html#custom-tuning-specification_node-tuning-operator[Custom tuning specification]. diff --git a/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc b/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc index 4d27531e..d11f2dfc 100644 --- a/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc @@ -2,6 +2,34 @@ = Observability Strategy in Service Telemetry Framework [role="_abstract"] -{Project} ({ProjectShort}) does not include storage backends and alerting tools. {ProjectShort} uses community operators to deploy Prometheus, Alertmanager, Grafana, and Elasticsearch. {ProjectShort} makes requests to these community operators to create instances of each application configured to work with {ProjectShort}. +{Project} ({ProjectShort}) does not include event storage backends or dashboarding tools. {ProjectShort} can optionally create datasource configurations for Grafana using the community operator to provide a dashboarding interface. Instead of having Service Telemetry Operator create custom resource requests, you can use your own deployments of these applications or other compatible applications, and scrape the metrics from the Smart Gateways for delivery to your own Prometheus-compatible system for telemetry storage.
If you set the `observabilityStrategy` to `none`, then storage backends will not be deployed, so persistent storage will not be required by {ProjectShort}. + +Use the `observabilityStrategy` property on the {ProjectShort} object to specify which type of observability components will be deployed. + +The following values are available: + +[cols="1,1"] +|=== +|value |meaning + +| use_redhat +| Red Hat supported components are requested by {ProjectShort}. This includes Prometheus and Alertmanager from the {ObservabilityOperator}, but no resource requests to Elastic Cloud on Kubernetes (ECK) Operator. If enabled, resources are also requested from the Grafana Operator (community component). + +| use_hybrid +| In addition to the Red Hat supported components, Elasticsearch and Grafana resources are also requested (if specified in the ServiceTelemetry object) + +| use_community +| The community version of Prometheus Operator is used instead of {ObservabilityOperator}. Elasticsearch and Grafana resources are also requested (if specified in the ServiceTelemetry object) + +| none +| No storage or alerting components are deployed +|=== + +[NOTE] +==== +Newly deployed {ProjectShort} environments as of 1.5.3 default to `use_redhat`. Existing {ProjectShort} deployments created before 1.5.3 default to `use_community`. +==== + +To migrate an existing {ProjectShort} deployment to `use_redhat`, see the Red Hat Knowledge Base article link:https://access.redhat.com/articles/7011708[Migrating {Project} to fully supported operators].
diff --git a/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc b/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc index 5cc414ac..09edca24 100644 --- a/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc @@ -2,7 +2,7 @@ = Persistent volumes [role="_abstract"] -{Project} ({ProjectShort}) uses persistent storage in {OpenShift} to request persistent volumes so that Prometheus and Elasticsearch can store metrics and events. +{Project} ({ProjectShort}) uses persistent storage in {OpenShift} to request persistent volumes so that Prometheus can store metrics. When you enable persistent storage through the Service Telemetry Operator, the Persistent Volume Claims (PVC) requested in an {ProjectShort} deployment results in an access mode of RWO (ReadWriteOnce). If your environment contains pre-provisioned persistent volumes, ensure that volumes of RWO are available in the {OpenShift} default configured `storageClass`. @@ -12,5 +12,3 @@ When you enable persistent storage through the Service Telemetry Operator, the P * For more information about recommended configurable storage technology in {OpenShift}, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/optimizing-storage.html#recommended-configurable-storage-technology_persistent-storage[Recommended configurable storage technology]. * For more information about configuring persistent storage for Prometheus in {ProjectShort}, see xref:backends-configuring-persistent-storage-for-prometheus_assembly-installing-the-core-components-of-stf[]. - -* For more information about configuring persistent storage for Elasticsearch in {ProjectShort}, see xref:backends-configuring-persistent-storage-for-elasticsearch_assembly-installing-the-core-components-of-stf[]. 
diff --git a/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc b/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc index e39c65b3..af54dbe7 100644 --- a/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc @@ -2,7 +2,7 @@ = Primary parameters of the ServiceTelemetry object [role="_abstract"] -The `ServiceTelemetry` object comprises the following primary configuration parameters: +You can set the following primary configuration parameters of the `ServiceTelemetry` object to configure your {ProjectShort} deployment: * `alerting` * `backends` @@ -11,16 +11,13 @@ The `ServiceTelemetry` object comprises the following primary configuration para * `highAvailability` * `transports` - -You can configure each of these configuration parameters to provide different features in an {ProjectShort} deployment. - [id="backends_{context}"] [discrete] == The backends parameter -Use the `backends` parameter to control which storage back ends are available for storage of metrics and events, and to control the enablement of Smart Gateways that the `clouds` parameter defines. For more information, see xref:clouds_assembly-installing-the-core-components-of-stf[]. +Set the value of the `backends` parameter to allocate the storage back ends for metrics and events, and to enable the Smart Gateways that the `clouds` parameter defines. For more information, see xref:clouds_assembly-installing-the-core-components-of-stf[]. -You can use Prometheus as the metrics storage back end and Elasticsearch as the events storage back end. You can use the Service Telemetry Operator to create other custom resource objects that the Prometheus Operator and Elastic Cloud on Kubernetes Operator watch to create Prometheus and Elasticsearch workloads. 
+You can use Prometheus as the metrics storage back end and Elasticsearch as the events storage back end. The Service Telemetry Operator can create custom resource objects that the Prometheus Operator watches to create a Prometheus workload. You need an external deployment of Elasticsearch to store events. [discrete] === Enabling Prometheus as a storage back end for metrics @@ -57,11 +54,11 @@ spec: [discrete] === Configuring persistent storage for Prometheus -Use the additional parameters that are defined in `backends.metrics.prometheus.storage.persistent` to configure persistent storage options for Prometheus, such as storage class and volume size. +Set the additional parameters in `backends.metrics.prometheus.storage.persistent` to configure persistent storage options for Prometheus, such as storage class and volume size. -Use `storageClass` to define the back end storage class. If you do not set this parameter, the Service Telemetry Operator uses the default storage class for the {OpenShift} cluster. +Define the back end storage class with the `storageClass` parameter. If you do not set this parameter, the Service Telemetry Operator uses the default storage class for the {OpenShift} cluster. -Use the `pvcStorageRequest` parameter to define the minimum required volume size to satisfy the storage request. If volumes are statically defined, it is possible that a volume size larger than requested is used. By default, Service Telemetry Operator requests a volume size of `20G` (20 Gigabytes). +Define the minimum required volume size for the storage request with the `pvcStorageRequest` parameter. By default, Service Telemetry Operator requests a volume size of `20G` (20 Gigabytes). .Procedure @@ -83,7 +80,7 @@ standard-csi cinder.csi.openstack.org Delete WaitForFirstCons $ oc edit stf default ---- -. Set the value of the backends.metrics.prometheus.enabled parameter to `true` and the value of backends.metrics.prometheus.storage.strategy to `persistent`: +. 
Set the value of the `backends.metrics.prometheus.enabled` parameter to `true` and the value of `backends.metrics.prometheus.storage.strategy` to `persistent`: + [source,yaml] ---- @@ -108,7 +105,17 @@ spec: [discrete] === Enabling Elasticsearch as a storage back end for events -To enable Elasticsearch as a storage back end for events, you must configure the `ServiceTelemetry` object. +[NOTE] +==== +Previous versions of {ProjectShort} managed Elasticsearch objects for the community supported Elastic Cloud on Kubernetes Operator (ECK). Elasticsearch management functionality is deprecated in {ProjectShort} 1.5.3. You can still forward to an existing Elasticsearch instance that you deploy and manage with ECK, but you cannot manage the creation of Elasticsearch objects. When you upgrade your {ProjectShort} deployment, existing Elasticsearch objects and deployments remain, but are no longer managed by {ProjectShort}. + +ifeval::["{build}" == "downstream"] +For more information about using Elasticsearch with {ProjectShort}, see the Red Hat Knowledge Base article https://access.redhat.com/articles/7031236[Using Service Telemetry Framework with Elasticsearch]. +endif::[] + +==== + +To enable events forwarding to Elasticsearch as a storage back end, you must configure the `ServiceTelemetry` object. .Procedure @@ -119,7 +126,7 @@ To enable Elasticsearch as a storage back end for events, you must configure the $ oc edit stf default ---- -. Set the value of the backends.events.elasticsearch.enabled parameter to `true`: +. 
Set the value of the `backends.events.elasticsearch.enabled` parameter to `true` and configure the `hostUrl` with the relevant Elasticsearch instance: + [source,yaml] ---- @@ -134,66 +141,39 @@ spec: events: elasticsearch: enabled: true + forwarding: + hostUrl: https://external-elastic-http.domain:9200 + tlsServerName: "" + tlsSecretName: elasticsearch-es-cert + userSecretName: elasticsearch-es-elastic-user + useBasicAuth: true + useTls: true ---- -[id="backends-configuring-persistent-storage-for-elasticsearch_{context}"] -[discrete] -=== Configuring persistent storage for Elasticsearch - -Use the additional parameters defined in `backends.events.elasticsearch.storage.persistent` to configure persistent storage options for Elasticsearch, such as storage class and volume size. - -Use `storageClass` to define the back end storage class. If you do not set this parameter, the Service Telemetry Operator uses the default storage class for the {OpenShift} cluster. - -Use the `pvcStorageRequest` parameter to define the minimum required volume size to satisfy the storage request. If volumes are statically defined, it is possible that a volume size larger than requested is used. By default, Service Telemetry Operator requests a volume size of `20Gi` (20 Gibibytes). - -.Procedure - -. List the available storage classes: +. Create the secret named in the `userSecretName` parameter to store the basic `auth` credentials + -[source,bash,options="nowrap"] +[source,bash] ---- -$ oc get storageclasses -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -csi-manila-ceph manila.csi.openstack.org Delete Immediate false 20h -standard (default) kubernetes.io/cinder Delete WaitForFirstConsumer true 20h -standard-csi cinder.csi.openstack.org Delete WaitForFirstConsumer true 20h +$ oc create secret generic elasticsearch-es-elastic-user --from-literal=elastic='' ---- -. Edit the `ServiceTelemetry` object: +. 
Copy the CA certificate into a file named `EXTERNAL-ES-CA.pem`, then create the secret named in the `tlsSecretName` parameter to make it available to {ProjectShort} + [source,bash] ---- -$ oc edit stf default ----- +$ cat EXTERNAL-ES-CA.pem +-----BEGIN CERTIFICATE----- +[...] +-----END CERTIFICATE----- -. Set the value of the backends.events.elasticsearch.enabled parameter to `true` and the value of backends.events.elasticsearch.storage.strategy to `persistent`: -+ -[source,yaml] ----- -apiVersion: infra.watch/v1beta1 -kind: ServiceTelemetry -metadata: - name: default - namespace: service-telemetry -spec: - [...] - backends: - events: - elasticsearch: - enabled: true - version: 7.16.1 - storage: - strategy: persistent - persistent: - storageClass: standard-csi - pvcStorageRequest: 50G +$ oc create secret generic elasticsearch-es-cert --from-file=ca.crt=EXTERNAL-ES-CA.pem ---- [id="clouds_{context}"] [discrete] == The clouds parameter -Use the `clouds` parameter to define which Smart Gateway objects deploy, thereby providing the interface for multiple monitored cloud environments to connect to an instance of {ProjectShort}. If a supporting back end is available, then metrics and events Smart Gateways for the default cloud configuration are created. By default, the Service Telemetry Operator creates Smart Gateways for `cloud1`. +Configure the `clouds` parameter to define which Smart Gateway objects deploy and provide the interface for monitored cloud environments to connect to an instance of {ProjectShort}. If a supporting back end is available, metrics and events Smart Gateways for the default cloud configuration are created. By default, the Service Telemetry Operator creates Smart Gateways for `cloud1`. ifndef::include_when_13[] You can create a list of cloud objects to control which Smart Gateways are created for the defined clouds. Each cloud consists of data types and collectors. Data types are `metrics` or `events`. 
Each data type consists of a list of collectors, the message bus subscription address, and a parameter to enable debugging. Available collectors for metrics are `collectd`, `ceilometer`, and `sensubility`. Available collectors for events are `collectd` and `ceilometer`. Ensure that the subscription address for each of these collectors is unique for every cloud, data type, and collector combination. @@ -254,22 +234,27 @@ You can use the optional Boolean parameter `debugEnabled` within the `collectors [discrete] == The alerting parameter -Use the `alerting` parameter to control creation of an Alertmanager instance and the configuration of the storage back end. By default, `alerting` is enabled. For more information, see xref:alerts_assembly-advanced-features[]. +Set the `alerting` parameter to create an Alertmanager instance and a storage back end. By default, `alerting` is enabled. For more information, see xref:alerts_assembly-advanced-features[]. [id="graphing_{context}"] [discrete] == The graphing parameter -Use the `graphing` parameter to control the creation of a Grafana instance. By default, `graphing` is disabled. For more information, see xref:dashboards_assembly-advanced-features[]. +Set the `graphing` parameter to create a Grafana instance. By default, `graphing` is disabled. For more information, see xref:dashboards_assembly-advanced-features[]. [id="highAvailability_{context}"] [discrete] == The highAvailability parameter -Use the `highAvailability` parameter to control the instantiation of multiple copies of {ProjectShort} components to reduce recovery time of components that fail or are rescheduled. By default, `highAvailability` is disabled. For more information, see xref:high-availability_assembly-advanced-features[]. +[WARNING] +==== +{ProjectShort} high availability (HA) mode is deprecated and is not supported in production environments. 
{OpenShift} is a highly-available platform, and you can cause issues and complicate debugging in {ProjectShort} if you enable HA mode. +==== + +Set the `highAvailability` parameter to instantiate multiple copies of {ProjectShort} components to reduce recovery time of components that fail or are rescheduled. By default, `highAvailability` is disabled. For more information, see xref:high-availability_assembly-advanced-features[]. [id="transports_{context}"] [discrete] == The transports parameter -Use the `transports` parameter to control the enablement of the message bus for a {ProjectShort} deployment. The only transport currently supported is {MessageBus}. By default, the `qdr` transport is enabled. +Set the `transports` parameter to enable the message bus for a {ProjectShort} deployment. The only transport currently supported is {MessageBus}. By default, the `qdr` transport is enabled. diff --git a/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc b/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc index 3e7c325c..f9cfc7c8 100644 --- a/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc @@ -11,6 +11,4 @@ The amount of resources that you require to run {Project} ({ProjectShort}) depen .Additional resources -* For recommendations about sizing for metrics collection, see https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. - -* For information about sizing requirements for Elasticsearch, see https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html. +* For recommendations about sizing for metrics collection, see the Red Hat Knowledge Base article https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. 
diff --git a/doc-Service-Telemetry-Framework/modules/con_stf-architecture-changes.adoc b/doc-Service-Telemetry-Framework/modules/con_stf-architecture-changes.adoc new file mode 100644 index 00000000..40201d47 --- /dev/null +++ b/doc-Service-Telemetry-Framework/modules/con_stf-architecture-changes.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// +:appendix-caption: Appendix +// This module can be included from assemblies using the following include statement: +// include::/con_stf-architecture-changes.adoc[leveloffset=+1] + +[id="stf-architecture-changes_{context}"] += {ProjectShort} Architecture Changes + +In releases of {ProjectShort} prior to 1.5.3, the Service Telemetry Operator requested instances of Elasticsearch from the Elastic Cloud on Kubernetes (ECK) Operator. {ProjectShort} now uses a forwarding model, where events are forwarded from a Smart Gateway instance to a user-provided instance of Elasticsearch. + +[NOTE] +==== +The management of Elasticsearch instances by the Service Telemetry Operator is deprecated. +==== + +In new `ServiceTelemetry` deployments, the `observabilityStrategy` parameter has a value of `use_redhat`, which does not request Elasticsearch instances from ECK. Deployments of `ServiceTelemetry` with {ProjectShort} version 1.5.2 or older that were updated to 1.5.3 have the `observabilityStrategy` parameter set to `use_community`, which matches the previous architecture. + +If a user previously deployed an Elasticsearch instance with {ProjectShort}, the Service Telemetry Operator updates the `ServiceTelemetry` custom resource object to have the `observabilityStrategy` parameter set to `use_community`, and functions similarly to previous releases. For more information about observability strategies, see xref:observability-strategy-in-service-telemetry-framework_assembly-preparing-your-ocp-environment-for-stf[]. + +It is recommended that users of {ProjectShort} migrate to the `use_redhat` observability strategy. 
For more information about migration to the `use_redhat` observability strategy, see the Red Hat Knowledge Base article link:https://access.redhat.com/articles/7011708[Migrating Service Telemetry Framework to fully supported operators]. diff --git a/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc b/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc index e232e7a2..c81d282b 100644 --- a/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc @@ -11,21 +11,25 @@ [role="_abstract"] {Project} ({ProjectShort}) uses a client-server architecture, in which {OpenStack} ({OpenStackShort}) is the client and {OpenShift} is the server. -{ProjectShort} consists of the following components: +By default, {ProjectShort} collects, transports, and stores metrics information. + +You can collect {OpenStackShort} events data, transport it with the message bus, and forward it to a user-provided Elasticsearch from the Smart Gateways, but this option is deprecated. +// For more information about {ProjectShort} when configured with events, see . Tracked via https://issues.redhat.com/browse/STF-1552 +{ProjectShort} consists of the following components: * Data collection -** collectd: Collects infrastructure metrics and events. -** Ceilometer: Collects {OpenStackShort} metrics and events. +** collectd: Collects infrastructure metrics and events on {OpenStackShort}. +** Ceilometer: Collects metrics and events on {OpenStackShort}. * Transport -** {MessageBus}: An AMQP 1.x compatible messaging bus that provides fast and reliable data transport to transfer the metrics to {ProjectShort} for storage. -** Smart Gateway: A Golang application that takes metrics and events from the AMQP 1.x bus to deliver to Elasticsearch or Prometheus. 
+** {MessageBus}: An AMQP 1.x compatible messaging bus that provides fast and reliable data transport to transfer the metrics from {OpenStackShort} to {ProjectShort} for storage or forwarding. +** Smart Gateway: A Golang application that takes metrics and events from the AMQP 1.x bus to deliver to Prometheus or an external Elasticsearch. * Data storage ** Prometheus: Time-series data storage that stores {ProjectShort} metrics received from the Smart Gateway. -** Elasticsearch: Events data storage that stores {ProjectShort} events received from the Smart Gateway. -* Observation ** Alertmanager: An alerting tool that uses Prometheus alert rules to manage alerts. +* User provided components ** Grafana: A visualization and analytics application that you can use to query, visualize, and explore data. +** Elasticsearch: Events data storage that stores {OpenStackShort} events received and forwarded by the Smart Gateway. The following table describes the application of the client and server components: @@ -51,6 +55,10 @@ The following table describes the application of the client and server component |no |yes +|Grafana +|no +|yes + |collectd |yes |no @@ -66,11 +74,11 @@ To ensure that the monitoring platform can report operational problems with your [[osp-stf-overview]] .Service Telemetry Framework architecture overview -image::OpenStack_STF_Overview_37_1019_arch.png[Service Telemetry Framework architecture overview] +image::363_OpenStack_STF_updates_1223_arch_2.png[Service Telemetry Framework architecture overview] For client side metrics, collectd provides infrastructure metrics without project data, and Ceilometer provides {OpenStackShort} platform data based on projects or user workload. Both Ceilometer and collectd deliver data to Prometheus by using the {MessageBus} transport, delivering the data through the message bus. 
On the server side, a Golang application called the Smart Gateway takes the data stream from the bus and exposes it as a local scrape endpoint for Prometheus. -If you plan to collect and store events, collectd and Ceilometer deliver event data to the server side by using the {MessageBus} transport. Another Smart Gateway writes the data to the Elasticsearch datastore. +When you collect and store events, collectd and Ceilometer deliver event data to the server side by using the {MessageBus} transport. Another Smart Gateway forwards the data to a user-provided Elasticsearch datastore. Server-side {ProjectShort} monitoring infrastructure consists of the following layers: @@ -85,4 +93,5 @@ endif::[] [[osp-stf-server-side-monitoring]] .Server-side STF monitoring infrastructure -image::STF_Overview_37_0819_deployment_prereq.png[Server-side STF monitoring infrastructure] +image::363_OpenStack_STF_updates_0923_deployment_prereq.png[Server-side STF monitoring infrastructure] + diff --git a/doc-Service-Telemetry-Framework/modules/con_support-for-stf.adoc b/doc-Service-Telemetry-Framework/modules/con_support-for-stf.adoc index bfb3ebbc..f9bc7c19 100644 --- a/doc-Service-Telemetry-Framework/modules/con_support-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_support-for-stf.adoc @@ -2,7 +2,7 @@ = Support for {Project} [role="_abstract"] -Red Hat supports the core Operators and workloads, including {MessageBus}, Service Telemetry Operator, and Smart Gateway Operator. Red Hat does not support the community Operators or workload components, such as Elasticsearch, Prometheus, Alertmanager, Grafana, and their Operators. +Red Hat supports the core Operators and workloads, including {MessageBus}, {ObservabilityOperator} (Prometheus, Alertmanager), Service Telemetry Operator, and Smart Gateway Operator. Red Hat does not support the community Operators or workload components, including Elasticsearch, Grafana, and their Operators. 
You can only deploy {ProjectShort} in a fully connected network environment. You cannot deploy {ProjectShort} in {OpenShift}-disconnected environments or network proxy environments. diff --git a/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc b/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc index a804b4ad..881454ac 100644 --- a/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc @@ -2,9 +2,8 @@ = Configuring the duration for the TLS certificates [role="_abstract"] -To configure the duration of the TLS certificates that you use for the connections with -Elasticsearch and {MessageBus} in {Project} ({ProjectShort}), -modify the `ServiceTelemetry` object and configure the `certificates` parameters. +To configure the duration of the TLS certificates that you use for the {MessageBus} connection in {Project} ({ProjectShort}), +modify the `ServiceTelemetry` object and configure the `certificates` parameter. [id="configuration-parameters-for-tls-certificates-duration_{context}"] == Configuration parameters for the TLS certificates @@ -18,30 +17,7 @@ caCertDuration:: The requested 'duration' or lifetime of the CA Certificate. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. Default value is `70080h`. -NOTE:: The default duration of certificates is long, because you usually copy a subset of them in the {OpenStack} deployment when the certificates renew. 
For more information about the QDR CA Certificate renewal process, see xref:assembly-renewing-the-amq-interconnect-certificate_assembly[] - -The `certificates` parameter for Elasticsearch is part of the `backends.events.elasticsearch` definition and is configured in the `ServiceTelemetry` object: - -[source,yaml,options="nowrap"] ----- -apiVersion: infra.watch/v1beta1 -kind: ServiceTelemetry -metadata: - name: default - namespace: service-telemetry -spec: -... - backends: - ... - events: - elasticsearch: - enabled: true - version: 7.16.1 - certificates: - endpointCertDuration: 70080h - caCertDuration: 70080h -... ----- +NOTE: The default duration of certificates is long, because you usually copy a subset of them in the {OpenStack} deployment when the certificates renew. For more information about the QDR CA Certificate renewal process, see xref:assembly-renewing-the-amq-interconnect-certificate_assembly[]. You can configure the `certificates` parameter for QDR that is part of the `transports.qdr` definition in the `ServiceTelemetry` object: diff --git a/doc-Service-Telemetry-Framework/modules/proc_checking-for-an-expired-amq-interconnect-ca-certificate.adoc b/doc-Service-Telemetry-Framework/modules/proc_checking-for-an-expired-amq-interconnect-ca-certificate.adoc index f8e9c1ed..2549f72f 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_checking-for-an-expired-amq-interconnect-ca-certificate.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_checking-for-an-expired-amq-interconnect-ca-certificate.adoc @@ -18,7 +18,7 @@ $ oc project service-telemetry + [source,bash,options="nowrap"] ---- -$ oc exec -it $(oc get po -l application=default-interconnect -o jsonpath='{.items[0].metadata.name}') -- qdstat --connections | grep Router | wc +$ oc exec -it deploy/default-interconnect -- qdstat --connections | grep Router | wc 0 0 0 ---- diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-custom-alerts.adoc 
b/doc-Service-Telemetry-Framework/modules/proc_configuring-custom-alerts.adoc index e0d1a52c..a53057b5 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-custom-alerts.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-custom-alerts.adoc @@ -32,7 +32,7 @@ You can add custom alerts to the `PrometheusRule` object that you created in xre + [source,bash] ---- -$ oc edit prometheusrules prometheus-alarm-rules +$ oc edit prometheusrules.monitoring.rhobs prometheus-alarm-rules ---- . Edit the `PrometheusRules` manifest. diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc index 864b681c..e406ae5d 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc @@ -2,10 +2,7 @@ = Configuring an alternate observability strategy [role="_abstract"] -To configure {ProjectShort} to skip the deployment of storage, visualization, and alerting backends, add `observabilityStrategy: none` to the ServiceTelemetry spec. In this mode, only {MessageBus} routers and metrics Smart Gateways are deployed, and you must configure an external Prometheus-compatible system to collect metrics from the {ProjectShort} Smart Gateways. - -[NOTE] -Currently, only metrics are supported when you set `observabilityStrategy` to `none`. Events Smart Gateways are not deployed. +To skip the deployment of storage, visualization, and alerting backends, add `observabilityStrategy: none` to the ServiceTelemetry spec. In this mode, you only deploy {MessageBus} routers and Smart Gateways, and you must configure an external Prometheus-compatible system to collect metrics from the {ProjectShort} Smart Gateways, and an external Elasticsearch to receive the forwarded events. .Procedure . 
Create a `ServiceTelemetry` object with the property `observabilityStrategy: none` in the `spec` parameter. The manifest results in a default deployment of {ProjectShort} that is suitable for receiving telemetry from a single cloud with all metrics collector types. @@ -23,7 +20,7 @@ spec: EOF ---- + -. Delete the left over objects that are managed by community operators +. Delete the remaining objects that are managed by community operators + [source,bash] ---- @@ -36,7 +33,9 @@ $ for o in alertmanager/default prometheus/default elasticsearch/elasticsearch g ---- $ oc get pods NAME READY STATUS RESTARTS AGE +default-cloud1-ceil-event-smartgateway-6f8547df6c-p2db5 3/3 Running 0 132m default-cloud1-ceil-meter-smartgateway-59c845d65b-gzhcs 3/3 Running 0 132m +default-cloud1-coll-event-smartgateway-bf859f8d77-tzb66 3/3 Running 0 132m default-cloud1-coll-meter-smartgateway-75bbd948b9-d5phm 3/3 Running 0 132m ifndef::include_when_13[] default-cloud1-sens-meter-smartgateway-7fdbb57b6d-dh2g9 3/3 Running 0 132m @@ -49,4 +48,5 @@ smart-gateway-operator-58d77dcf7-6xsq7 1/1 Running 0 .Additional resources -For more information about configuring additional clouds or to change the set of supported collectors, see xref:deploying-smart-gateways_assembly-completing-the-stf-configuration[] +* For more information about configuring additional clouds or to change the set of supported collectors, see xref:deploying-smart-gateways_assembly-completing-the-stf-configuration[]. +* To migrate an existing {ProjectShort} deployment to `use_redhat`, see the Red Hat Knowledge Base article link:https://access.redhat.com/articles/7011708[Migrating {Project} to fully supported operators]. 
diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc index 69d8ff10..63d52520 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc @@ -10,9 +10,9 @@ When you deploy the {OpenStack} ({OpenStackShort}) overcloud deployment using di .Procedure // NOTE: not required until available for RHOSP 17.1 -//ifdef::include_when_13,include_when_17[] -//. xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[] -//endif::include_when_13,include_when_17[] +ifdef::include_when_13,include_when_17[] +. xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[] +endif::include_when_13,include_when_17[] . xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[Retrieving the {MessageBus} route address] . 
xref:creating-the-base-configuration-for-director-operator-for-stf_assembly-completing-the-stf-configuration-using-director-operator[Creating the base configuration for director Operator for {ProjectShort}] @@ -23,7 +23,11 @@ When you deploy the {OpenStack} ({OpenStackShort}) overcloud deployment using di .Additional resources -* For more information about deploying an OpenStack cloud using director Operator, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{vernum}/html/rhosp_director_operator_for_openshift_container_platform/index -ifdef::include_when_16_1[] -* To collect data through {MessageBus}, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{vernum}/html/operational_measurements/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. -endif::include_when_16_1[] +ifdef::include_when_16_2[] +* For more information about deploying an OpenStack cloud using director Operator, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/rhosp_director_operator_for_openshift_container_platform/index +* To collect data through {MessageBus}, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/16.2/html/operational_measurements/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. +endif::[] +ifdef::include_when_17_1[] +* For more information about deploying an OpenStack cloud using director Operator, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/17.1/html/deploying_an_overcloud_in_a_red_hat_openshift_container_platform_cluster_with_director_operator/index +* To collect data through {MessageBus}, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/17.1/html/managing_overcloud_observability/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. 
+endif::[] diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc index ceba9322..2b7617c3 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc @@ -9,6 +9,7 @@ As part of the {OpenStack} ({OpenStackShort}) overcloud deployment using directo ifdef::include_when_13,include_when_17[] . xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[] endif::include_when_13,include_when_17[] +. xref:retrieving-the-qdr-password_assembly-completing-the-stf-configuration[Retrieving the {MessageBus} password] . xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[Retrieving the {MessageBus} route address] . xref:creating-the-base-configuration-for-stf_assembly-completing-the-stf-configuration[Creating the base configuration for {ProjectShort}] . xref:configuring-the-stf-connection-for-the-overcloud_assembly-completing-the-stf-configuration[Configuring the {ProjectShort} connection for the overcloud] @@ -16,7 +17,11 @@ endif::include_when_13,include_when_17[] . xref:validating-clientside-installation_assembly-completing-the-stf-configuration[Validating client-side installation] .Additional resources +ifdef::include_when_16_2[] * For more information about deploying an OpenStack cloud using director, see link:{defaultURL}/director_installation_and_usage/index[Director Installation and Usage]. -ifdef::include_when_16_1[] * To collect data through {MessageBus}, see link:{defaultURL}/operational_measurements/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. 
-endif::include_when_16_1[] +endif::[] +ifdef::include_when_17_1[] +* For more information about deploying an OpenStack cloud using director, see link:{defaultURL}/installing_and_managing_red_hat_openstack_platform_with_director/index[Installing and managing Red Hat OpenStack Platform with director]. +* To collect data through {MessageBus}, see link:{defaultURL}/managing_overcloud_observability/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. +endif::[] diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-snmp-traps.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-snmp-traps.adoc index 1abbff32..2f1d5e45 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-snmp-traps.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-snmp-traps.adoc @@ -52,7 +52,7 @@ For more information about available parameters for `snmpTraps`, see xref:config You can create alerts that are configured for delivery by SNMP traps by adding labels that are parsed by the prometheus-webhook-snmp middleware to define the trap information and delivered object identifiers (OID). Adding the `oid` or `severity` labels is only required if you need to change the default values for a particular alert definition. -NOTE:: When you set the oid label, the top-level SNMP trap OID changes, but the sub-OIDs remain defined by the global `trapOidPrefix` value plus the child OID values `.1.1.1` through `.1.1.9`. For more information about the MIB definition, see xref:overview-of-the-mib-definition_{context}[]. +NOTE: When you set the oid label, the top-level SNMP trap OID changes, but the sub-OIDs remain defined by the global `trapOidPrefix` value plus the child OID values `.1.1.1` through `.1.1.9`. For more information about the MIB definition, see xref:overview-of-the-mib-definition_{context}[]. .Procedure @@ -67,10 +67,10 @@ $ oc project service-telemetry . 
Create a `PrometheusRule` object that contains the alert rule and an `oid` label that contains the SNMP trap OID override value: + -[source,bash] +[source,yaml] ---- $ oc apply -f - < MetricsQdrSSLProfiles: - name: sslProfile - CeilometerQdrEventsConfig: - driver: amqp - topic: cloud1-event - CeilometerQdrMetricsConfig: driver: amqp topic: cloud1-metering CollectdAmqpInstances: - cloud1-notify: - notify: true - format: JSON - presettle: false cloud1-telemetry: format: JSON presettle: false diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc index 484e7c45..769fb11c 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc @@ -2,13 +2,14 @@ = Configuring the {ProjectShort} connection for the overcloud [role="_abstract"] -To configure the {Project} ({ProjectShort}) connection, you must create a file that contains the connection configuration of the {MessageBus} for the overcloud to the {ProjectShort} deployment. Enable the collection of events and storage of the events in {ProjectShort} and deploy the overcloud. The default configuration is for a single cloud instance with the default message bus topics. For configuration of multiple cloud deployments, see xref:configuring-multiple-clouds_assembly-completing-the-stf-configuration[]. +To configure the {Project} ({ProjectShort}) connection, you must create a file that contains the connection configuration of the {MessageBus} for the overcloud to the {ProjectShort} deployment. Enable the collection of metrics and storage of the metrics in {ProjectShort} and deploy the overcloud. The default configuration is for a single cloud instance with the default message bus topics. 
For configuration of multiple cloud deployments, see xref:configuring-multiple-clouds_assembly-completing-the-stf-configuration[]. .Prerequisites ifdef::include_when_13,include_when_17[] * Retrieve the CA certificate from the {MessageBus} deployed by {ProjectShort}. For more information, see xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[]. endif::include_when_13,include_when_17[] +* Retrieve the {MessageBus} password. For more information, see xref:retrieving-the-qdr-password_assembly-completing-the-stf-configuration[]. * Retrieve the {MessageBus} route address. For more information, see xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. // The following configuration should match the contents in modules/proc_creating-openstack-environment-file-for-multiple-clouds.adoc. If you have changes to make, please make the same changes to both files. @@ -32,12 +33,17 @@ resource_registry: OS::TripleO::Services::Collectd: /usr/share/openstack-tripleo-heat-templates/deployment/metrics/collectd-container-puppet.yaml parameter_defaults: + ExtraConfig: + qdr::router_id: "%{::hostname}.cloud1" + MetricsQdrConnectors: - host: default-interconnect-5671-service-telemetry.apps.infra.watch port: 443 role: edge verifyHostname: false sslProfile: sslProfile + saslUsername: guest@default-interconnect + saslPassword: pass:<password> MetricsQdrSSLProfiles: - name: sslProfile @@ -48,19 +54,11 @@ ifdef::include_when_13,include_when_17[] -----END CERTIFICATE----- endif::include_when_13,include_when_17[] - CeilometerQdrEventsConfig: - driver: amqp - topic: cloud1-event - CeilometerQdrMetricsConfig: driver: amqp topic: cloud1-metering CollectdAmqpInstances: - cloud1-notify: - notify: true - format: JSON - presettle: false cloud1-telemetry: format: JSON presettle: false @@ -70,15 +68,14 @@ ifndef::include_when_13[] endif::[] ---- +* The `qdr::router_id` configuration is to override the default value which uses the 
fully-qualified domain name (FQDN) of the host. In some cases the FQDN can result in a router ID length of greater than 61 characters which results in failed QDR connections. For deployments with shorter FQDN values this is not necessary. * The `resource_registry` configuration directly loads the collectd service because you do not include the `collectd-write-qdr.yaml` environment file for multiple cloud deployments. -* Replace the `host` parameter with the value that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. +* Replace the `host` sub-parameter of `MetricsQdrConnectors` with the value that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. +* Replace the `<password>` portion of the `saslPassword` sub-parameter of `MetricsQdrConnectors` with the value you retrieved in xref:retrieving-the-qdr-password_assembly-completing-the-stf-configuration[]. ifdef::include_when_13,include_when_17[] * Replace the `caCertFileContent` parameter with the contents retrieved in xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[]. endif::include_when_13,include_when_17[] -* Replace the `host` sub-parameter of `MetricsQdrConnectors` with the value that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. -* Set `topic` value of `CeilometerQdrEventsConfig` to define the topic for Ceilometer events. The value is a unique topic idenifier for the cloud such as `cloud1-event`. * Set `topic` value of `CeilometerQdrMetricsConfig.topic` to define the topic for Ceilometer metrics. The value is a unique topic identifier for the cloud such as `cloud1-metering`. -* Set `CollectdAmqpInstances` sub-paramter to define the topic for collectd events. The section name is a unique topic identifier for the cloud such as `cloud1-notify`. 
* Set `CollectdAmqpInstances` sub-parameter to define the topic for collectd metrics. The section name is a unique topic identifier for the cloud such as `cloud1-telemetry`. ifndef::include_when_13[] * Set `CollectdSensubilityResultsChannel` to define the topic for collectd-sensubility events. The value is a unique topic identifier for the cloud such as `sensubility/cloud1-telemetry`. diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc index 74c48b7d..14c6c2d9 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc @@ -8,14 +8,13 @@ To configure the duration of the TLS certificates to use with {Project} ({Projec * You didn't deploy an instance of Service Telemetry Operator already. -NOTE:: When you create the `ServiceTelemetry` object, the required certificates and their secrets for {ProjectShort} are also created. -For more information about how to modify the certificates and the secrets, see: xref:assembly-renewing-the-amq-interconnect-certificate_assembly[] +NOTE: When you create the `ServiceTelemetry` object, the required certificates and their secrets for {ProjectShort} are also created. +For more information about how to modify the certificates and the secrets, see: xref:assembly-renewing-the-amq-interconnect-certificate_assembly[]. The following procedure is valid for new {ProjectShort} deployments. .Procedure -To edit the duration of the TLS certificates, you can set the Elasticsearch `endpointCertDuration`, for example `26280h` for 3 years, and set the QDR `caCertDuration`, for example `87600h` for 10 years. -You can use the default value of 8 years for the CA certificate for Elasticsearch and endpoint certificate: +. 
To edit the duration of the TLS certificate, you can set the QDR `caCertDuration`, for example `87600h` for 10 years: + [source,yaml,options="nowrap",role="white-space-pre"] ---- @@ -26,12 +25,6 @@ metadata: name: default namespace: service-telemetry spec: - backends: - events: - elasticsearch: - enabled: true - certificates: - endpointCertDuration: 26280h transport: qdr: enabled: true @@ -42,13 +35,10 @@ EOF .Verification -. Verify that the expiry date for the certificates is correct: +. Verify that the expiry date for the certificate is correct: + [source,bash,options="nowrap"] ---- -$ oc get secret elasticsearch-es-cert -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -in - -text | grep "Not After" - Not After : Mar 9 21:00:16 2026 GMT - $ oc get secret default-interconnect-selfsigned -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -in - -text | grep "Not After" Not After : Mar 9 21:00:16 2033 GMT ----- \ No newline at end of file +---- diff --git a/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc b/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc index 005a2a20..a9157e89 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc @@ -4,141 +4,80 @@ [role="_abstract"] Create a `ServiceTelemetry` object in {OpenShift} to result in the Service Telemetry Operator creating the supporting components for a {Project} ({ProjectShort}) deployment. For more information, see xref:primary-parameters-of-the-servicetelemetry-object_assembly-installing-the-core-components-of-stf[]. +.Prerequisites + +* You have deployed {ProjectShort} and the supporting operators. For more information, see xref:deploying-stf-to-the-openshift-environment_assembly-installing-the-core-components-of-stf[]. 
+* You have installed {ObservabilityOperator} to allow storage of metrics. For more information, see xref:deploying-observability-operator_assembly-installing-the-core-components-of-stf[]. +* You have installed cert-manager for Red Hat OpenShift to allow certificate management. For more information, see xref:deploying-certificate-manager-for-openshift-operator_assembly-installing-the-core-components-of-stf[]. + .Procedure -. To create a `ServiceTelemetry` object that results in an {ProjectShort} deployment that uses the default values, create a `ServiceTelemetry` object with an empty `spec` parameter: +. Log in to your {OpenShift} environment where {ProjectShort} is hosted. + +. To deploy {ProjectShort} that results in the core components for metrics delivery being configured, create a `ServiceTelemetry` object: + [source,yaml,options="nowrap",role="white-space-pre"] ---- $ oc apply -f - < + +// This module can be included from assemblies using the following include statement: +// include::/proc_removing-the-cert-manager-operator.adoc[leveloffset=+1] + +// The file name and the ID are based on the module title. For example: +// * file name: proc_doing-procedure-a.adoc +// * ID: [id='proc_doing-procedure-a_{context}'] +// * Title: = Doing procedure A +// +// The ID is used as an anchor for linking to the module. Avoid changing +// it after the module has been published to ensure existing links are not +// broken. +// +// The `context` attribute enables module reuse. Every module's ID includes +// {context}, which ensures that the module has a unique ID even if it is +// reused multiple times in a guide. +// +// Start the title with a verb, such as Creating or Create. See also +// _Wording of headings_ in _The IBM Style Guide_. 
+ +[id="removing-the-cert-manager-operator_{context}"] += Removing the cert-manager Operator for Red Hat OpenShift + +[role="_abstract"] +If you are not using the cert-manager Operator for Red Hat OpenShift for any other applications, delete the Subscription, ClusterServiceVersion, and CustomResourceDefinitions. + +For more information about removing the cert-manager Operator for Red Hat OpenShift, see link:https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/security/cert_manager_operator/cert-manager-operator-uninstall.html[Removing cert-manager Operator for Red Hat OpenShift] in the _OpenShift Container Platform Documentation_. + +.Additional resources + +* link:https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/admin/olm-deleting-operators-from-cluster.html[Deleting Operators from a cluster]. diff --git a/doc-Service-Telemetry-Framework/modules/ref_removing-the-observability-operator.adoc b/doc-Service-Telemetry-Framework/modules/ref_removing-the-observability-operator.adoc new file mode 100644 index 00000000..aac76fef --- /dev/null +++ b/doc-Service-Telemetry-Framework/modules/ref_removing-the-observability-operator.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// + +// This module can be included from assemblies using the following include statement: +// include::/ref_removing-the-observability-operator.adoc[leveloffset=+1] + +// The file name and the ID are based on the module title. For example: +// * file name: proc_doing-procedure-a.adoc +// * ID: [id='proc_doing-procedure-a_{context}'] +// * Title: = Doing procedure A +// +// The ID is used as an anchor for linking to the module. Avoid changing +// it after the module has been published to ensure existing links are not +// broken. +// +// The `context` attribute enables module reuse. 
Every module's ID includes +// {context}, which ensures that the module has a unique ID even if it is +// reused multiple times in a guide. +// +// Start the title with a verb, such as Creating or Create. See also +// _Wording of headings_ in _The IBM Style Guide_. + +[id="removing-the-observability-operator_{context}"] += Removing the {ObservabilityOperator} + +[role="_abstract"] +If you are not using the {ObservabilityOperator} for any other applications, delete the Subscription, ClusterServiceVersion, and CustomResourceDefinitions. + +For more information about removing the {ObservabilityOperator}, see link:https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/monitoring/cluster_observability_operator/installing-the-cluster-observability-operator.html#uninstalling-the-cluster-observability-operator-using-the-web-console_installing_the_cluster_observability_operator[Uninstalling the Cluster Observability Operator using the web console] in the _OpenShift Container Platform Documentation_. + +.Additional resources + +* link:https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/admin/olm-deleting-operators-from-cluster.html[Deleting Operators from a cluster].