diff --git a/common/global/rhosp_attributes.adoc b/common/global/rhosp_attributes.adoc index 645a5204..a463d3df 100644 --- a/common/global/rhosp_attributes.adoc +++ b/common/global/rhosp_attributes.adoc @@ -1,382 +1,33 @@ // rhosp_attributes.adoc -// Use this file to define OSP related acronyms, abbreviations, and terms. // Add this include statement in your master.adoc file: include::common/global/rhosp_attributes.adoc[] // Run this command to create a sym link in your doc folder: $ ln -s ../common // Enclose the attribute in {} brackets in your modules. // Example: Use {osp_long} to display "OpenStack Platform". -// First usage example: {osp_long} ({osp_acro}). -// See also the Red Hat Dictionary: https://mojo.redhat.com/groups/red-hat-dictionary-or-lexicon -// See also: https://docs.google.com/spreadsheets/d/1DLS_lS3VKidgZIvcLmLp9BoiqptkvqHWfe1D5FD2kfk/edit#gid=1987148185 -// RHEL -:rhel_long: Red Hat Enterprise Linux -:rhel_short: RHEL +// RHEL version attributes :rhel_prev_ver: 8.4 -:rhel_curr_ver: 9.0 +:rhel_curr_ver: 9.2 -//The {rhelvernum} attribute is currently in use in several deployment docs -:rhelvernum: 9.0 +// The {rhelvernum} attribute is currently in use in several deployment docs +:rhelvernum: 9.2 + +// OSP version attributes -// OSP :osp_long: Red Hat OpenStack Platform :osp_acro: RHOSP -:osp_upstream: OpenStack -:osp_cmd: openstack -:osp_curr_ver: 17.0 -:osp_curr_ver_no_beta: 17.0 +:osp_curr_ver: 17.1-Beta +:osp_curr_ver_no_beta: 17.1 :osp_z_stream: 0 -// Ceph -:CephVernum: 5.2 - -:ooo_long: OpenStack-On-OpenStack -:ooo_short: TripleO -:ooo_cmd: tripleo - -// REPOSITORY -:defaultURL: https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{osp_curr_ver}/html -:defaultCephURL: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{CephVernum}/html - -// OSP Components -:horizon_long: {osp_acro} Dashboard -:horizon_short: horizon - -:keystone_long: Identity -:keystone_short: keystone - -:nova_long: Compute 
-:nova_short: nova {osp_acro} - -:neutron_long: OpenStack Networking -:neutron_short: neutron - -:glance_long: Image Service -:glance_short: glance - -:cinder_long: Block Storage -:cinder_short: cinder - -:swift:long: Object Storage -:swift_short: swift - -:heat_long: Orchestration -:heat_short: heat - -:gnocchi_long: Telemetry Metrics -:gnocchi_short: gnocchi - -:aodh_long: Telemetry Alarming -:aodh_short: aodh +// Ceph version attributes -:panko_long: Telemetry Event Storage -:panko_short: panko +:CephVernum: 6.1 -:sahara_long: Clustering -:sahara_short: sahara +// Common URLs. Do not override. Do not delete. -:manila_long: Shared File Systems -:manila:short: manila - -:ironic_long: Bare Metal -:ironic_short: ironic - -:hci_long: Hyper Converged Infrastructure -:hci_short: HCI - -:rear_long: Relax-and-Recover -:rear_short: ReaR - -// OSP Versions (hard) -:v10: 10 -:v12: 12 -:v13: 13 -:v14: 14 -:v15: 15 -:v16: 16 -:v17: 17 - - -// OSP External URIs and URLs -// Add OSP external links here. - -//The base URL, language, and format for all doc URLs. -//Do not override these settings in this file. Override them in the master.adoc -//file of your project if you need to use a different language or format. :base_url: https://access.redhat.com/documentation -:lang_uri: en-us -:format_uri: html-single - -//OSP Documentation URI variables. -//Attribute references with "-t" include descriptive labels. Use these if you don't -//plan on adding an anchor to the reference in your documentation. -:osp_uri: red_hat_openstack_platform - -//Use the following attributes for the current release. -:osp_base_url: {base_url}/{lang_uri}/{osp_uri}/{osp_curr_ver}/{format_uri} -:osp_base_url-t: {base_url}/{lang_uri}/{osp_uri}/{osp_curr_ver}/{format_uri}[Product Documentation for Red Hat OpenStack Platform {osp_curr_ver}] - -//Use the following attributes for the previous release. 
-:osp_base_url_13: {base_url}/{lang_uri}/{osp_uri}/{v13}/{format_uri} -:osp_base_url-t_13: {base_url}/{lang_uri}/{osp_uri}/{v13}/{format_uri}[Product Documentation for Red Hat OpenStack Platform {v13}] - - -// OSP Product Guides -// Use these attributes to reference OSP product guides. - -//Previous releases. -:osp_upgrading-t_13: {osp_base_url_13}/upgrading_red_hat_openstack_platform[Upgrading Red Hat OpenStack Platform] -:osp__fast_forward_upgrades-t_13: {osp_base_url_13}/fast_forward_upgrades[Fast Forward Upgrades] - - -//Current release. -:osp_release_notes: {osp_base_url}/release_notes -:osp_release_notes-t: {osp_base_url}/release_notes[Release Notes] - -:osp_product_guide: {osp_base_url}/product_guide -:osp_product_guide-t: {osp_base_url}/product_guide[Product Guide] - -:osp_package_manifest: {osp_base_url}/package_manifest -:osp_package_manifest-t: {osp_base_url}/package_manifest[Package Manifest] - -:osp_platform_high_availability: {osp_base_url}/understanding_red_hat_openstack_platform_high_availability -:osp_platform_high_availability-t: {osp_base_url}/understanding_red_hat_openstack_platform_high_availability[Understanding Red Hat OpenStack Platform High Availability] - -:osp_partner_integration: {osp_base_url}/partner_integration -:osp_partner_integration-t: {osp_base_url}/partner_integration[Partner Integration] - -:osp_security_and_hardening: {osp_base_url}/security_and_hardening_guide -:osp_security_and_hardening-t: {osp_base_url}/security_and_hardening_guide[Security and Hardening Guide] - -:osp_deployment_recommendations: {osp_base_url}/deployment_recommendations_for_specific_red_hat_openstack_platform_services -:osp_deployment_recommendations-t: {osp_base_url}/deployment_recommendations_for_specific_red_hat_openstack_platform_services[Deployment Recommendations for Specific Red Hat OpenStack Platform Services] - -:osp_director: {osp_base_url}/director_installation_and_usage -:osp_director-t: {osp_base_url}/director_installation_and_usage[Director 
Installation and Usage] - -:osp_keeping_red_hat_openstack_platform_updated: {osp_base_url}/keeping_red_hat_openstack_platform_updated -:osp_keeping_red_hat_openstack_platform_updated-t: {osp_base_url}/keeping_red_hat_openstack_platform_updated[Keeping Red Hat OpenStack Platform Updated] - -:osp_ipv6_networking_for_the_overcloud: {osp_base_url}/ipv6_networking_for_the_overcloud -:osp_ipv6_networking_for_the_overcloud-t: {osp_base_url}/ipv6_networking_for_the_overcloud[IPv6 Networking for the Overcloud] - -:osp_firewall_rules: {osp_base_url}/firewall_rules_for_red_hat_openstack_platform -:osp_firewall_rules-t: {osp_base_url}/firewall_rules_for_red_hat_openstack_platform[Firewall Rules for Red Hat OpenStack Platform] - -:osp_quick_start_guide: {osp_base_url}/quick_start_guide -:osp_quick_start_guide-t: {osp_base_url}/quick_start_guide[Quick Start Guide] - -:osp_hyper-converged_infrastructure: {osp_base_url}/hyper-converged_infrastructure_guide -:osp_hyper-converged_infrastructure-t: {osp_base_url}/hyper-converged_infrastructure_guide[Hyper-Converged Infrastructure Guide] - -:osp_auto_scaling: {osp_base_url}/auto_scaling_for_instances -:osp_auto_scaling-t: {osp_base_url}/auto_scaling_for_instances[Auto Scaling for Instances] - -:osp_creating_and_managing_instances: {osp_base_url}/creating_and_managing_instances -:osp_creating_and_managing_instances-t: {osp_base_url}/creating_and_managing_instances[Creating and Managing Instances] - -:osp_creating_and_managing_images: {osp_base_url}/creating_and_managing_images -:osp_creating_and_managing_images-t: {osp_base_url}/creating_and_managing_images[Creating and Managing Images] - -:osp_openstack_data_processing: {osp_base_url}/openstack_data_processing -:osp_openstack_data_processing-t: {osp_base_url}/openstack_data_processing[OpenStack Data Processing] - -:osp_bare_metal_provisioning: {osp_base_url}/bare_metal_provisioning -:osp_bare_metal_provisioning-t: {osp_base_url}/bare_metal_provisioning[Bare Metal Provisioning] - 
-:osp_high_availability_for_compute_instances: {osp_base_url}/high_availability_for_compute_instances -:osp_high_availability_for_compute_instances-t: {osp_base_url}/high_availability_for_compute_instances[High Availability for Compute Instances] - -:osp_transitioning_to_containerized_services: {osp_base_url}/transitioning_to_containerized_services -:osp_transitioning_to_containerized_services-t: {osp_base_url}/transitioning_to_containerized_services[Transitioning to Containerized Services] - -:osp_overcloud_parameters: {osp_base_url}/overcloud_parameters -:osp_overcloud_parameters-t: {osp_base_url}/overcloud_parameters[Overcloud Parameters] - -:osp_openstack_dashboard: {osp_base_url}/introduction_to_the_openstack_dashboard -:osp_openstack_dashboard-t: {osp_base_url}/introduction_to_the_openstack_dashboard[Introduction to the OpenStack Dashboard] - -:osp_key_manager: {osp_base_url}/manage_secrets_with_openstack_key_manager -:osp_key_manager-t: {osp_base_url}/manage_secrets_with_openstack_key_manager[Manage Secrets with OpenStack Key Manager] - -:osp_logging_monitoring_and_troubleshooting: {osp_base_url}/logging_monitoring_and_troubleshooting_guide -:osp_logging_monitoring_and_troubleshooting-t: {osp_base_url}/logging_monitoring_and_troubleshooting_guide[Logging, Monitoring, and Troubleshooting Guide] - -:osp_monitoring_tools_configuration: {osp_base_url}/monitoring_tools_configuration_guide -:osp_monitoring_tools_configuration-t: {osp_base_url}/monitoring_tools_configuration_guide[Monitoring Tools Configuration Guide] - -:osp_users_and_identity_management: {osp_base_url}/users_and_identity_management_guide -:osp_users_and_identity_management-t: {osp_base_url}/users_and_identity_management_guide[Users and Identity Management Guide] - -:osp_integrate_with_identity_service: {osp_base_url}/integrate_with_identity_service -:osp_integrate_with_identity_service-t: {osp_base_url}/integrate_with_identity_service[Integrate with Identity Service] - 
-:osp_federate_with_identity_service: {osp_base_url}/federate_with_identity_service -:osp_federate_with_identity_service-t: {osp_base_url}/federate_with_identity_service[Federate with Identity Service] - -:osp_deploy_fernet_on_the_overcloud: {osp_base_url}/deploy_fernet_on_the_overcloud -:osp_deploy_fernet_on_the_overcloud-t: {osp_base_url}/deploy_fernet_on_the_overcloud[Deploy Fernet on the Overcloud] - -:osp_openstack_integration_test_suite: {osp_base_url}/openstack_integration_test_suite_guide -:osp_openstack_integration_test_suite-t: {osp_base_url}/openstack_integration_test_suite_guide[OpenStack Integration Test Suite Guide] - -:osp_nfv_product_guide: {osp_base_url}/network_functions_virtualization_product_guide -:osp_nfv_product_guide-t: {osp_base_url}/network_functions_virtualization_product_guide[Network Functions Virtualization Product Guide] - -:osp_nfv_planning_and_configuration: {osp_base_url}/network_functions_virtualization_planning_and_configuration_guide -:osp_nfv_planning_and_configuration-t: {osp_base_url}/network_functions_virtualization_planning_and_configuration_guide[Network Functions Virtualization Planning and Configuration Guide] - -:osp_networking_guide: {osp_base_url}/networking_guide -:osp_networking_guide-t: {osp_base_url}/networking_guide[Networking Guide] - -:osp_spine_leaf_networking: {osp_base_url}/spine_leaf_networking -:osp_spine_leaf_networking-t: {osp_base_url}/spine_leaf_networking[Spine Leaf Networking] - -:osp_migrating_to_the_ml2_ovn_mechanism_driver: {osp_base_url}/migrating_to_the_ml2_ovn_mechanism_driver -:osp_networking_with_ovn-t: {osp_base_url}/migrating_to_the_ml2_ovn_mechanism_driver[Migrating to the ML2 OVN mechanism driver] - -:osp_networking_with_ovn: {osp_base_url}/networking_with_open_virtual_network -:osp_networking_with_ovn-t: {osp_base_url}/networking_with_open_virtual_network[Networking with Open Virtual Network] - -:osp_external_load_balancing: {osp_base_url}/external_load_balancing_for_the_overcloud 
-:osp_external_load_balancing-t: {osp_base_url}/external_load_balancing_for_the_overcloud[External Load Balancing for the Overcloud] - -:osp_storage_guide: {osp_base_url}/storage_guide -:osp_storage_guide-t: {osp_base_url}/storage_guide[Storage Guide] - -:osp_integrating_an_overcloud: {osp_base_url}/integrating_an_overcloud_with_an_existing_red_hat_ceph_cluster -:osp_integrating_an_overcloud-t: {osp_base_url}/integrating_an_overcloud_with_an_existing_red_hat_ceph_cluster[Integrating an Overcloud with an Existing Red Hat Ceph Cluster] - -:osp_deploying_an_overcloud: {osp_base_url}/deploying_an_overcloud_with_containerized_red_hat_ceph -:osp_deploying_an_overcloud-t: {osp_base_url}/deploying_an_overcloud_with_containerized_red_hat_ceph[Deploying an Overcloud with Containerized Red Hat Ceph] - -//// -:osp_cephfs_via_nfs_back_end_guide: {osp_base_url}/cephfs_via_nfs_back_end_guide_for_the_shared_file_system_service -:osp_cephfs_via_nfs_back_end_guide-t: {osp_base_url}/cephfs_via_nfs_back_end_guide_for_the_shared_file_system_service[CephFS via NFS Back End Guide for the Shared File Systems service] -//// - -:osp_cephfs_back_end_guide: {osp_base_url}/cephfs_back_end_guide_for_the_shared_file_system_service -:osp_cephfs_back_end_guide-t: {osp_base_url}/cephfs_back_end_guide_for_the_shared_file_system_service[CephFS Back End Guide for the Shared File Systems service] - -:osp_netapp_back_end_guide: {osp_base_url}/netapp_back_end_guide_for_the_shared_file_system_service -:osp_netapp_back_end_guide-t: {osp_base_url}/netapp_back_end_guide_for_the_shared_file_system_service[NetApp Back End Guide for the Shared File Systems service] - -:osp_google_cloud_backup_guide: {osp_base_url}/google_cloud_backup_guide -:osp_google_cloud_backup_guide-t: {osp_base_url}/google_cloud_backup_guide[Google Cloud Backup Guide] - -:osp_custom_block_storage_back_end_deployment_guide/: {osp_base_url}/custom_block_storage_back_end_deployment_guide/ 
-:osp_custom_block_storage_back_end_deployment_guide/-t: {osp_base_url}/custom_block_storage_back_end_deployment_guide/[Custom Block Storage Back End Deployment Guide] - -:osp_dell_emc_ps_series_back_end_guide: {osp_base_url}/dell_emc_ps_series_back_end_guide -:osp_dell_emc_ps_series_back_end_guide-t: {osp_base_url}/dell_emc_ps_series_back_end_guide[Dell EMC PS Series Back End Guide] - -:osp_block_storage_backup_guide: {osp_base_url}/block_storage_backup_guide -:osp_block_storage_backup_guide-t: {osp_base_url}/block_storage_backup_guide[Block Storage Backup Guide] - -:osp_dell_storage_center_back_end_guide: {osp_base_url}/dell_storage_center_back_end_guide -:osp_dell_storage_center_back_end_guide-t: {osp_base_url}/dell_storage_center_back_end_guide[Dell Storage Center Back End Guide] - -:osp_fujitsu_eternus_back_end_guide: {osp_base_url}/fujitsu_eternus_back_end_guide -:osp_fujitsu_eternus_back_end_guide-t: {osp_base_url}/fujitsu_eternus_back_end_guide[Fujitsu ETERNUS Back End Guide] - -:osp_netapp_block_storage_back_end_guide: {osp_base_url}/netapp_block_storage_back_end_guide -:osp_netapp_block_storage_back_end_guide-t: {osp_base_url}/netapp_block_storage_back_end_guide[NetApp Block Storage Back End Guide] - - -// Glossary Links -// In progress. These links will be defined as the glossary is built. 
-// :horizon_gloss: [[glossary-DMA]] DMA:: Direct Memory Access - - - -// Network Functions Virtualization -:nfv_long: Network Functions Virtualization -:nfv_acro: NFV -:nfv_ver: - -:vnf_long: Virtualized Network Functions -:vnf_acro: VNF - -:ovs_long: Open vSwitch -:ovs_short: OVS -:dpdk_long: data plane development kit -:dpdk_short: DPDK -:ovsdpdk: OVS-DPDK - -:sriov_long: single root I/O virtualization -:sriov_short: SR-IOV - -:csp_long: Communication Service Providers -:csp_acro: CSPs - -:rhhiv_long: Red Hat Hyperconverged Infrastructure for Virtualization -:rhhiv_short: RHHI-V - -:vdo_long: Virtual Data Optimizer -:vdo_acro: VDO - - -// Ceph -:ceph_long: Ceph Storage -:ceph_short: Ceph -:ceph_ver: 5.2 - -// Ceph Portal Links -:url_ceph_architecture_guide: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/5/html/architecture_guide/[] -:url_ceph_selection_guide: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/5/html/hardware_guide/index[] - -:cephog_long: Ceph Object Gateway - -:cephfs_long: Ceph File System -:cephfs_short: CephFS - -:mds_long: Ceph Metadata Servers -:mds_short: MDS - - -// Common Terms -:acl_long: Access Control Lists -:acl_acro: ACL - - -// CloudOps - - - -// Networking - - - - -// Telco -:telco_short: Telco - - - - -// Storage - - - - -// OpenShift -:rhos_long: Red Hat OpenShift -:rhos_upstream: OpenShift - -// OpenShift Container Platform -:rhoscp_long: Red Hat OpenShift Container Platform -:rhoscp_short: OpenShift Container Platform -:rhoscp_acro: RHOCP - -// OpenShift Dedicated -:rhosd_long: Red Hat OpenShift Dedicated -:rhosd_short: OpenShift Dedicated +:defaultURL: https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{osp_curr_ver}/html +:defaultCephURL: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{CephVernum}/html -// OpenShift Container Engine -:rhoce_long: Red Hat OpenShift Container Engine -:rhoce_short: OpenShift Container Engine +:setup-tlse: 
{defaultURL}/hardening_red_hat_openstack_platform/assembly_securing-rhos-with-tls-and-pki_security_and_hardening#proc_implementing-tls-e-with-ansible_encryption-and-key-management[Implementing TLS-e with Ansible] -// Service Assurance Framework -:saf_long: Service Assurance Framework -:saf_acro: SAF -:saf_prev_ver: -:saf_curr_ver: 1.0 diff --git a/doc-Service-Telemetry-Framework/Makefile b/doc-Service-Telemetry-Framework/Makefile index 1c7c8acf..45501d12 100644 --- a/doc-Service-Telemetry-Framework/Makefile +++ b/doc-Service-Telemetry-Framework/Makefile @@ -4,6 +4,7 @@ ROOTDIR = $(realpath .) NAME = $(notdir $(ROOTDIR)) DEST_DIR = $(BUILD_DIR)/$(NAME) DEST_HTML = $(DEST_DIR)/index-$(BUILD).html +DEST_HTML_171 = $(DEST_DIR)/index-$(BUILD)-171.html DEST_HTML_170 = $(DEST_DIR)/index-$(BUILD)-170.html DEST_HTML_162 = $(DEST_DIR)/index-$(BUILD)-162.html DEST_HTML_13 = $(DEST_DIR)/index-$(BUILD)-13.html @@ -23,10 +24,12 @@ endif all: html -html: html-latest html170 html162 html13 +html: html-latest html171 html170 html162 html13 html-latest: prepare $(IMAGES_TS) $(DEST_HTML) +html171: prepare $(IMAGES_TS) $(DEST_HTML_171) + html170: prepare $(IMAGES_TS) $(DEST_HTML_170) html162: prepare $(IMAGES_TS) $(DEST_HTML_162) @@ -53,7 +56,10 @@ $(IMAGES_TS): $(IMAGES) touch $(IMAGES_TS) $(DEST_HTML): $(SOURCES) - asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.0 -b xhtml5 -d book -o $@ $< + asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.1 -b xhtml5 -d book -o $@ $< + +$(DEST_HTML_171): $(SOURCES) + asciidoctor -a source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.1 -b xhtml5 -d book -o $@ $< $(DEST_HTML_170): $(SOURCES) asciidoctor -a 
source-highlighter=highlightjs -a highlightjs-languages="yaml,bash" -a highlightjs-theme="monokai" --failure-level WARN -a build=$(BUILD) -a vernum=17.0 -b xhtml5 -d book -o $@ $< diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc index cb00819c..13100ea2 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_advanced-features.adoc @@ -11,7 +11,6 @@ You can use the following operational features to provide additional functionali * xref:alerts_assembly-advanced-features[Configuring alerts] * xref:configuring-snmp-traps_assembly-advanced-features[Configuring SNMP traps] * xref:high-availability_assembly-advanced-features[Configuring high availability] -* xref:ephemeral-storage_assembly-advanced-features[Configuring ephemeral storage] * xref:observability-strategy-in-service-telemetry-framework_assembly-advanced-features[Configuring an alternate observability strategy] ifdef::include_when_16[] @@ -56,12 +55,6 @@ endif::include_when_13,include_when_17[] include::../modules/con_high-availability.adoc[leveloffset=+1] include::../modules/proc_configuring-high-availability.adoc[leveloffset=+2] -//Configuring ephemeral storage -include::../modules/con_ephemeral-storage.adoc[leveloffset=+1] -ifeval::["{build}" == "upstream"] -include::../modules/proc_configuring-ephemeral-storage.adoc[leveloffset=+2] -endif::[] - //Observability strategy include::../modules/con_observability-strategy.adoc[leveloffset=+1] include::../modules/proc_configuring-observability-strategy.adoc[leveloffset=+2] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc index 0413d78e..57d84e02 100644 --- 
a/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_installing-the-core-components-of-stf.adoc @@ -12,7 +12,6 @@ You can use Operators to load the {Project} ({ProjectShort}) components and obje * {MessageBus} * Smart Gateway * Prometheus and AlertManager -* Elasticsearch * Grafana .Prerequisites @@ -39,6 +38,7 @@ endif::[] .Additional resources * For more information about Operators, see the https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/understanding/olm-what-operators-are.html[_Understanding Operators_] guide. +* For more information about Operator catalogs, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/operators/understanding/olm-rh-catalogs.html[_Red Hat-provided Operator catalogs_]. //* For more information about how to remove {ProjectShort} from the {OpenShift} environment, see xref:assembly-removing-stf-from-the-openshift-environment_{}[]. include::../modules/proc_deploying-stf-to-the-openshift-environment.adoc[leveloffset=+1] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc index 6fef49f7..ea2ef706 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_preparing-your-ocp-environment-for-stf.adoc @@ -11,20 +11,12 @@ To prepare your {OpenShift} environment for {Project} ({ProjectShort}), you must * Ensure that you have persistent storage available in your {OpenShift} cluster for a production-grade deployment. For more information, see <>. * Ensure that enough resources are available to run the Operators and the application containers. For more information, see <>. 
* Ensure that you have a fully connected network environment. For more information, see xref:con-network-considerations-for-service-telemetry-framework_assembly-preparing-your-ocp-environment-for-stf[]. -ifeval::["{build}" == "upstream"] -* {ProjectShort} uses Elasticsearch to store events, which requires a larger than normal `vm.max_map_count` value. The `vm.max_map_count` value is set by default in {OpenShift}. For more information about how to edit the value of `vm.max_map_count`, see <>. -endif::[] include::../modules/con_observability-strategy.adoc[leveloffset=+1] include::../modules/con_persistent-volumes.adoc[leveloffset=+1] -include::../modules/con_ephemeral-storage.adoc[leveloffset=+2] include::../modules/con_resource-allocation.adoc[leveloffset=+1] include::../modules/con_network-considerations-for-service-telemetry-framework.adoc[leveloffset=+1] -ifeval::["{build}" == "upstream"] -include::../modules/con_node-tuning-operator.adoc[leveloffset=+1] -endif::[] - //reset the context ifdef::parent-context[:context: {parent-context}] ifndef::parent-context[:!context:] diff --git a/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc b/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc index 10afbf6c..35e17644 100644 --- a/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc +++ b/doc-Service-Telemetry-Framework/assemblies/assembly_removing-stf-from-the-openshift-environment.adoc @@ -11,11 +11,15 @@ Remove {Project} ({ProjectShort}) from an {OpenShift} environment if you no long To remove {ProjectShort} from the {OpenShift} environment, you must perform the following tasks: . Delete the namespace. +ifeval::["{build}" == "upstream"] . Remove the catalog source. +endif::[] . Remove the cert-manager Operator. 
include::../modules/proc_deleting-the-namespace.adoc[leveloffset=+1] +ifeval::["{build}" == "upstream"] include::../modules/proc_removing-the-catalogsource.adoc[leveloffset=+1] +endif::[] include::../modules/proc_removing-the-cert-manager-operator.adoc[leveloffset=+1] //reset the context diff --git a/doc-Service-Telemetry-Framework/modules/con_ephemeral-storage.adoc b/doc-Service-Telemetry-Framework/modules/con_ephemeral-storage.adoc deleted file mode 100644 index f1d1c582..00000000 --- a/doc-Service-Telemetry-Framework/modules/con_ephemeral-storage.adoc +++ /dev/null @@ -1,32 +0,0 @@ -// Module included in the following assemblies: -// -// - -// This module can be included from assemblies using the following include statement: -// include::/con_ephemeral-storage.adoc[leveloffset=+1] - -// The file name and the ID are based on the module title. For example: -// * file name: con_my-concept-module-a.adoc -// * ID: [id='con_my-concept-module-a_{context}'] -// * Title: = My concept module A -// -// The ID is used as an anchor for linking to the module. Avoid changing -// it after the module has been published to ensure existing links are not -// broken. -// -// The `context` attribute enables module reuse. Every module's ID includes -// {context}, which ensures that the module has a unique ID even if it is -// reused multiple times in a guide. -// -// In the title, include nouns that are used in the body text. This helps -// readers and search engines find information quickly. -// Do not start the title with a verb. See also _Wording of headings_ -// in _The IBM Style Guide_. -[id="ephemeral-storage_{context}"] -= Ephemeral storage - -[role="_abstract"] -You can use ephemeral storage to run {Project} ({ProjectShort}) without persistently storing data in your {OpenShift} cluster. - -[WARNING] -If you use ephemeral storage, you might experience data loss if a pod is restarted, updated, or rescheduled onto another node. 
Use ephemeral storage only for development or testing, and not production environments. diff --git a/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc b/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc index 3d92fb8d..824584cd 100644 --- a/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_high-availability.adoc @@ -10,7 +10,6 @@ With high availability, {Project} ({ProjectShort}) can rapidly recover from fail Enabling high availability has the following effects: -* Three Elasticsearch pods run instead of the default one. * The following components run two pods instead of the default one: ** {MessageBus} ** Alertmanager diff --git a/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc b/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc deleted file mode 100644 index 186cb232..00000000 --- a/doc-Service-Telemetry-Framework/modules/con_node-tuning-operator.adoc +++ /dev/null @@ -1,54 +0,0 @@ -// Module included in the following assemblies: -// -// - -// This module can be included from assemblies using the following include statement: -// include::/con_node-tuning-operator.adoc[leveloffset=+1] - -// The file name and the ID are based on the module title. For example: -// * file name: con_my-concept-module-a.adoc -// * ID: [id='con_my-concept-module-a_{context}'] -// * Title: = My concept module A -// -// The ID is used as an anchor for linking to the module. Avoid changing -// it after the module has been published to ensure existing links are not -// broken. -// -// The `context` attribute enables module reuse. Every module's ID includes -// {context}, which ensures that the module has a unique ID even if it is -// reused multiple times in a guide. -// -// In the title, include nouns that are used in the body text. This helps -// readers and search engines find information quickly. -// Do not start the title with a verb. 
See also _Wording of headings_ -// in _The IBM Style Guide_. -[id="node-tuning-operator_{context}"] -= Node tuning operator - -[role="_abstract"] -{ProjectShort} uses Elasticsearch to store events, which requires a larger than normal `vm.max_map_count`. The `vm.max_map_count` value is set by default in {OpenShift}. - -[TIP] -If your host platform is a typical {OpenShift} 4 environment, do not make any adjustments. The default node tuning operator is configured to account for Elasticsearch workloads. - -If you want to edit the value of `vm.max_map_count`, you cannot apply node tuning manually using the `sysctl` command because {OpenShift} manages nodes directly. To configure values and apply them to the infrastructure, you must use the node tuning operator. For more information, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html[Using the Node Tuning Operator]. - -In an {OpenShiftShort} deployment, the default node tuning operator specification provides the required profiles for Elasticsearch workloads or pods scheduled on nodes. To view the default cluster node tuning specification, run the following command: - -[source,bash] ----- -$ oc get Tuned/default -o yaml -n openshift-cluster-node-tuning-operator ----- - -The output of the default specification is documented at https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html#custom-tuning-default-profiles-set_node-tuning-operator[Default profiles set on a cluster]. You can manage the assignment of profiles in the `recommend` section where profiles are applied to a node when certain conditions are met. 
When scheduling Elasticsearch to a node in {ProjectShort}, one of the following profiles is applied: - -* `openshift-control-plane-es` -* `openshift-node-es` - -When scheduling an Elasticsearch pod, there must be a label present that matches `tuned.openshift.io/elasticsearch`. If the label is present, one of the two profiles is assigned to the pod. No action is required by the administrator if you use the recommended Operator for Elasticsearch. If you use a custom-deployed Elasticsearch with {ProjectShort}, ensure that you add the `tuned.openshift.io/elasticsearch` label to all scheduled pods. - -.Additional resources - -* For more information about virtual memory use by Elasticsearch, see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html - -* For more information about how the profiles are applied to nodes, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/using-node-tuning-operator.html#custom-tuning-specification_node-tuning-operator[Custom tuning specification]. diff --git a/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc b/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc index 093643fb..ecd5de00 100644 --- a/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_observability-strategy.adoc @@ -4,7 +4,7 @@ [role="_abstract"] {Project} ({ProjectShort}) does not include event storage backends or dashboarding tools. {ProjectShort} can optionally use community operators to deploy Elasticsearch and Grafana for those purposes. {ProjectShort} makes requests to these community operators to create instances of each application configured to work with {ProjectShort}. -Instead of having {ProjectShort} create custom resource requests, you can use your own deployments of these applications or other compatible applications. 
+Instead of having Service Telemetry Operator create custom resource requests, you can use your own deployments of these applications or other compatible applications, and scrape the metrics Smart Gateways for delivery to your own Prometheus-compatible system for telemetry storage. If you set the `observabilityStrategy` to `none`, then storage backends will not be deployed so persistent storage will not be required by {ProjectShort}. Use the observabilityStrategy property on the {ProjectShort} object to specify which type of obvservability components will be deployed. @@ -21,4 +21,4 @@ Newly deployed {ProjectShort} environments as of 1.5.3 default to `use_redhat`. Existing {ProjectShort} deployments created before 1.5.3 default to `use_community`. -To migrate an existing {ProjectShort} deployment to `use_redhat`, see https://access.redhat.com/articles/7011708[Migrating STF to fully supported operators] +To migrate an existing {ProjectShort} deployment to `use_redhat`, see https://access.redhat.com/articles/7011708[Migrating STF to fully supported operators] \ No newline at end of file diff --git a/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc b/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc index 5cc414ac..09edca24 100644 --- a/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_persistent-volumes.adoc @@ -2,7 +2,7 @@ = Persistent volumes [role="_abstract"] -{Project} ({ProjectShort}) uses persistent storage in {OpenShift} to request persistent volumes so that Prometheus and Elasticsearch can store metrics and events. +{Project} ({ProjectShort}) uses persistent storage in {OpenShift} to request persistent volumes so that Prometheus can store metrics. When you enable persistent storage through the Service Telemetry Operator, the Persistent Volume Claims (PVC) requested in an {ProjectShort} deployment results in an access mode of RWO (ReadWriteOnce). 
If your environment contains pre-provisioned persistent volumes, ensure that volumes of RWO are available in the {OpenShift} default configured `storageClass`. @@ -12,5 +12,3 @@ When you enable persistent storage through the Service Telemetry Operator, the P * For more information about recommended configurable storage technology in {OpenShift}, see https://docs.openshift.com/container-platform/{NextSupportedOpenShiftVersion}/scalability_and_performance/optimizing-storage.html#recommended-configurable-storage-technology_persistent-storage[Recommended configurable storage technology]. * For more information about configuring persistent storage for Prometheus in {ProjectShort}, see xref:backends-configuring-persistent-storage-for-prometheus_assembly-installing-the-core-components-of-stf[]. - -* For more information about configuring persistent storage for Elasticsearch in {ProjectShort}, see xref:backends-configuring-persistent-storage-for-elasticsearch_assembly-installing-the-core-components-of-stf[]. diff --git a/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc b/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc index 90ebf7cd..42637a6f 100644 --- a/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_primary-parameters-of-the-servicetelemetry-object.adoc @@ -20,7 +20,7 @@ You can configure each of these configuration parameters to provide different fe Use the `backends` parameter to control which storage back ends are available for storage of metrics and events, and to control the enablement of Smart Gateways that the `clouds` parameter defines. For more information, see xref:clouds_assembly-installing-the-core-components-of-stf[]. -Currently, you can use Prometheus as the metrics storage back end and Elasticsearch as the events storage back end. 
+You can use Prometheus as the metrics storage back end and Elasticsearch as the events storage back end. The Service Telemetry Operator can create custom resource objects that the Prometheus Operator watches to create a Prometheus workload. For storage of events, an external deployment of Elasticsearch is required. [discrete] === Enabling Prometheus as a storage back end for metrics @@ -29,7 +29,14 @@ To enable Prometheus as a storage back end for metrics, you must configure the ` .Procedure -* Configure the `ServiceTelemetry` object: +. Edit the `ServiceTelemetry` object: ++ +[source,bash] +---- +$ oc edit stf default +---- + +. Set the value of the backends.metrics.prometheus.enabled parameter to `true`: + [source,yaml] ---- @@ -39,6 +46,7 @@ metadata: name: default namespace: service-telemetry spec: + [...] backends: metrics: prometheus: @@ -57,7 +65,7 @@ Use the `pvcStorageRequest` parameter to define the minimum required volume size .Procedure -* List the available storage classes: +. List the available storage classes: + [source,bash,options="nowrap"] ---- @@ -68,7 +76,14 @@ standard (default) kubernetes.io/cinder Delete WaitForFirstCons standard-csi cinder.csi.openstack.org Delete WaitForFirstConsumer true 20h ---- -* Configure the `ServiceTelemetry` object: +. Edit the `ServiceTelemetry` object: ++ +[source,bash] +---- +$ oc edit stf default +---- + +. Set the value of the backends.metrics.prometheus.enabled parameter to `true` and the value of backends.metrics.prometheus.storage.strategy to `persistent`: + [source,yaml] ---- @@ -78,6 +93,7 @@ metadata: name: default namespace: service-telemetry spec: + [...] backends: metrics: prometheus: @@ -92,11 +108,28 @@ spec: [discrete] === Enabling Elasticsearch as a storage back end for events -To enable Elasticsearch as a storage back end for events, you must configure the `ServiceTelemetry` object. 
+[NOTE] +==== +Older versions of {ProjectShort} would manage Elasticsearch objects for the community supported Elastic Cloud on Kubernetes Operator (ECK). Elasticsearch management functionality is deprecated as of {ProjectShort} 1.5.3. Future versions of Service Telemetry Operator will continue to support forwarding to an existing Elasticsearch instance (which can be deployed and managed by ECK), but will not manage the creation of Elasticsearch objects. When upgrading an {ProjectShort} deployment, any existing Elasticsearch object and deployment will remain intact, but will no longer be managed by {ProjectShort}. + +ifeval::["{build}" == "downstream"] +Refer to this article for additional information about https://access.redhat.com/articles/7031236[Using Service Telemetry Framework with Elasticsearch] +endif::[] + +==== + +To enable events forwarding to Elasticsearch as a storage back end, you must configure the `ServiceTelemetry` object. .Procedure -* Configure the `ServiceTelemetry` object: +. Edit the `ServiceTelemetry` object: ++ +[source,bash] +---- +$ oc edit stf default +---- + +. Set the value of the backends.events.elasticsearch.enabled parameter to `true` and configure the hostUrl to match the Elasticsearch instance you would like to forward to: + [source,yaml] ---- @@ -106,55 +139,37 @@ metadata: name: default namespace: service-telemetry spec: + [...] backends: events: elasticsearch: enabled: true + forwarding: + hostUrl: https://external-elastic-http.domain:9200 + tlsServerName: "" + tlsSecretName: elasticsearch-es-cert + userSecretName: elasticsearch-es-elastic-user + useBasicAuth: true + useTls: true ---- -[id="backends-configuring-persistent-storage-for-elasticsearch_{context}"] -[discrete] -=== Configuring persistent storage for Elasticsearch - -Use the additional parameters defined in `backends.events.elasticsearch.storage.persistent` to configure persistent storage options for Elasticsearch, such as storage class and volume size. 
- -Use `storageClass` to define the back end storage class. If you do not set this parameter, the Service Telemetry Operator uses the default storage class for the {OpenShift} cluster. - -Use the `pvcStorageRequest` parameter to define the minimum required volume size to satisfy the storage request. If volumes are statically defined, it is possible that a volume size larger than requested is used. By default, Service Telemetry Operator requests a volume size of `20Gi` (20 Gibibytes). - -.Procedure - -. List the available storage classes: +. Create the secret named in the `userSecretName` parameter to store the basic auth credentials + -[source,bash,options="nowrap"] +[source,bash] ---- -$ oc get storageclasses -NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE -csi-manila-ceph manila.csi.openstack.org Delete Immediate false 20h -standard (default) kubernetes.io/cinder Delete WaitForFirstConsumer true 20h -standard-csi cinder.csi.openstack.org Delete WaitForFirstConsumer true 20h +$ oc create secret generic elasticsearch-es-elastic-user --from-literal=elastic='' ---- -. Configure the `ServiceTelemetry` object: +. Copy the CA certificate into a file called `EXTERNAL-ES-CA.pem`, then create the secret named in the `tlsSecretName` parameter to make it available to {ProjectShort} + -[source,yaml] +[source,bash] ---- -apiVersion: infra.watch/v1beta1 -kind: ServiceTelemetry -metadata: - name: default - namespace: service-telemetry -spec: - backends: - events: - elasticsearch: - enabled: true - version: 7.16.1 - storage: - strategy: persistent - persistent: - storageClass: standard-csi - pvcStorageRequest: 50G +$ cat EXTERNAL-ES-CA.pem +-----BEGIN CERTIFICATE----- +[...] 
+-----END CERTIFICATE----- + +$ oc create secret generic elasticsearch-es-cert --from-file=ca.crt=EXTERNAL-ES-CA.pem ---- [id="clouds_{context}"] @@ -187,20 +202,20 @@ spec: metrics: collectors: - collectorType: collectd - subscriptionAddress: collectd/telemetry + subscriptionAddress: collectd/cloud1-telemetry - collectorType: ceilometer - subscriptionAddress: anycast/ceilometer/metering.sample + subscriptionAddress: anycast/ceilometer/cloud1-metering.sample ifndef::include_when_13[] - collectorType: sensubility - subscriptionAddress: sensubility/telemetry + subscriptionAddress: sensubility/cloud1-telemetry debugEnabled: false endif::[] events: collectors: - collectorType: collectd - subscriptionAddress: collectd/notify + subscriptionAddress: collectd/cloud1-notify - collectorType: ceilometer - subscriptionAddress: anycast/ceilometer/event.sample + subscriptionAddress: anycast/ceilometer/cloud1-event.sample ---- ifndef::include_when_13[] diff --git a/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc b/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc index 3e7c325c..ca3e4ab5 100644 --- a/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_resource-allocation.adoc @@ -11,6 +11,4 @@ The amount of resources that you require to run {Project} ({ProjectShort}) depen .Additional resources -* For recommendations about sizing for metrics collection, see https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. - -* For information about sizing requirements for Elasticsearch, see https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html. +* For recommendations about sizing for metrics collection, see https://access.redhat.com/articles/4907241[Service Telemetry Framework Performance and Scaling]. 
\ No newline at end of file diff --git a/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc b/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc index e232e7a2..d61e0795 100644 --- a/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_stf-architecture.adoc @@ -19,11 +19,9 @@ ** Ceilometer: Collects {OpenStackShort} metrics and events. * Transport ** {MessageBus}: An AMQP 1.x compatible messaging bus that provides fast and reliable data transport to transfer the metrics to {ProjectShort} for storage. -** Smart Gateway: A Golang application that takes metrics and events from the AMQP 1.x bus to deliver to Elasticsearch or Prometheus. +** Smart Gateway: A Golang application that takes metrics and events from the AMQP 1.x bus to deliver to Prometheus or an external Elasticsearch. * Data storage ** Prometheus: Time-series data storage that stores {ProjectShort} metrics received from the Smart Gateway. -** Elasticsearch: Events data storage that stores {ProjectShort} events received from the Smart Gateway. -* Observation ** Alertmanager: An alerting tool that uses Prometheus alert rules to manage alerts. ** Grafana: A visualization and analytics application that you can use to query, visualize, and explore data. 
diff --git a/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc b/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc index a804b4ad..881454ac 100644 --- a/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc +++ b/doc-Service-Telemetry-Framework/modules/con_tls-certificates-duration.adoc @@ -2,9 +2,8 @@ = Configuring the duration for the TLS certificates [role="_abstract"] -To configure the duration of the TLS certificates that you use for the connections with -Elasticsearch and {MessageBus} in {Project} ({ProjectShort}), -modify the `ServiceTelemetry` object and configure the `certificates` parameters. +To configure the duration of the TLS certificates that you use for the {MessageBus} connection in {Project} ({ProjectShort}), +modify the `ServiceTelemetry` object and configure the `certificates` parameter. [id="configuration-parameters-for-tls-certificates-duration_{context}"] == Configuration parameters for the TLS certificates @@ -18,30 +17,7 @@ caCertDuration:: The requested 'duration' or lifetime of the CA Certificate. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration. Default value is `70080h`. -NOTE:: The default duration of certificates is long, because you usually copy a subset of them in the {OpenStack} deployment when the certificates renew. For more information about the QDR CA Certificate renewal process, see xref:assembly-renewing-the-amq-interconnect-certificate_assembly[] - -The `certificates` parameter for Elasticsearch is part of the `backends.events.elasticsearch` definition and is configured in the `ServiceTelemetry` object: - -[source,yaml,options="nowrap"] ----- -apiVersion: infra.watch/v1beta1 -kind: ServiceTelemetry -metadata: - name: default - namespace: service-telemetry -spec: -... - backends: - ... 
- events: - elasticsearch: - enabled: true - version: 7.16.1 - certificates: - endpointCertDuration: 70080h - caCertDuration: 70080h -... ----- +NOTE: The default duration of certificates is long, because you usually copy a subset of them in the {OpenStack} deployment when the certificates renew. For more information about the QDR CA Certificate renewal process, see xref:assembly-renewing-the-amq-interconnect-certificate_assembly[]. You can configure the `certificates` parameter for QDR that is part of the `transports.qdr` definition in the `ServiceTelemetry` object: diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-ephemeral-storage.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-ephemeral-storage.adoc deleted file mode 100644 index ef316756..00000000 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-ephemeral-storage.adoc +++ /dev/null @@ -1,52 +0,0 @@ -[id='configuring-ephemeral-storage_{context}'] -= Configuring ephemeral storage - -[role="_abstract"] -To configure {ProjectShort} components for ephemeral storage, add `...storage.strategy: ephemeral` to the corresponding parameter. For example, to enable ephemeral storage for the Prometheus back end, set `backends.metrics.prometheus.storage.strategy: ephemeral`. Components that support configuration of ephemeral storage include `alerting.alertmanager`, `backends.metrics.prometheus`, and `backends.events.elasticsearch`. You can add ephemeral storage configuration at installation time or, if you already deployed {ProjectShort}, complete the following steps: - -.Procedure - -. Log in to {OpenShift}. -. Change to the `service-telemetry` namespace: -+ -[source,bash] ----- -$ oc project service-telemetry ----- - -. Edit the ServiceTelemetry object: -+ -[source,bash] ----- -$ oc edit stf default ----- - -. 
Add the `...storage.strategy: ephemeral` parameter to the `spec` section of the relevant component: -+ -[source,yaml] ----- -apiVersion: infra.watch/v1beta1 -kind: ServiceTelemetry -metadata: - name: default - namespace: service-telemetry -spec: - alerting: - enabled: true - alertmanager: - storage: - strategy: ephemeral - backends: - metrics: - prometheus: - enabled: true - storage: - strategy: ephemeral - events: - elasticsearch: - enabled: true - storage: - strategy: ephemeral ----- - -. Save your changes and close the object. diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc index b9fe20fb..65f795ec 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-observability-strategy.adoc @@ -2,10 +2,7 @@ = Configuring an alternate observability strategy [role="_abstract"] -To configure {ProjectShort} to skip the deployment of storage, visualization, and alerting backends, add `observabilityStrategy: none` to the ServiceTelemetry spec. In this mode, only {MessageBus} routers and metrics Smart Gateways are deployed, and you must configure an external Prometheus-compatible system to collect metrics from the {ProjectShort} Smart Gateways. - -[NOTE] -Currently, only metrics are supported when you set `observabilityStrategy` to `none`. Events Smart Gateways are not deployed. +To configure {ProjectShort} to skip the deployment of storage, visualization, and alerting backends, add `observabilityStrategy: none` to the ServiceTelemetry spec. In this mode, only {MessageBus} routers and Smart Gateways are deployed, and you must configure an external Prometheus-compatible system to collect metrics from the {ProjectShort} Smart Gateways, and an external Elasticsearch to receive forwarded events. .Procedure . 
Create a `ServiceTelemetry` object with the property `observabilityStrategy: none` in the `spec` parameter. The manifest shows results in a default deployment of {ProjectShort} that is suitable for receiving telemetry from a single cloud with all metrics collector types. @@ -27,7 +24,7 @@ EOF + [source,bash] ---- -$ for o in alertmanager/default prometheus/default elasticsearch/elasticsearch grafana/default lokistack/lokistack; do oc delete $o; done +$ for o in alertmanager/default prometheus/default elasticsearch/elasticsearch grafana/default; do oc delete $o; done ---- + . To verify that all workloads are operating correctly, view the pods and the status of each pod: @@ -36,7 +33,9 @@ $ for o in alertmanager/default prometheus/default elasticsearch/elasticsearch g ---- $ oc get pods NAME READY STATUS RESTARTS AGE +default-cloud1-ceil-event-smartgateway-6f8547df6c-p2db5 3/3 Running 0 132m default-cloud1-ceil-meter-smartgateway-59c845d65b-gzhcs 3/3 Running 0 132m +default-cloud1-coll-event-smartgateway-bf859f8d77-tzb66 3/3 Running 0 132m default-cloud1-coll-meter-smartgateway-75bbd948b9-d5phm 3/3 Running 0 132m ifndef::include_when_13[] default-cloud1-sens-meter-smartgateway-7fdbb57b6d-dh2g9 3/3 Running 0 132m diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc index d41bc47a..69d8ff10 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf-using-director-operator.adoc @@ -15,9 +15,9 @@ When you deploy the {OpenStack} ({OpenStackShort}) overcloud deployment using di //endif::include_when_13,include_when_17[] . 
xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[Retrieving the {MessageBus} route address] -. xref:proc_creating-the-base-configuration-for-director-operator-for-stf[Creating the base configuration for director Operator for {ProjectShort}] -. xref:proc_configuring-the-stf-connection-for-director-operator-for-the-overcloud[Configuring the {ProjectShort} connection for the overcloud] -. xref:proc_deploying-the-overcloud-for-director-operator[Deploying the overcloud for director operator] +. xref:creating-the-base-configuration-for-director-operator-for-stf_assembly-completing-the-stf-configuration-using-director-operator[Creating the base configuration for director Operator for {ProjectShort}] +. xref:configuring-the-stf-connection-for-director-operator-for-the-overcloud_assembly-completing-the-stf-configuration-using-director-operator[Configuring the {ProjectShort} connection for the overcloud] +. xref:deploying-the-overcloud-for-director-operator_assembly-completing-the-stf-configuration-using-director-operator[Deploying the overcloud for director operator] //. xref:validating-clientside-installation_assembly-completing-the-stf-configuration[Validating client-side installation] diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc index 0fd4cb17..ceba9322 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-red-hat-openstack-platform-overcloud-for-stf.adoc @@ -16,7 +16,7 @@ endif::include_when_13,include_when_17[] . 
xref:validating-clientside-installation_assembly-completing-the-stf-configuration[Validating client-side installation] .Additional resources -* For more information about deploying an OpenStack cloud using director, see link:{defaultURL}/html/director_installation_and_usage/index +* For more information about deploying an OpenStack cloud using director, see link:{defaultURL}/director_installation_and_usage/index[Director Installation and Usage]. ifdef::include_when_16_1[] -* To collect data through {MessageBus}, see link:{defaultURL}/html/operational_measurements/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. +* To collect data through {MessageBus}, see link:{defaultURL}/operational_measurements/collectd-plugins_assembly#collectd_plugin_amqp1[the amqp1 plug-in]. endif::include_when_16_1[] diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-director-operator-for-the-overcloud.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-director-operator-for-the-overcloud.adoc index 2d59c26a..6bd7550d 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-director-operator-for-the-overcloud.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-director-operator-for-the-overcloud.adoc @@ -7,13 +7,18 @@ Edit the `heat-env-config-deploy` ConfigMap to create a connection from {OpenSta .Procedure -. Login to the {OpenShift} environment and change to the project that hosts your {OpenStack} deployment. +. Log in to the {OpenShift} environment where {OpenStackShort} director Operator is deployed and change to the project that hosts your {OpenStackShort} deployment: ++ +[source,bash] +---- +$ oc project openstack +---- . Open the `heat-env-config-deploy` ConfigMap for editing: + [source,bash,options="nowrap",subs="verbatim"] ---- -$ oc edit heat-env-config-deploy +$ oc edit configmap heat-env-config-deploy ---- . 
Add your `stf-connectors.yaml` configuration to the `heat-env-config-deploy` ConfigMap, appropriate to your environment, save your edits and close the file: @@ -63,4 +68,4 @@ data: .Additional resources * For more information about the `stf-connectors.yaml` environment file, see xref:configuring-the-stf-connection-for-the-overcloud_assembly-completing-the-stf-configuration[]. -* For more information about adding heat templates to a {OpenStack} director Operator deployment, see link:{defaultURL}/rhosp_director_operator_for_openshift_container_platform/assembly_adding-heat-templates-and-environment-files-with-the-director-operator_rhosp-director-operator#doc-wrapper[Adding heat templates and environment files with the director Operator] +* For more information about adding heat templates to a {OpenStackShort} director Operator deployment, see link:{defaultURL}/rhosp_director_operator_for_openshift_container_platform/assembly_adding-heat-templates-and-environment-files-with-the-director-operator_rhosp-director-operator#doc-wrapper[Adding heat templates and environment files with the director Operator] diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc index 7f4a6b77..484e7c45 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-the-stf-connection-for-the-overcloud.adoc @@ -14,7 +14,7 @@ endif::include_when_13,include_when_17[] // The following configuration should match the contents in modules/proc_creating-openstack-environment-file-for-multiple-clouds.adoc. If you have changes to make, please make the same changes to both files. .Procedure -. Log in to the {OpenStackShort} undercloud as the `stack` user. +. Log in to the undercloud host as the `stack` user. . 
Create a configuration file called `stf-connectors.yaml` in the `/home/stack` directory. @@ -24,20 +24,6 @@ endif::[] ifdef::include_when_13[] . In the `stf-connectors.yaml` file, configure the `MetricsQdrConnectors` address to connect the {MessageBus} on the overcloud to the {ProjectShort} deployment. You configure the topic addresses for Ceilometer and collectd in this file to match the defaults in {ProjectShort}. For more information about customizing topics and cloud configuration, see xref:configuring-multiple-clouds_assembly-completing-the-stf-configuration[]. endif::[] - -* The `resource_registry` configuration directly loads the collectd service because you do not include the `collectd-write-qdr.yaml` environment file for multiple cloud deployments. -* Replace the `host` parameter with the value of `HOST/PORT` that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. -ifdef::include_when_13,include_when_17[] -* Replace the `caCertFileContent` parameter with the contents retrieved in xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[]. -endif::include_when_13,include_when_17[] -* Replace the `host` sub-parameter of `MetricsQdrConnectors` with the value of `HOST/PORT` that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. -* Set `CeilometerQdrEventsConfig.topic` to define the topic for Ceilometer events. The format of this value is `anycast/ceilometer/cloud1-event.sample`. -* Set `CeilometerQdrMetricsConfig.topic` to define the topic for Ceilometer metrics. The format of this value is `anycast/ceilometer/cloud1-metering.sample`. -* Set `CollectdAmqpInstances` to define the topic for collectd events. The format of this value is `collectd/cloud1-notify`. -* Set `CollectdAmqpInstances` to define the topic for collectd metrics. The format of this value is `collectd/cloud1-telemetry`. 
-ifndef::include_when_13[] -* Set `CollectdSensubilityResultsChannel` to define the topic for collectd-sensubility events. The format of this value is `sensubility/cloud1-telemetry`. -endif::[] + .stf-connectors.yaml [source,yaml,options="nowrap"] @@ -83,3 +69,27 @@ ifndef::include_when_13[] CollectdSensubilityResultsChannel: sensubility/cloud1-telemetry endif::[] ---- + +* The `resource_registry` configuration directly loads the collectd service because you do not include the `collectd-write-qdr.yaml` environment file for multiple cloud deployments. +* Replace the `host` parameter with the value that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. +ifdef::include_when_13,include_when_17[] +* Replace the `caCertFileContent` parameter with the contents retrieved in xref:getting-ca-certificate-from-stf-for-overcloud-configuration_assembly-completing-the-stf-configuration[]. +endif::include_when_13,include_when_17[] +* Replace the `host` sub-parameter of `MetricsQdrConnectors` with the value that you retrieved in xref:retrieving-the-qdr-route-address_assembly-completing-the-stf-configuration[]. +* Set the `topic` value of `CeilometerQdrEventsConfig` to define the topic for Ceilometer events. The value is a unique topic identifier for the cloud such as `cloud1-event`. +* Set the `topic` value of `CeilometerQdrMetricsConfig` to define the topic for Ceilometer metrics. The value is a unique topic identifier for the cloud such as `cloud1-metering`. +* Set `CollectdAmqpInstances` sub-parameter to define the topic for collectd events. The section name is a unique topic identifier for the cloud such as `cloud1-notify`. +* Set `CollectdAmqpInstances` sub-parameter to define the topic for collectd metrics. The section name is a unique topic identifier for the cloud such as `cloud1-telemetry`. +ifndef::include_when_13[] +* Set `CollectdSensubilityResultsChannel` to define the topic for collectd-sensubility events. 
The value is a unique topic identifier for the cloud such as `sensubility/cloud1-telemetry`. +endif::[] + +[NOTE] +==== +When you define the topics for collectd and Ceilometer, the value you provide is transposed into the full topic that the Smart Gateway client uses to listen for messages. + +Ceilometer topic values are transposed into the topic address `anycast/ceilometer/<topic>.sample` and collectd topic values are transposed into the topic address `collectd/<topic>`. +ifndef::include_when_13[The value for sensubility is the full topic path and has no transposition from topic value to topic address.] + +For an example of a cloud configuration in the `ServiceTelemetry` object referring to the full topic address, see xref:clouds_assembly-installing-the-core-components-of-stf[]. +==== diff --git a/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc b/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc index 74c48b7d..ccb2fb01 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_configuring-tls-certificates-duration.adoc @@ -14,8 +14,7 @@ The following procedure is valid for new {ProjectShort} deployments. .Procedure -To edit the duration of the TLS certificates, you can set the Elasticsearch `endpointCertDuration`, for example `26280h` for 3 years, and set the QDR `caCertDuration`, for example `87600h` for 10 years. 
-You can use the default value of 8 years for the CA certificate for Elasticsearch and endpoint certificate: +To edit the duration of the TLS certificate, you can set the QDR `caCertDuration`, for example `87600h` for 10 years: + [source,yaml,options="nowrap",role="white-space-pre"] ---- @@ -26,12 +25,6 @@ metadata: name: default namespace: service-telemetry spec: - backends: - events: - elasticsearch: - enabled: true - certificates: - endpointCertDuration: 26280h transport: qdr: enabled: true @@ -42,13 +35,10 @@ EOF .Verification -. Verify that the expiry date for the certificates is correct: +. Verify that the expiry date for the certificate is correct: + [source,bash,options="nowrap"] ---- -$ oc get secret elasticsearch-es-cert -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -in - -text | grep "Not After" - Not After : Mar 9 21:00:16 2026 GMT - $ oc get secret default-interconnect-selfsigned -o jsonpath='{.data.tls\.crt}' | base64 -d | openssl x509 -in - -text | grep "Not After" Not After : Mar 9 21:00:16 2033 GMT ---- \ No newline at end of file diff --git a/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc b/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc index 46e7eb6e..bb7490d6 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_creating-a-servicetelemetry-object-in-openshift.adoc @@ -20,24 +20,6 @@ spec: {} EOF ---- + -To override a default value, define the parameter that you want to override. In this example, enable Elasticsearch by setting `enabled` to `true`: -+ -[source,yaml,options="nowrap",role="white-space-pre"] ----- -$ oc apply -f - <.sample` and collectd topic values are transposed into the topic address `collectd/`. 
+ifndef::include_when_13[The value for sensubility is the full topic path and has no transposition from topic value to topic address.] + +For an example of a cloud configuration in the `ServiceTelemetry` object referring to the full topic address, see xref:clouds_assembly-installing-the-core-components-of-stf[]. +==== . Ensure that the naming convention in the `stf-connectors.yaml` file aligns with the `spec.bridge.amqpUrl` field in the Smart Gateway configuration. For example, configure the `CeilometerQdrEventsConfig.topic` field to a value of `cloud1-event`. -. Source the authentication file: +. Log in to the undercloud host as the `stack` user. + +. Source the `stackrc` undercloud credentials file: + [source,bash] ---- -[stack@undercloud-0 ~]$ source stackrc - -(undercloud) [stack@undercloud-0 ~]$ +$ source stackrc ---- . Include the `stf-connectors.yaml` file and unique domain name environment file `hostnames.yaml` in the `openstack overcloud deployment` command, with any other environment files relevant to your environment: @@ -127,20 +139,15 @@ endif::[] [WARNING] If you use the `collectd-write-qdr.yaml` file with a custom `CollectdAmqpInstances` parameter, data publishes to the custom and default topics. In a multiple cloud environment, the configuration of the `resource_registry` parameter in the `stf-connectors.yaml` file loads the collectd service. 
+ -// valid use of +quotes subs for rendering emphasis - -+ -[source,bash,options="nowrap",subs="+quotes"] - +[source,bash,options="nowrap"] ---- -(undercloud) [stack@undercloud-0 ~]$ openstack overcloud deploy __ ---templates /usr/share/openstack-tripleo-heat-templates \ - --environment-file _<...other_environment_files...>_ \ - --environment-file /usr/share/openstack-tripleo-heat-templates/environments/metrics/ceilometer-write-qdr.yaml \ - --environment-file /usr/share/openstack-tripleo-heat-templates/environments/metrics/qdr-edge-only.yaml \ - --environment-file /home/stack/hostnames.yaml \ - --environment-file /home/stack/enable-stf.yaml \ - --environment-file /home/stack/stf-connectors.yaml +(undercloud)$ openstack overcloud deploy --templates \ +-e [your environment files] \ +-e /usr/share/openstack-tripleo-heat-templates/environments/metrics/ceilometer-write-qdr.yaml \ +-e /usr/share/openstack-tripleo-heat-templates/environments/metrics/qdr-edge-only.yaml \ +-e /home/stack/hostnames.yaml \ +-e /home/stack/enable-stf.yaml \ +-e /home/stack/stf-connectors.yaml ---- . Deploy the {OpenStack} overcloud. diff --git a/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-director-operator-for-stf.adoc b/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-director-operator-for-stf.adoc index add32966..f99f2fca 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-director-operator-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-director-operator-for-stf.adoc @@ -7,9 +7,14 @@ Edit the `heat-env-config-deploy` ConfigMap to add the base {Project} ({ProjectS .Procedure -. Login to your {OpenShift} environment and change to the project that hosts your {OpenStack} ({OpenStackShort}) deployment. +. 
Log in to the {OpenShift} environment where {OpenStackShort} director Operator is deployed and change to the project that hosts your {OpenStackShort} deployment: ++ +[source,bash] +---- +$ oc project openstack +---- -. Open the `heat-env-config-deploy` ConfigMap for editing: +. Open the `heat-env-config-deploy` `ConfigMap` CR for editing: + [source,bash,options="nowrap",subs="verbatim"] ---- diff --git a/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-stf.adoc b/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-stf.adoc index 29744283..47090273 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_creating-the-base-configuration-for-stf.adoc @@ -6,13 +6,13 @@ To configure the base parameters to provide a compatible data collection and tra .Procedure -. Log in to the {OpenStack} ({OpenStackShort}) undercloud as the `stack` user. +. Log in to the undercloud host as the `stack` user. . Create a configuration file called `enable-stf.yaml` in the `/home/stack` directory. + [IMPORTANT] ==== -Setting `EventPipelinePublishers` and `PipelinePublishers` to empty lists results in no event or metric data passing to {OpenStackShort} telemetry components, such as Gnocchi or Panko. If you need to send data to additional pipelines, the Ceilometer polling interval of 30 seconds, as specified in `ExtraConfig`, might overwhelm the {OpenStackShort} telemetry components, and you must increase the interval to a larger value, such as `300`. Increasing the value to a longer polling interval results in less telemetry resolution in {ProjectShort}. +Setting `EventPipelinePublishers` and `PipelinePublishers` to empty lists results in no event or metric data passing to {OpenStackShort} telemetry components, such as Gnocchi or Panko. 
If you need to send data to additional pipelines, the Ceilometer polling interval of `30` seconds, which you specify in `ExtraConfig`, might overwhelm the {OpenStackShort} telemetry components. You must increase the interval to a larger value, such as `300`, which results in less telemetry resolution in {ProjectShort}. ==== + .enable-stf.yaml diff --git a/doc-Service-Telemetry-Framework/modules/proc_deleting-the-default-smart-gateways.adoc b/doc-Service-Telemetry-Framework/modules/proc_deleting-the-default-smart-gateways.adoc index b448a393..50cb53b3 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_deleting-the-default-smart-gateways.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_deleting-the-default-smart-gateways.adoc @@ -6,7 +6,7 @@ After you configure {Project} ({ProjectShort}) for multiple clouds, you can dele TIP: If you do not want to deploy any Smart Gateways, define an empty clouds list by using the `clouds: []` parameter. -WARNING: The `cloudsRemoveOnMissing` parameter is disabled by default. If you enable the `cloudsRemoveOnMissing` parameter, you remove any manually created `SmartGateway` objects in the current namespace without any possibility to restore. +WARNING: The `cloudsRemoveOnMissing` parameter is disabled by default. If you enable the `cloudsRemoveOnMissing` parameter, you remove any manually-created `SmartGateway` objects in the current namespace without any possibility to restore.
.Procedure diff --git a/doc-Service-Telemetry-Framework/modules/proc_deploying-stf-to-the-openshift-environment.adoc b/doc-Service-Telemetry-Framework/modules/proc_deploying-stf-to-the-openshift-environment.adoc index fb63ef37..5d20bc9d 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_deploying-stf-to-the-openshift-environment.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_deploying-stf-to-the-openshift-environment.adoc @@ -2,77 +2,58 @@ = Deploying {Project} to the {OpenShift} environment [role="_abstract"] -Deploy {Project} ({ProjectShort}) to collect, store, and monitor events: +Deploy {Project} ({ProjectShort}) to collect and store {OpenStack} ({OpenStackShort}) telemetry. + +[id="deploying-observability-operator_{context}"] +== Deploying Observability Operator + +// TODO: https://access.redhat.com/articles/7011708 covers migration to OBO from community-operators Prometheus Operator. This documentation references community-operators as the installation CatalogSource. It is hoping OBO is available from redhat-operators CatalogSource prior to STF 1.5.3. If so, then we will need to update this. +{Project} ({ProjectShort}) uses other supporting Operators as part of the deployment. {ProjectShort} is able to satisfy most dependencies automatically but some Operators need to be pre-installed, such as Observability Operator which provides an instance of Prometheus. .Procedure -. Create a namespace to contain the {ProjectShort} components, for example, `service-telemetry`: -+ -[source,bash,options="nowrap",role="white-space-pre"] ----- -$ oc new-project service-telemetry ----- -. 
Create an OperatorGroup in the namespace so that you can schedule the Operator pods: -+ -[source,yaml,options="nowrap",role="white-space-pre"] ----- -$ oc create -f - <_ ---templates /usr/share/openstack-tripleo-heat-templates \ - --environment-file _<...other_environment_files...>_ \ - --environment-file /usr/share/openstack-tripleo-heat-templates/environments/metrics/ceilometer-write-qdr.yaml \ - --environment-file /usr/share/openstack-tripleo-heat-templates/environments/metrics/qdr-edge-only.yaml \ - --environment-file /home/stack/enable-stf.yaml \ - --environment-file /home/stack/stf-connectors.yaml +(undercloud)$ openstack overcloud deploy --templates \ + -e [your environment files] \ + -e /usr/share/openstack-tripleo-heat-templates/environments/metrics/ceilometer-write-qdr.yaml \ + -e /usr/share/openstack-tripleo-heat-templates/environments/metrics/qdr-edge-only.yaml \ + -e /home/stack/enable-stf.yaml \ + -e /home/stack/stf-connectors.yaml ---- + +* Include the `ceilometer-write-qdr.yaml` file to ensure that Ceilometer telemetry and events are sent to {ProjectShort}. +* Include the `qdr-edge-only.yaml` file to ensure that the message bus is enabled and connected to {ProjectShort} message bus routers. +* Include the `enable-stf.yaml` environment file to ensure that the defaults are configured correctly. +* Include the `stf-connectors.yaml` environment file to define the connection to {ProjectShort}. diff --git a/doc-Service-Telemetry-Framework/modules/proc_disabling-openstack-services-used-with-stf.adoc b/doc-Service-Telemetry-Framework/modules/proc_disabling-openstack-services-used-with-stf.adoc index 5f6edaf1..2e7eb200 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_disabling-openstack-services-used-with-stf.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_disabling-openstack-services-used-with-stf.adoc @@ -6,22 +6,20 @@ Disable the services used when deploying {OpenStack} ({OpenStackShort}) and conn .Procedure -. 
Log in to the {OpenStackShort} undercloud as the `stack` user. +. Log in to the undercloud host as the `stack` user. -. Source the authentication file: +. Source the `stackrc` undercloud credentials file: + [source,bash] ---- -[stack@undercloud-0 ~]$ source stackrc - -(undercloud) [stack@undercloud-0 ~]$ +$ source ~/stackrc ---- . Create the `disable-stf.yaml` environment file: + [source,yaml,options="nowrap"] ---- -(undercloud) [stack@undercloud-0]$ cat > $HOME/disable-stf.yaml < ~/disable-stf.yaml <_ ---templates /usr/share/openstack-tripleo-heat-templates \ - --environment-file /home/stack/disable-stf.yaml - --environment-file __ \ +(undercloud)$ openstack overcloud deploy --templates \ +-e /home/stack/disable-stf.yaml \ +-e [your environment files] ---- diff --git a/doc-Service-Telemetry-Framework/modules/proc_getting-ca-certificate-from-stf-for-overcloud-configuration.adoc b/doc-Service-Telemetry-Framework/modules/proc_getting-ca-certificate-from-stf-for-overcloud-configuration.adoc index 1db731fc..fbae53c3 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_getting-ca-certificate-from-stf-for-overcloud-configuration.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_getting-ca-certificate-from-stf-for-overcloud-configuration.adoc @@ -24,11 +24,11 @@ = Getting CA certificate from {Project} for overcloud configuration [role="_abstract"] -To connect your {OpenStack} overcloud to {Project} ({ProjectShort}), retrieve the CA certificate of {MessageBus} that runs within {Project} and use the certificate in the {OpenStack} configuration. +To connect your {OpenStack} ({OpenStackShort}) overcloud to {Project} ({ProjectShort}), retrieve the CA certificate of {MessageBus} that runs within {ProjectShort} and use the certificate in {OpenStackShort} configuration. .Procedure -. View a list of available certificates in {Project}: +. 
View a list of available certificates in {ProjectShort}: + [source,bash,options="nowrap",subs="verbatim"] ---- @@ -40,4 +40,4 @@ $ oc get secrets [source,bash,options="nowrap",subs="verbatim"] ---- $ oc get secret/default-interconnect-selfsigned -o jsonpath='{.data.ca\.crt}' | base64 -d ----- \ No newline at end of file +---- diff --git a/doc-Service-Telemetry-Framework/modules/proc_importing-dashboards.adoc b/doc-Service-Telemetry-Framework/modules/proc_importing-dashboards.adoc index 0656babd..06b30f6e 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_importing-dashboards.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_importing-dashboards.adoc @@ -18,7 +18,7 @@ grafanadashboard.integreatly.org/rhos-dashboard-1 created . Import the cloud dashboard: + [WARNING] -For some panels in the cloud dashboard, you must set the value of the collectd `virt` plugin parameter `hostname_format` to `name uuid hostname` in the `stf-connectors.yaml` file. If you do not configure this parameter, affected dashboards remain empty. For more information about the `virt` plugin, see https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/{vernum}/html-single/service_telemetry_framework_{ProductVersion}/index#collectd-plugins_assembly[collectd plugins]. +In the `stf-connectors.yaml` file, ensure you set the value of the collectd `virt` plugin parameter `hostname_format` to `name uuid hostname`, otherwise some of the panels on the cloud dashboard display no information. For more information about the `virt` plugin, see link:{defaultURL}/operational_measurements/collectd-plugins_assembly[collectd plugins]. 
+ [source,bash,options="nowrap"] ---- @@ -57,12 +57,12 @@ grafanadashboard.integreatly.org/memcached-dashboard-1 created ---- $ oc get grafanadashboards -NAME AGE -memcached-dashboard-1 115s -rhos-cloud-dashboard-1 2m12s -rhos-cloudevents-dashboard 2m6s -rhos-dashboard-1 2m17s -virtual-machine-view-1 2m +NAME AGE +memcached-dashboard-1 7s +rhos-cloud-dashboard-1 23s +rhos-cloudevents-dashboard 18s +rhos-dashboard-1 29s +virtual-machine-view-1 13s ---- . Retrieve the Grafana route address: diff --git a/doc-Service-Telemetry-Framework/modules/proc_installing-the-service-telemetry-framework-1-5-operators.adoc b/doc-Service-Telemetry-Framework/modules/proc_installing-the-service-telemetry-framework-1-5-operators.adoc index 0295448e..39a5b755 100644 --- a/doc-Service-Telemetry-Framework/modules/proc_installing-the-service-telemetry-framework-1-5-operators.adoc +++ b/doc-Service-Telemetry-Framework/modules/proc_installing-the-service-telemetry-framework-1-5-operators.adoc @@ -9,7 +9,7 @@ [id="installing-the-service-telemetry-framework-1-5-operators_{context}"] = Installing the {Project} 1.5 Operators -Install the {Project} ({ProjectShort}) 1.5 Operators and the Certificate Manager for OpenShift Operator on your {OpenShift} 4.10 environment. {ProjectShort} 1.5 only supports {OpenShift} 4.10. Installing {ProjectShort} into disconnected or restricted network environments is unsupported. +Install the {Project} ({ProjectShort}) 1.5 Operators and the Certificate Manager for OpenShift Operator on your {OpenShift} environment. See xref:support-for-project_assembly-introduction-to-stf[] for more information about {ProjectShort} support status and life cycle. ifdef::include_when_13,include_when_17[] [NOTE] @@ -35,7 +35,7 @@ $ oc project service-telemetry . Create a `namespace` for the `cert-manager` Operator: + -[source,bash] +[source,yaml] ---- $ oc create -f - <