From 8ca804b368fecb8e3e0d752b9c1c94bb09584eb3 Mon Sep 17 00:00:00 2001 From: asaezper Date: Mon, 17 Jun 2024 11:26:59 +0200 Subject: [PATCH 01/49] ci: fix docker names (+ not allowed) --- .gitlab/ci/docker.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab/ci/docker.yml b/.gitlab/ci/docker.yml index d60b128fa8..725bd54407 100644 --- a/.gitlab/ci/docker.yml +++ b/.gitlab/ci/docker.yml @@ -309,7 +309,7 @@ srsran image split72: ARCH: x86-64-v3 TAG: amd64-avx2 PLATFORM: amd64 - - SUFFIX: release+debug_avx2 + - SUFFIX: release_debug_avx2 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v3 TAG: amd64-avx2 @@ -320,7 +320,7 @@ srsran image split72: ARCH: x86-64-v4 TAG: amd64-avx2-avx512 PLATFORM: amd64 - - SUFFIX: release+debug_avx512 + - SUFFIX: release_debug_avx512 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v4 TAG: amd64-avx2-avx512 @@ -331,7 +331,7 @@ srsran image split72: ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 PLATFORM: arm64 - - SUFFIX: release+debug_arm + - SUFFIX: release_debug_arm EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 @@ -350,7 +350,7 @@ srsran image split8: ARCH: x86-64-v3 TAG: amd64-avx2 PLATFORM: amd64 - - SUFFIX: release+debug_avx2 + - SUFFIX: release_debug_avx2 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v3 TAG: amd64-avx2 @@ -361,7 +361,7 @@ srsran image split8: ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 PLATFORM: arm64 - - SUFFIX: release+debug_arm + - SUFFIX: release_debug_arm EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 From aa196b53d5d869bc36adbecb7e8bdfd191a9342e Mon Sep 17 00:00:00 2001 From: asaezper Date: Mon, 17 Jun 2024 11:28:32 +0200 Subject: [PATCH 02/49] ci: fix test timeout in valgrind --- .gitlab/ci/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 391f2f4a6c..0be9a2430b 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -255,7 +255,7 @@ variables: ;; valgrind) G_DEBUG=gc-friendly G_SLICE=always-malloc - ctest_extra="-T memcheck -LE NO_MEMCHECK --timeout 10800" + ctest_extra="-T memcheck -LE NO_MEMCHECK --test-timeout 10800" ;; esac if [ -n "${FINGERPRINT}" ]; then From f37dee3a421d09aed767d14eaeff32e16147abf8 Mon Sep 17 00:00:00 2001 From: asaezper Date: Mon, 17 Jun 2024 11:28:45 +0200 Subject: [PATCH 03/49] ci: increase pod timeout in ru dummy tests --- .gitlab/ci/e2e.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/ci/e2e.yml b/.gitlab/ci/e2e.yml index e0125e5112..16ee7dba5a 100644 --- a/.gitlab/ci/e2e.yml +++ b/.gitlab/ci/e2e.yml @@ -388,6 +388,7 @@ test mode ru: MARKERS: "test_mode" KEYWORDS: "test_ru" E2E_LOG_LEVEL: "warning" + RETINA_LAUNCHER_ARGS: "--retina-pod-timeout 900" needs: - job: "basic avx512 dpdk" artifacts: true From 2dfd45e9233d507543328a8bee322bb6025a5f55 Mon Sep 17 00:00:00 2001 From: Robert Falkenberg Date: Fri, 14 Jun 2024 22:08:37 +0200 Subject: [PATCH 04/49] gtpu,ngu: configurable warning on expired t_reordering --- include/srsran/gtpu/gtpu_config.h | 3 ++- lib/cu_up/pdu_session_manager_impl.cpp | 1 + lib/gtpu/gtpu_tunnel_ngu_rx_impl.h | 9 +++++++-- tests/unittests/gtpu/gtpu_tunnel_ngu_rx_test.cpp | 8 ++++++++ 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/include/srsran/gtpu/gtpu_config.h b/include/srsran/gtpu/gtpu_config.h index bdd7d29592..3b27e05d16 
100644 --- a/include/srsran/gtpu/gtpu_config.h +++ b/include/srsran/gtpu/gtpu_config.h @@ -26,7 +26,8 @@ constexpr unsigned GTPU_PORT = 2152; struct gtpu_tunnel_ngu_config { struct gtpu_tunnel_ngu_rx_config { gtpu_teid_t local_teid; - std::chrono::milliseconds t_reordering = {}; + std::chrono::milliseconds t_reordering = {}; + bool warn_expired_t_reordering = false; } rx; struct gtpu_tunnel_ngu_tx_config { gtpu_teid_t peer_teid; diff --git a/lib/cu_up/pdu_session_manager_impl.cpp b/lib/cu_up/pdu_session_manager_impl.cpp index e6015621ec..cc02d13e8a 100644 --- a/lib/cu_up/pdu_session_manager_impl.cpp +++ b/lib/cu_up/pdu_session_manager_impl.cpp @@ -292,6 +292,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ msg.cfg.tx.peer_port = net_config.upf_port; msg.cfg.rx.local_teid = new_session->local_teid; msg.cfg.rx.t_reordering = n3_config.gtpu_reordering_timer; + msg.cfg.rx.warn_expired_t_reordering = n3_config.warn_on_drop; msg.rx_lower = &new_session->gtpu_to_sdap_adapter; msg.tx_upper = >pu_tx_notifier; msg.gtpu_pcap = >pu_pcap; diff --git a/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h b/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h index f3a7516600..ec6d830715 100644 --- a/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h +++ b/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h @@ -270,8 +270,13 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx explicit reordering_callback(gtpu_tunnel_ngu_rx_impl* parent_) : parent(parent_) {} void operator()(timer_id_t timer_id) { - parent->logger.log_warning( - "reordering timer expired after {}ms. {}", parent->config.t_reordering.count(), parent->st); + if (not parent->config.warn_expired_t_reordering) { + parent->logger.log_info( + "reordering timer expired after {}ms. {}", parent->config.t_reordering.count(), parent->st); + } else { + parent->logger.log_warning( + "reordering timer expired after {}ms. 
{}", parent->config.t_reordering.count(), parent->st); + } parent->handle_t_reordering_expire(); } diff --git a/tests/unittests/gtpu/gtpu_tunnel_ngu_rx_test.cpp b/tests/unittests/gtpu/gtpu_tunnel_ngu_rx_test.cpp index f2aa2d55cf..41aff13bef 100644 --- a/tests/unittests/gtpu/gtpu_tunnel_ngu_rx_test.cpp +++ b/tests/unittests/gtpu/gtpu_tunnel_ngu_rx_test.cpp @@ -190,6 +190,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, entity_creation) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = false; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); @@ -203,6 +204,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_no_sn) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = false; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -228,6 +230,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_in_order) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = true; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -253,6 +256,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_out_of_order) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = true; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -329,6 +333,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_out_of_order_two_holes) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = true; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -403,6 +408,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_t_reordering_expiration) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = true; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -475,6 +481,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_t_reordering_two_holes) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = true; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); @@ -552,6 +559,7 @@ TEST_F(gtpu_tunnel_ngu_rx_test, rx_stop) gtpu_tunnel_ngu_config::gtpu_tunnel_ngu_rx_config rx_cfg = {}; rx_cfg.local_teid = gtpu_teid_t{0x1}; rx_cfg.t_reordering = std::chrono::milliseconds{10}; + rx_cfg.warn_expired_t_reordering = false; rx = std::make_unique(srs_cu_up::ue_index_t::MIN_UE_INDEX, rx_cfg, rx_lower, timers); ASSERT_NE(rx, nullptr); From 284493df172949b7b45ea5bc19f7a734e6f32c3c Mon Sep 17 00:00:00 2001 From: Andre Puschmann Date: Fri, 14 Jun 2024 17:39:53 +0200 Subject: [PATCH 05/49] copyright: add 2-clause-BSD license text 
--- COPYRIGHT | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/COPYRIGHT b/COPYRIGHT index 55d0aec68a..2b08009f4c 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -82,6 +82,31 @@ License: MIT such object form without including the above copyright and permission notices. +License: BSD-2-clause (Simplified BSD) + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + . + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + . + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + License: BSD-3-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are From 0ebd8fefa969eb2bcf9d27a5bc92c1d01eda1f8e Mon Sep 17 00:00:00 2001 From: Robert Falkenberg Date: Fri, 14 Jun 2024 21:15:04 +0200 Subject: [PATCH 06/49] gtpu,ngu: extend logging with PDU length --- lib/gtpu/gtpu_tunnel_ngu_rx_impl.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h b/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h index ec6d830715..2c01a305ae 100644 --- a/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h +++ b/lib/gtpu/gtpu_tunnel_ngu_rx_impl.h @@ -88,6 +88,7 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx return; } + size_t pdu_len = pdu.buf.length(); gtpu_teid_t teid = pdu.hdr.teid; psup_dl_pdu_session_information pdu_session_info = {}; bool have_pdu_session_info = false; @@ -97,28 +98,29 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx if (!have_pdu_session_info) { have_pdu_session_info = psup_packer.unpack(pdu_session_info, ext_hdr.container); if (!have_pdu_session_info) { - logger.log_error("Failed to unpack PDU session container."); + logger.log_error("Failed to unpack PDU session container. pdu_len={}", pdu_len); } } else { - logger.log_warning("Ignoring multiple PDU session container."); + logger.log_warning("Ignoring multiple PDU session container. pdu_len={}", pdu_len); } break; default: - logger.log_warning("Ignoring unexpected extension header at NG-U interface. type={}", - ext_hdr.extension_header_type); + logger.log_warning("Ignoring unexpected extension header at NG-U interface. type={} pdu_len={}", + ext_hdr.extension_header_type, + pdu_len); } } if (!have_pdu_session_info) { logger.log_warning( "Incomplete PDU at NG-U interface: missing or invalid PDU session container. 
pdu_len={} teid={}", - pdu.buf.length(), + pdu_len, teid); // As per TS 29.281 Sec. 5.2.2.7 the (...) PDU Session Container (...) shall be transmitted in a G-PDU over the // N3 and N9 user plane interfaces (...). return; } - logger.log_debug(pdu.buf.begin(), pdu.buf.end(), "RX PDU. sdu_len={} {}", pdu.buf.length(), st); + logger.log_debug(pdu.buf.begin(), pdu.buf.end(), "RX PDU. pdu_len={} {}", pdu_len, st); if (!pdu.hdr.flags.seq_number || config.t_reordering.count() == 0) { // Forward this SDU straight away. @@ -133,7 +135,7 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx // Check out-of-window if (!inside_rx_window(sn)) { - logger.log_warning("SN falls out of Rx window. sn={} {}", sn, st); + logger.log_warning("SN falls out of Rx window. sn={} pdu_len={} {}", sn, pdu_len, st); gtpu_rx_sdu_info rx_sdu_info = {std::move(rx_sdu), pdu_session_info.qos_flow_id, sn}; deliver_sdu(rx_sdu_info); return; @@ -141,7 +143,7 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx // Check late SN if (rx_mod_base(sn) < rx_mod_base(st.rx_deliv)) { - logger.log_debug("Out-of-order after timeout or duplicate. sn={} {}", sn, st); + logger.log_debug("Out-of-order after timeout or duplicate. sn={} pdu_len={} {}", sn, pdu_len, st); gtpu_rx_sdu_info rx_sdu_info = {std::move(rx_sdu), pdu_session_info.qos_flow_id, sn}; deliver_sdu(rx_sdu_info); return; @@ -149,7 +151,7 @@ class gtpu_tunnel_ngu_rx_impl : public gtpu_tunnel_base_rx // Check if PDU has been received if (rx_window->has_sn(sn)) { - logger.log_warning("Duplicate PDU dropped. sn={}", sn); + logger.log_warning("Duplicate PDU dropped. sn={} pdu_len={}", sn, pdu_len); return; } From 5bbd23c2cf507aa2be2ab567d52d3b781437d417 Mon Sep 17 00:00:00 2001 From: Robert Falkenberg Date: Fri, 14 Jun 2024 21:15:39 +0200 Subject: [PATCH 07/49] gtpu,nru: extend logging with PDU length --- lib/gtpu/gtpu_tunnel_nru_rx_impl.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/lib/gtpu/gtpu_tunnel_nru_rx_impl.h b/lib/gtpu/gtpu_tunnel_nru_rx_impl.h index ced3dc6147..ff375673e3 100644 --- a/lib/gtpu/gtpu_tunnel_nru_rx_impl.h +++ b/lib/gtpu/gtpu_tunnel_nru_rx_impl.h @@ -37,7 +37,8 @@ class gtpu_tunnel_nru_rx_impl : public gtpu_tunnel_base_rx // domain-specific PDU handler void handle_pdu(gtpu_dissected_pdu&& pdu, const sockaddr_storage& src_addr) final { - gtpu_teid_t teid = pdu.hdr.teid; + size_t pdu_len = pdu.buf.length(); + gtpu_teid_t teid = pdu.hdr.teid; std::variant nru_msg; bool have_nr_ran_container = false; for (auto ext_hdr : pdu.hdr.ext_list) { @@ -56,29 +57,31 @@ class gtpu_tunnel_nru_rx_impl : public gtpu_tunnel_base_rx packer.unpack(std::get(nru_msg), ext_hdr.container); break; default: - logger.log_warning("Unsupported PDU type in NR RAN container. pdu_type={}", pdu_type); + logger.log_warning( + "Unsupported PDU type in NR RAN container. pdu_type={} pdu_len={}", pdu_type, pdu_len); return; } if (!have_nr_ran_container) { - logger.log_error("Failed to unpack NR RAN container."); + logger.log_error("Failed to unpack NR RAN container. pdu_len={}", pdu_len); } } else { - logger.log_warning("Ignoring multiple NR RAN container."); + logger.log_warning("Ignoring multiple NR RAN container. pdu_len={}", pdu_len); } break; default: - logger.log_warning("Ignoring unexpected extension header at F1-U interface. type={}", - ext_hdr.extension_header_type); + logger.log_warning("Ignoring unexpected extension header at F1-U interface. 
type={} pdu_len={}", + ext_hdr.extension_header_type, + pdu_len); } } if (!have_nr_ran_container) { // As per TS 29.281 Sec. 5.2.2.6 the (...) NR RAN Container (...) may be transmitted in a G-PDU over the // X2-U, Xn-U and F1-U user plane interfaces (...). - logger.log_info("T-PDU without NR RAN container. Assuming UL. pdu_len={} teid={}", pdu.buf.length(), teid); + logger.log_info("T-PDU without NR RAN container. Assuming UL. pdu_len={} teid={}", pdu_len, teid); nru_msg = {nru_dl_data_delivery_status{}}; // set to UL } - logger.log_debug(pdu.buf.begin(), pdu.buf.end(), "RX PDU. pdu_len={}", pdu.buf.length()); + logger.log_debug(pdu.buf.begin(), pdu.buf.end(), "RX PDU. pdu_len={}", pdu_len); if (std::holds_alternative(nru_msg)) { nru_dl_message dl_message = {}; @@ -96,7 +99,7 @@ class gtpu_tunnel_nru_rx_impl : public gtpu_tunnel_base_rx expected buf = byte_buffer_chain::create(gtpu_extract_msg(std::move(pdu))); // header is invalidated after extraction; if (buf.is_error()) { - logger.log_error("Dropped PDU: Failed to create byte_buffer_chain"); + logger.log_error("Dropped PDU: Failed to create byte_buffer_chain. pdu_len={}", pdu_len); return; } if (!buf.value().empty()) { @@ -117,7 +120,7 @@ class gtpu_tunnel_nru_rx_impl : public gtpu_tunnel_base_rx } // We should never come here - logger.log_error("Unhandled NR-U PDU"); + logger.log_error("Unhandled NR-U PDU. pdu_len={}", pdu_len); } private: From ec8a7db6c4452ff11457f4d4a5e085d38346b273 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 13 Jun 2024 16:36:00 +0100 Subject: [PATCH 08/49] apps,cu: rename f1u cli paramter to nru --- apps/cu/cu.cpp | 5 ++--- apps/cu/cu_appconfig.h | 8 -------- apps/cu/cu_appconfig_cli11_schema.cpp | 14 -------------- apps/units/cu_up/cu_up_unit_config.h | 7 +++++++ .../units/cu_up/cu_up_unit_config_cli11_schema.cpp | 14 ++++++++++++++ 5 files changed, 23 insertions(+), 25 deletions(-) diff --git a/apps/cu/cu.cpp b/apps/cu/cu.cpp index d6b2589204..055d2d0f0a 100644 --- a/apps/cu/cu.cpp +++ b/apps/cu/cu.cpp @@ -60,7 +60,6 @@ #include "apps/services/application_tracer.h" #include "apps/services/stdin_command_dispatcher.h" #include "apps/units/cu_cp/cu_cp_config_translators.h" -#include "apps/units/cu_up/cu_up_wrapper.h" #include "cu_appconfig.h" #include @@ -283,7 +282,7 @@ int main(int argc, char** argv) cu_f1u_gtpu_msg.gtpu_pcap = cu_up_dlt_pcaps.f1u.get(); std::unique_ptr cu_f1u_gtpu_demux = create_gtpu_demux(cu_f1u_gtpu_msg); udp_network_gateway_config cu_f1u_gw_config = {}; - cu_f1u_gw_config.bind_address = cu_cfg.f1u_cfg.f1u_bind_addr; + cu_f1u_gw_config.bind_address = cu_up_config.nru_cfg.bind_addr; cu_f1u_gw_config.bind_port = GTPU_PORT; cu_f1u_gw_config.reuse_addr = true; std::unique_ptr cu_f1u_gw = @@ -360,7 +359,7 @@ int main(int argc, char** argv) // function and create things direclty here. std::unique_ptr cu_up_obj = app_build_cu_up(cu_up_config, workers, - cu_cfg.f1u_cfg.f1u_bind_addr, + cu_up_config.nru_cfg.bind_addr, *e1_gw, *cu_f1u_conn, *cu_up_dlt_pcaps.n3, diff --git a/apps/cu/cu_appconfig.h b/apps/cu/cu_appconfig.h index c3f9ca2884..e5cb2f4747 100644 --- a/apps/cu/cu_appconfig.h +++ b/apps/cu/cu_appconfig.h @@ -64,19 +64,11 @@ struct expert_execution_appconfig { expert_threads_appconfig threads; }; -struct cu_up_f1u_appconfig { - std::string f1u_bind_addr = "127.0.10.1"; // Bind address used by the F1-U interface - int udp_rx_max_msgs = 256; // Max number of UDP packets received by a single syscall on the F1-U interface. -}; - /// Monolithic gnb application configuration. 
struct cu_appconfig { /// Logging configuration. log_appconfig log_cfg; - /// F1-U split configuration. - cu_up_f1u_appconfig f1u_cfg; - /// Expert configuration. expert_execution_appconfig expert_execution_cfg; diff --git a/apps/cu/cu_appconfig_cli11_schema.cpp b/apps/cu/cu_appconfig_cli11_schema.cpp index f663c7bd1b..e5bbb0b241 100644 --- a/apps/cu/cu_appconfig_cli11_schema.cpp +++ b/apps/cu/cu_appconfig_cli11_schema.cpp @@ -83,16 +83,6 @@ static void configure_cli11_log_args(CLI::App& app, log_appconfig& log_params) }); } -static void configure_cli11_f1u_args(CLI::App& app, cu_up_f1u_appconfig& f1u_cfg) -{ - add_option(app, - "--f1u_bind_addr", - f1u_cfg.f1u_bind_addr, - "Default local IP address interfaces bind to, unless a specific bind address is specified") - ->check(CLI::ValidIPV4); - add_option(app, "--udp_max_rx_msgs", f1u_cfg.udp_rx_max_msgs, "Maximum amount of messages RX in a single syscall"); -} - void srsran::configure_cli11_with_cu_appconfig_schema(CLI::App& app, cu_appconfig& cu_parsed_cfg) { cu_appconfig& cu_cfg = cu_parsed_cfg; @@ -100,8 +90,4 @@ void srsran::configure_cli11_with_cu_appconfig_schema(CLI::App& app, cu_appconfi // Logging section. CLI::App* log_subcmd = app.add_subcommand("log", "Logging configuration")->configurable(); configure_cli11_log_args(*log_subcmd, cu_cfg.log_cfg); - - // F1-U section. - CLI::App* f1u_subcmd = add_subcommand(app, "f1u", "F1-U parameters")->configurable(); - configure_cli11_f1u_args(*f1u_subcmd, cu_parsed_cfg.f1u_cfg); } diff --git a/apps/units/cu_up/cu_up_unit_config.h b/apps/units/cu_up/cu_up_unit_config.h index ed4b494798..94884a6025 100644 --- a/apps/units/cu_up/cu_up_unit_config.h +++ b/apps/units/cu_up/cu_up_unit_config.h @@ -34,6 +34,11 @@ struct cu_up_unit_upf_config { bool no_core = false; }; +struct cu_up_nru_appconfig { + std::string bind_addr = "127.0.10.1"; // Bind address used by the F1-U interface + int udp_rx_max_msgs = 256; // Max number of UDP packets received by a single syscall on the F1-U interface. +}; + /// QoS configuration. struct cu_up_unit_qos_config { five_qi_t five_qi = uint_to_five_qi(9); @@ -54,6 +59,8 @@ struct cu_up_unit_config { cu_up_unit_logger_config loggers; /// PCAPs. cu_up_unit_pcap_config pcap_cfg; + /// NR-U + cu_up_nru_appconfig nru_cfg; /// QoS configuration. std::vector qos_cfg; }; diff --git a/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp b/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp index f74d1334b2..b8d62f85c0 100644 --- a/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp +++ b/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp @@ -16,6 +16,16 @@ using namespace srsran; +static void configure_cli11_nru_args(CLI::App& app, cu_up_nru_appconfig& nru_cfg) +{ + add_option(app, + "--bind_addr", + nru_cfg.bind_addr, + "Default local IP address interfaces bind to, unless a specific bind address is specified") + ->check(CLI::ValidIPV4); + add_option(app, "--udp_max_rx_msgs", nru_cfg.udp_rx_max_msgs, "Maximum amount of messages RX in a single syscall"); +} + static void configure_cli11_cu_up_args(CLI::App& app, cu_up_unit_config& cu_up_params) { add_option(app, "--gtpu_queue_size", cu_up_params.gtpu_queue_size, "GTP-U queue size, in PDUs") @@ -30,6 +40,10 @@ static void configure_cli11_cu_up_args(CLI::App& app, cu_up_unit_config& cu_up_p cu_up_params.warn_on_drop, "Log a warning for dropped packets in GTP-U, SDAP, PDCP and F1-U due to full queues") ->capture_default_str(); + + // NR-U section. 
+ CLI::App* nru_subcmd = add_subcommand(app, "nru", "NR-U parameters")->configurable(); + configure_cli11_nru_args(*nru_subcmd, cu_up_params.nru_cfg); } static void configure_cli11_log_args(CLI::App& app, cu_up_unit_logger_config& log_params) From 834ba93c260a89deb73ae55b666c4fcb1cb456a7 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 13 Jun 2024 17:34:00 +0100 Subject: [PATCH 09/49] apps,du: rename f1u to nru in DU app --- apps/du/du.cpp | 5 ++--- apps/du/du_appconfig.h | 4 ++-- apps/du/du_appconfig_cli11_schema.cpp | 6 +++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/apps/du/du.cpp b/apps/du/du.cpp index d9c2baf211..0a713d5f6e 100644 --- a/apps/du/du.cpp +++ b/apps/du/du.cpp @@ -9,7 +9,6 @@ */ #include "srsran/gtpu/gtpu_config.h" -#include "srsran/pcap/dlt_pcap.h" #include "srsran/pcap/mac_pcap.h" #include "srsran/support/build_info/build_info.h" #include "srsran/support/cpu_features.h" @@ -240,7 +239,7 @@ int main(int argc, char** argv) cu_cp_unit_pcap_config dummy_cu_cp_pcap{}; cu_up_unit_pcap_config dummy_cu_up_pcap{}; worker_manager workers{ - du_unit_cfg, du_cfg.expert_execution_cfg, dummy_cu_cp_pcap, dummy_cu_up_pcap, du_cfg.f1u_cfg.pdu_queue_size}; + du_unit_cfg, du_cfg.expert_execution_cfg, dummy_cu_cp_pcap, dummy_cu_up_pcap, du_cfg.nru_cfg.pdu_queue_size}; // Set layer-specific pcap options. const auto& low_prio_cpu_mask = du_cfg.expert_execution_cfg.affinities.low_priority_cpu_cfg.mask; @@ -270,7 +269,7 @@ int main(int argc, char** argv) du_f1u_gtpu_msg.gtpu_pcap = du_dlt_pcaps.f1u.get(); std::unique_ptr du_f1u_gtpu_demux = create_gtpu_demux(du_f1u_gtpu_msg); udp_network_gateway_config du_f1u_gw_config = {}; - du_f1u_gw_config.bind_address = du_cfg.f1u_cfg.bind_address; + du_f1u_gw_config.bind_address = du_cfg.nru_cfg.bind_address; du_f1u_gw_config.bind_port = GTPU_PORT; du_f1u_gw_config.reuse_addr = true; std::unique_ptr du_f1u_gw = srs_cu_up::create_udp_ngu_gateway( diff --git a/apps/du/du_appconfig.h b/apps/du/du_appconfig.h index 6eb8106e82..4cb308089f 100644 --- a/apps/du/du_appconfig.h +++ b/apps/du/du_appconfig.h @@ -46,7 +46,7 @@ struct f1ap_appconfig { std::string bind_address = "127.0.10.2"; }; -struct f1u_appconfig { +struct nru_appconfig { unsigned pdu_queue_size = 2048; /// IP address to bind the F1-U interface to. std::string bind_address = "127.0.10.2"; @@ -75,7 +75,7 @@ struct du_appconfig { /// F1-C configuration. srs_du::f1ap_appconfig f1ap_cfg; /// F1-U configuration. - srs_du::f1u_appconfig f1u_cfg; + srs_du::nru_appconfig nru_cfg; /// Buffer pool configuration. buffer_pool_appconfig buffer_pool_config; /// Expert configuration. diff --git a/apps/du/du_appconfig_cli11_schema.cpp b/apps/du/du_appconfig_cli11_schema.cpp index 50da83b80a..2fa3637b0a 100644 --- a/apps/du/du_appconfig_cli11_schema.cpp +++ b/apps/du/du_appconfig_cli11_schema.cpp @@ -303,7 +303,7 @@ static void configure_cli11_f1ap_args(CLI::App& app, srs_du::f1ap_appconfig& f1c ->capture_default_str(); } -static void configure_cli11_f1u_args(CLI::App& app, srs_du::f1u_appconfig& f1u_params) +static void configure_cli11_f1u_args(CLI::App& app, srs_du::nru_appconfig& f1u_params) { app.add_option("--queue_size", f1u_params.pdu_queue_size, "F1-U PDU queue size")->capture_default_str(); app.add_option( @@ -325,8 +325,8 @@ void srsran::configure_cli11_with_du_appconfig_schema(CLI::App& app, du_appconfi configure_cli11_f1ap_args(*f1ap_subcmd, du_cfg.f1ap_cfg); // F1-U section. 
- CLI::App* f1u_subcmd = app.add_subcommand("f1u", "F1-U interface configuration")->configurable(); - configure_cli11_f1u_args(*f1u_subcmd, du_cfg.f1u_cfg); + CLI::App* nru_subcmd = app.add_subcommand("nru", "NR-U interface configuration")->configurable(); + configure_cli11_f1u_args(*nru_subcmd, du_cfg.nru_cfg); // Logging section. CLI::App* log_subcmd = app.add_subcommand("log", "Logging configuration")->configurable(); From f403e40f23b84edc17754dcfadffd0c761754ad1 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Fri, 14 Jun 2024 17:53:35 +0100 Subject: [PATCH 10/49] apps,cu: remove F1AP/NR-U bind address config from main app --- apps/cu/cu.cpp | 6 ++--- apps/cu/cu_appconfig.h | 18 +++++++++++++ apps/cu/cu_appconfig_cli11_schema.cpp | 27 ++++++++++++++++++- apps/units/cu_cp/cu_cp_unit_config.h | 3 --- .../cu_cp/cu_cp_unit_config_cli11_schema.cpp | 3 +-- apps/units/cu_up/cu_up_unit_config.h | 7 ----- .../cu_up/cu_up_unit_config_cli11_schema.cpp | 14 ---------- 7 files changed, 48 insertions(+), 30 deletions(-) diff --git a/apps/cu/cu.cpp b/apps/cu/cu.cpp index 055d2d0f0a..4cc42cfee9 100644 --- a/apps/cu/cu.cpp +++ b/apps/cu/cu.cpp @@ -270,7 +270,7 @@ int main(int argc, char** argv) // Create F1-C GW (TODO cleanup port and PPID args with factory) sctp_network_gateway_config f1c_sctp_cfg = {}; f1c_sctp_cfg.if_name = "F1-C"; - f1c_sctp_cfg.bind_address = cu_cp_config.f1ap_config.f1c_bind_address; + f1c_sctp_cfg.bind_address = cu_cfg.f1ap_cfg.bind_address; f1c_sctp_cfg.bind_port = 38471; f1c_sctp_cfg.ppid = F1AP_PPID; f1c_cu_sctp_gateway_config f1c_server_cfg({f1c_sctp_cfg, *epoll_broker, *cu_cp_dlt_pcaps.f1ap}); @@ -282,7 +282,7 @@ int main(int argc, char** argv) cu_f1u_gtpu_msg.gtpu_pcap = cu_up_dlt_pcaps.f1u.get(); std::unique_ptr cu_f1u_gtpu_demux = create_gtpu_demux(cu_f1u_gtpu_msg); udp_network_gateway_config cu_f1u_gw_config = {}; - cu_f1u_gw_config.bind_address = cu_up_config.nru_cfg.bind_addr; + cu_f1u_gw_config.bind_address = cu_cfg.nru_cfg.bind_addr; cu_f1u_gw_config.bind_port = GTPU_PORT; cu_f1u_gw_config.reuse_addr = true; std::unique_ptr cu_f1u_gw = @@ -359,7 +359,7 @@ int main(int argc, char** argv) // function and create things direclty here. std::unique_ptr cu_up_obj = app_build_cu_up(cu_up_config, workers, - cu_up_config.nru_cfg.bind_addr, + cu_cfg.nru_cfg.bind_addr, *e1_gw, *cu_f1u_conn, *cu_up_dlt_pcaps.n3, diff --git a/apps/cu/cu_appconfig.h b/apps/cu/cu_appconfig.h index e5cb2f4747..b865504c6e 100644 --- a/apps/cu/cu_appconfig.h +++ b/apps/cu/cu_appconfig.h @@ -64,6 +64,18 @@ struct expert_execution_appconfig { expert_threads_appconfig threads; }; +/// NR-U configuration +struct cu_nru_appconfig { + std::string bind_addr = "127.0.10.1"; // Bind address used by the F1-U interface + int udp_rx_max_msgs = 256; // Max number of UDP packets received by a single syscall on the F1-U interface. +}; + +/// F1AP configuration +struct cu_f1ap_appconfig { + /// F1-C bind address + std::string bind_address = "127.0.10.1"; +}; + /// Monolithic gnb application configuration. struct cu_appconfig { /// Logging configuration. @@ -72,6 +84,12 @@ struct cu_appconfig { /// Expert configuration. 
expert_execution_appconfig expert_execution_cfg; + /// NR-U + cu_nru_appconfig nru_cfg; + + /// F1AP + cu_f1ap_appconfig f1ap_cfg; + /// TODO fill in the rest of the configuration }; diff --git a/apps/cu/cu_appconfig_cli11_schema.cpp b/apps/cu/cu_appconfig_cli11_schema.cpp index e5bbb0b241..d5ae9e4c96 100644 --- a/apps/cu/cu_appconfig_cli11_schema.cpp +++ b/apps/cu/cu_appconfig_cli11_schema.cpp @@ -83,11 +83,36 @@ static void configure_cli11_log_args(CLI::App& app, log_appconfig& log_params) }); } +static void configure_cli11_f1ap_args(CLI::App& app, cu_f1ap_appconfig& f1ap_params) +{ + add_option(app, "--bind_address", f1ap_params.bind_address, "F1-C bind address")->capture_default_str(); +} + +static void configure_cli11_nru_args(CLI::App& app, cu_nru_appconfig& nru_cfg) +{ + add_option(app, + "--bind_addr", + nru_cfg.bind_addr, + "Default local IP address interfaces bind to, unless a specific bind address is specified") + ->check(CLI::ValidIPV4); + add_option(app, "--udp_max_rx_msgs", nru_cfg.udp_rx_max_msgs, "Maximum amount of messages RX in a single syscall"); +} + void srsran::configure_cli11_with_cu_appconfig_schema(CLI::App& app, cu_appconfig& cu_parsed_cfg) { cu_appconfig& cu_cfg = cu_parsed_cfg; // Logging section. - CLI::App* log_subcmd = app.add_subcommand("log", "Logging configuration")->configurable(); + CLI::App* log_subcmd = add_subcommand(app, "log", "Logging configuration")->configurable(); configure_cli11_log_args(*log_subcmd, cu_cfg.log_cfg); + + // F1AP section. + CLI::App* cu_cp_subcmd = add_subcommand(app, "cu_cp", "CU-UP parameters")->configurable(); + CLI::App* f1ap_subcmd = add_subcommand(*cu_cp_subcmd, "f1ap", "F1AP parameters")->configurable(); + configure_cli11_f1ap_args(*f1ap_subcmd, cu_parsed_cfg.f1ap_cfg); + + // NR-U section. + CLI::App* cu_up_subcmd = add_subcommand(app, "cu_up", "CU-UP parameters")->configurable(); + CLI::App* nru_subcmd = add_subcommand(*cu_up_subcmd, "nru", "NR-U parameters")->configurable(); + configure_cli11_nru_args(*nru_subcmd, cu_parsed_cfg.nru_cfg); } diff --git a/apps/units/cu_cp/cu_cp_unit_config.h b/apps/units/cu_cp/cu_cp_unit_config.h index eeb90d8d23..44f36fde32 100644 --- a/apps/units/cu_cp/cu_cp_unit_config.h +++ b/apps/units/cu_cp/cu_cp_unit_config.h @@ -12,7 +12,6 @@ #include "apps/units/cu_cp/cu_cp_unit_pcap_config.h" #include "cu_cp_unit_logger_config.h" -#include "srsran/adt/optional.h" #include "srsran/ran/five_qi.h" #include "srsran/ran/gnb_id.h" #include "srsran/ran/nr_band.h" @@ -99,8 +98,6 @@ struct cu_cp_unit_security_config { struct cu_cp_unit_f1ap_config { /// Timeout for the UE context setup procedure in milliseconds. 
unsigned ue_context_setup_timeout = 1000; - /// F1-C bind address - std::string f1c_bind_address = "127.0.10.1"; }; /// RLC UM TX configuration diff --git a/apps/units/cu_cp/cu_cp_unit_config_cli11_schema.cpp b/apps/units/cu_cp/cu_cp_unit_config_cli11_schema.cpp index 04a58277ea..309120ad9c 100644 --- a/apps/units/cu_cp/cu_cp_unit_config_cli11_schema.cpp +++ b/apps/units/cu_cp/cu_cp_unit_config_cli11_schema.cpp @@ -227,7 +227,6 @@ static void configure_cli11_f1ap_args(CLI::App& app, cu_cp_unit_f1ap_config& f1a f1ap_params.ue_context_setup_timeout, "UE context setup timeout in milliseconds") ->capture_default_str(); - add_option(app, "--f1c_bind_address", f1ap_params.f1c_bind_address, "F1-C bind address")->capture_default_str(); } static void configure_cli11_cu_cp_args(CLI::App& app, cu_cp_unit_config& cu_cp_params) @@ -275,7 +274,7 @@ static void configure_cli11_cu_cp_args(CLI::App& app, cu_cp_unit_config& cu_cp_p CLI::App* security_subcmd = app.add_subcommand("security", "Security configuration"); configure_cli11_security_args(*security_subcmd, cu_cp_params.security_config); - CLI::App* f1ap_subcmd = app.add_subcommand("f1ap", "F1AP configuration"); + CLI::App* f1ap_subcmd = add_subcommand(app, "f1ap", "F1AP configuration parameters"); configure_cli11_f1ap_args(*f1ap_subcmd, cu_cp_params.f1ap_config); } diff --git a/apps/units/cu_up/cu_up_unit_config.h b/apps/units/cu_up/cu_up_unit_config.h index 94884a6025..ed4b494798 100644 --- a/apps/units/cu_up/cu_up_unit_config.h +++ b/apps/units/cu_up/cu_up_unit_config.h @@ -34,11 +34,6 @@ struct cu_up_unit_upf_config { bool no_core = false; }; -struct cu_up_nru_appconfig { - std::string bind_addr = "127.0.10.1"; // Bind address used by the F1-U interface - int udp_rx_max_msgs = 256; // Max number of UDP packets received by a single syscall on the F1-U interface. -}; - /// QoS configuration. struct cu_up_unit_qos_config { five_qi_t five_qi = uint_to_five_qi(9); @@ -59,8 +54,6 @@ struct cu_up_unit_config { cu_up_unit_logger_config loggers; /// PCAPs. cu_up_unit_pcap_config pcap_cfg; - /// NR-U - cu_up_nru_appconfig nru_cfg; /// QoS configuration. std::vector qos_cfg; }; diff --git a/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp b/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp index b8d62f85c0..f74d1334b2 100644 --- a/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp +++ b/apps/units/cu_up/cu_up_unit_config_cli11_schema.cpp @@ -16,16 +16,6 @@ using namespace srsran; -static void configure_cli11_nru_args(CLI::App& app, cu_up_nru_appconfig& nru_cfg) -{ - add_option(app, - "--bind_addr", - nru_cfg.bind_addr, - "Default local IP address interfaces bind to, unless a specific bind address is specified") - ->check(CLI::ValidIPV4); - add_option(app, "--udp_max_rx_msgs", nru_cfg.udp_rx_max_msgs, "Maximum amount of messages RX in a single syscall"); -} - static void configure_cli11_cu_up_args(CLI::App& app, cu_up_unit_config& cu_up_params) { add_option(app, "--gtpu_queue_size", cu_up_params.gtpu_queue_size, "GTP-U queue size, in PDUs") @@ -40,10 +30,6 @@ static void configure_cli11_cu_up_args(CLI::App& app, cu_up_unit_config& cu_up_p cu_up_params.warn_on_drop, "Log a warning for dropped packets in GTP-U, SDAP, PDCP and F1-U due to full queues") ->capture_default_str(); - - // NR-U section. 
- CLI::App* nru_subcmd = add_subcommand(app, "nru", "NR-U parameters")->configurable(); - configure_cli11_nru_args(*nru_subcmd, cu_up_params.nru_cfg); } static void configure_cli11_log_args(CLI::App& app, cu_up_unit_logger_config& log_params) From 93b7bd5befc4bd7f5cc1e6f4b7dda23267b35ad6 Mon Sep 17 00:00:00 2001 From: Oriol Font-Bach Date: Mon, 17 Jun 2024 14:12:15 +0000 Subject: [PATCH 11/49] hal: enable more flexible use of hardware-queues in accelerated ldpc functions, refines pdsch encoder factory --- include/srsran/hal/dpdk/bbdev/bbdev_acc.h | 37 ++++-- .../hw_accelerator_factories.h | 7 ++ .../hw_accelerator_pdsch_enc.h | 14 +++ .../pusch/hw_accelerator_factories.h | 2 + .../pusch/hw_accelerator_pusch_dec.h | 6 + .../channel_processor_factories.h | 2 - lib/hal/dpdk/bbdev/bbdev.cpp | 4 +- lib/hal/dpdk/bbdev/bbdev_acc.cpp | 109 +++++++++++------- .../hw_accelerator_factories.cpp | 15 ++- .../hw_accelerator_pdsch_enc_acc100_impl.cpp | 64 ++++++++-- .../hw_accelerator_pdsch_enc_acc100_impl.h | 52 +++++++-- .../hw_accelerator_pdsch_enc_impl.cpp | 20 ++++ .../hw_accelerator_pdsch_enc_impl.h | 16 +++ .../pusch/hw_accelerator_factories.cpp | 7 +- .../hw_accelerator_pusch_dec_acc100_impl.cpp | 48 ++++++-- .../hw_accelerator_pusch_dec_acc100_impl.h | 37 ++++-- .../pusch/hw_accelerator_pusch_dec_impl.cpp | 10 ++ .../pusch/hw_accelerator_pusch_dec_impl.h | 8 ++ .../channel_processor_factories.cpp | 7 +- .../pdsch_encoder_hw_impl.cpp | 8 ++ .../pdsch_encoder_hw_impl.h | 9 +- .../pusch/pusch_decoder_hw_impl.cpp | 6 + .../pdsch_encoder_hwacc_benchmark.cpp | 25 ++-- .../pdsch_processor_benchmark.cpp | 25 ++-- .../pusch/pusch_decoder_hwacc_benchmark.cpp | 19 ++- .../pusch/pusch_processor_benchmark.cpp | 19 ++- .../channel_processors/pdsch_encoder_test.cpp | 25 ++-- .../pdsch_processor_vectortest.cpp | 7 +- .../pusch/pusch_decoder_vectortest.cpp | 21 ++-- .../pusch/pusch_processor_vectortest.cpp | 1 + 30 files changed, 464 insertions(+), 166 deletions(-) diff --git a/include/srsran/hal/dpdk/bbdev/bbdev_acc.h b/include/srsran/hal/dpdk/bbdev/bbdev_acc.h index 7fc5e2cab3..eb2300a31e 100644 --- a/include/srsran/hal/dpdk/bbdev/bbdev_acc.h +++ b/include/srsran/hal/dpdk/bbdev/bbdev_acc.h @@ -11,6 +11,7 @@ #pragma once #include "srsran/adt/bounded_bitset.h" +#include "srsran/adt/mpmc_queue.h" #include "srsran/srslog/logger.h" #include "srsran/support/units.h" #include @@ -23,6 +24,8 @@ namespace dpdk { static constexpr unsigned MAX_NOF_BBDEV_QUEUES = 128; /// Maximum number of operations that can be stored in a hardware queue at a given time. static constexpr unsigned MAX_NOF_OP_IN_QUEUE = 16; +/// Maximum number of VF instances supported by a bbdev-based hardware-accelerator. +static constexpr unsigned MAX_NOF_BBDEV_VF_INSTANCES = 64; /// Configuration parameters and objects tied to a bbdev-based hardware-accelerator. struct bbdev_acc_configuration { @@ -49,9 +52,9 @@ class bbdev_acc { public: /// Constructor. - /// \param[in] dpdk Pointer to a dpdk-based hardware-accelerator interface. - /// \param[in] info bbdev Device information. - /// \param[in] cfg Configuration parameters of the bbdev-based hardware-accelerator. + /// \param[in] cfg Configuration parameters of the bbdev-based hardware-accelerator. + /// \param[in] info bbdev Device information. + /// \param[in] logger SRS logger. explicit bbdev_acc(const bbdev_acc_configuration& cfg, const ::rte_bbdev_info& info_, srslog::basic_logger& logger); /// Destructor. 
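[Editorial sketch, not part of the patch] The hunk that follows replaces the shared queue_used bitset in bbdev_acc with per-operation-type lock-free queues, so several encoder/decoder instances can reserve and release bbdev hardware queues concurrently. A minimal call-pattern sketch under that API is shown here, assuming an already-initialized bbdev_acc instance; the function name and the retry comment are illustrative only.

```cpp
#include "srsran/hal/dpdk/bbdev/bbdev_acc.h"

// Reserve an LDPC encoder hardware queue, use it, and hand it back to the pool.
// 'accelerator' is assumed to be an already-initialized bbdev_acc instance.
void run_on_ldpc_enc_queue(srsran::dpdk::bbdev_acc& accelerator)
{
  int queue_id = accelerator.reserve_queue(RTE_BBDEV_OP_LDPC_ENC);
  if (queue_id < 0) {
    // No LDPC encoder queue is currently free; the caller may retry later.
    return;
  }

  // ... enqueue/dequeue bbdev LDPC encoder operations on 'queue_id' here ...

  accelerator.free_queue(RTE_BBDEV_OP_LDPC_ENC, queue_id);
}
```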
@@ -100,13 +103,25 @@ class bbdev_acc /// Reserves a free queue to be used by a specific hardware-accelerated channel processor function. /// \param[in] op_type Type of bbdev op. /// \return ID of the reserved queue. - int reserve_queue(rte_bbdev_op_type op_type); + int reserve_queue(::rte_bbdev_op_type op_type); /// Frees a queue used by a specific hardware-accelerated channel processor function. /// \param[in] queue_id ID of the queue to be freed. - void free_queue(unsigned queue_id); + void free_queue(::rte_bbdev_op_type op_type, unsigned queue_id); + + /// Returns a unique ID for an instance of an LDPC encoder using the bbdev-based accelerator. + /// \return Encoder ID. + unsigned reserve_encoder() { return nof_ldpc_enc_instances++; } + + /// Returns a unique ID for an instance of an LDPC decoder using the bbdev-based accelerator. + /// \return Decoder ID. + unsigned reserve_decoder() { return nof_ldpc_dec_instances++; } private: + /// Codeblock identifier list type. + using queue_id_list = + concurrent_queue; + /// ID of the bbdev-based hardware-accelerator. unsigned id; /// Structure providing device information. @@ -117,8 +132,12 @@ class bbdev_acc unsigned nof_ldpc_dec_lcores; /// Number of lcores available to the hardware-accelerated FFT (disabled if 0). unsigned nof_fft_lcores; - /// Array indicating the queue availability. - bounded_bitset queue_used; + /// List containing the free queue ids for hardware-acclerated LDPC encoder functions. + queue_id_list available_ldpc_enc_queue; + /// List containing the free queue ids for hardware-acclerated LDPC decoder functions. + queue_id_list available_ldpc_dec_queue; + /// List containing the free queue ids for hardware-acclerated FFT functions. + queue_id_list available_fft_queue; /// Size of each mbuf used to exchange unencoded and unrate-matched messages with the accelerator in bytes. unsigned msg_mbuf_size; /// Size of each mbuf used to exchange encoded and rate-matched messages with the accelerator in bytes. @@ -127,6 +146,10 @@ class bbdev_acc unsigned nof_mbuf; /// SRS logger. srslog::basic_logger& logger; + /// Number of LDPC encoder instances using this bbdev accelerator. + unsigned nof_ldpc_enc_instances; + /// Number of LDPC decoder instances using this bbdev accelerator. + unsigned nof_ldpc_dec_instances; }; } // namespace dpdk diff --git a/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_factories.h b/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_factories.h index 2ea1c65354..fdc5c44311 100644 --- a/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_factories.h +++ b/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_factories.h @@ -22,6 +22,13 @@ struct hw_accelerator_pdsch_enc_configuration { std::string acc_type; /// Interfacing to a bbdev-based hardware-accelerator. std::shared_ptr bbdev_accelerator; + /// Defines if the PDSCH encoder operates in CB mode (true) or TB mode (false). + bool cb_mode = false; + /// Defines the maximum supported TB size in bytes (CB mode will be forced for larger TBs). + /// Only used in TB mode. + unsigned max_tb_size; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. 
+ bool dedicated_queue; }; /// Returns an instance of a PDSCH encoder hardware accelerator factory on success, diff --git a/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc.h b/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc.h index 6249c1d114..af4da7bfdb 100644 --- a/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc.h +++ b/include/srsran/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc.h @@ -66,10 +66,24 @@ class hw_accelerator_pdsch_enc : public hw_accelerator /// Default destructor. virtual ~hw_accelerator_pdsch_enc() = default; + // Reserves a hardware queue from the accelerator. + virtual void reserve_queue() = 0; + + // Frees a hardware queue from the accelerator. + virtual void free_queue() = 0; + /// Configures encoding operation given the common HW-oriented PDSCH encoder configuration. /// \param[in] config Structure providing the configuration parameters of the PDSCH encoder. /// \param[in] cb_index Optional. Index of the CB for which the PDSCH encoding operation is being configured. virtual void configure_operation(const hw_pdsch_encoder_configuration& config, unsigned cb_index = 0) = 0; + + /// Checks if the hardware-accelerated PDSCH encoder uses CB mode or TB mode. + /// \return True if CB mode is used, false otherwise. + virtual bool get_cb_mode() const = 0; + + /// Checks the maximum supported TB size. Only used in TB mode. + /// \return TB size (in bytes). + virtual unsigned get_max_tb_size() const = 0; }; } // namespace hal diff --git a/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.h b/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.h index 555475bc6f..e624229e8a 100644 --- a/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.h +++ b/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.h @@ -27,6 +27,8 @@ struct hw_accelerator_pusch_dec_configuration { bool ext_softbuffer; /// Interfacing to an external HARQ buffer context repository. std::shared_ptr harq_buffer_context; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. + bool dedicated_queue = true; }; /// Returns an instance of a PUSCH decoder hardware accelerator factory on success, diff --git a/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec.h b/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec.h index 70fc387f46..7d3dc97c62 100644 --- a/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec.h +++ b/include/srsran/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec.h @@ -74,6 +74,12 @@ class hw_accelerator_pusch_dec : public hw_accelerator /// Default destructor. virtual ~hw_accelerator_pusch_dec() = default; + // Reserves a hardware queue from the accelerator. + virtual void reserve_queue() = 0; + + // Frees a hardware queue from the accelerator. + virtual void free_queue() = 0; + /// Configures the decoding operation given the common HW-oriented PUSCH decoder configuration. /// \param[in] config Structure providing the configuration parameters of the PUSCH decoder. /// \param[in] cb_index Optional. Index of the CB for which the PUSCH decoding operation is being configured. 
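[Editorial sketch, not part of the patch] The reserve_queue()/free_queue() pair added to the accelerator interfaces above lets an instance either hold a dedicated hardware queue for its whole lifetime (dedicated_queue = true) or take one from the shared pool per operation. Below is a minimal usage sketch for the non-dedicated case; the helper name, the polling loop and the explicit empty auxiliary-buffer arguments are assumptions for illustration, not code from the patch.

```cpp
#include <cstdint>
#include "srsran/adt/span.h"
#include "srsran/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc.h"

namespace {

// Drive one hardware-accelerated PDSCH encoding operation, reserving a bbdev
// queue from the shared pool just for the duration of the call.
void encode_one_transport_block(srsran::hal::hw_accelerator_pdsch_enc&              encoder,
                                const srsran::hal::hw_pdsch_encoder_configuration& cfg,
                                srsran::span<const uint8_t>                         tb_data,
                                srsran::span<uint8_t>                               codeword)
{
  // With dedicated_queue == false this waits for a free LDPC encoder queue;
  // with a dedicated queue a free queue is already held from construction.
  encoder.reserve_queue();

  // Configure and run the accelerated operation (single segment shown).
  encoder.configure_operation(cfg);
  encoder.enqueue_operation(tb_data, {}, 0);
  while (!encoder.dequeue_operation(codeword, {}, 0)) {
    // Poll until the hardware has produced the encoded codeword.
  }

  // Return the queue to the pool so other instances can reserve it.
  encoder.free_queue();
}

} // namespace
```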
diff --git a/include/srsran/phy/upper/channel_processors/channel_processor_factories.h b/include/srsran/phy/upper/channel_processors/channel_processor_factories.h index d444fbf3e6..f525379060 100644 --- a/include/srsran/phy/upper/channel_processors/channel_processor_factories.h +++ b/include/srsran/phy/upper/channel_processors/channel_processor_factories.h @@ -119,8 +119,6 @@ std::shared_ptr create_pdsch_encoder_factory_sw(pdsch_enc /// HW-accelerated PDSCH encoder factory configuration parameters. struct pdsch_encoder_factory_hw_configuration { - bool cb_mode = false; - unsigned max_tb_size; std::shared_ptr crc_factory; std::shared_ptr segmenter_factory; std::shared_ptr hw_encoder_factory; diff --git a/lib/hal/dpdk/bbdev/bbdev.cpp b/lib/hal/dpdk/bbdev/bbdev.cpp index 6a1e4bf659..68551dcda3 100644 --- a/lib/hal/dpdk/bbdev/bbdev.cpp +++ b/lib/hal/dpdk/bbdev/bbdev.cpp @@ -27,10 +27,10 @@ expected<::rte_bbdev_info> dpdk::bbdev_start(const bbdev_acc_configuration& cfg, "available: ldpc_enc={}, ldpc_dec={}, fft={}).", cfg.id, cfg.nof_ldpc_enc_lcores, - info.drv.num_queues[RTE_BBDEV_OP_LDPC_ENC], cfg.nof_ldpc_dec_lcores, - info.drv.num_queues[RTE_BBDEV_OP_LDPC_DEC], cfg.nof_fft_lcores, + info.drv.num_queues[RTE_BBDEV_OP_LDPC_ENC], + info.drv.num_queues[RTE_BBDEV_OP_LDPC_DEC], info.drv.num_queues[RTE_BBDEV_OP_FFT]); return default_error_t{}; } diff --git a/lib/hal/dpdk/bbdev/bbdev_acc.cpp b/lib/hal/dpdk/bbdev/bbdev_acc.cpp index c963337776..3022b449cf 100644 --- a/lib/hal/dpdk/bbdev/bbdev_acc.cpp +++ b/lib/hal/dpdk/bbdev/bbdev_acc.cpp @@ -20,12 +20,48 @@ bbdev_acc::bbdev_acc(const bbdev_acc_configuration& cfg, const ::rte_bbdev_info& nof_ldpc_enc_lcores(cfg.nof_ldpc_enc_lcores), nof_ldpc_dec_lcores(cfg.nof_ldpc_dec_lcores), nof_fft_lcores(cfg.nof_fft_lcores), - queue_used(cfg.nof_ldpc_enc_lcores + cfg.nof_ldpc_dec_lcores + cfg.nof_fft_lcores), + available_ldpc_enc_queue(MAX_NOF_BBDEV_QUEUES), + available_ldpc_dec_queue(MAX_NOF_BBDEV_QUEUES), + available_fft_queue(MAX_NOF_BBDEV_QUEUES), msg_mbuf_size(cfg.msg_mbuf_size), rm_mbuf_size(cfg.rm_mbuf_size), nof_mbuf(cfg.nof_mbuf), logger(logger_) { + unsigned nof_vfs = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores + nof_fft_lcores; + srsran_assert(nof_ldpc_enc_lcores <= MAX_NOF_BBDEV_VF_INSTANCES, + "Requested {} LDPC encoder VFs but only {} are available.", + nof_ldpc_enc_lcores, + MAX_NOF_BBDEV_VF_INSTANCES); + srsran_assert(nof_ldpc_dec_lcores <= MAX_NOF_BBDEV_VF_INSTANCES, + "Requested {} LDPC decoder VFs but only {} are available.", + nof_ldpc_dec_lcores, + MAX_NOF_BBDEV_VF_INSTANCES); + srsran_assert(nof_fft_lcores <= MAX_NOF_BBDEV_VF_INSTANCES, + "Requested {} FFT VFs but only {} are available.", + nof_fft_lcores, + MAX_NOF_BBDEV_VF_INSTANCES); + srsran_assert(nof_vfs <= MAX_NOF_BBDEV_QUEUES, + "Requested {} BBDEV VFs but only {} are available.", + nof_vfs, + MAX_NOF_BBDEV_QUEUES); + + nof_ldpc_enc_instances = 0; + nof_ldpc_dec_instances = 0; + // Hardware-accelerated LDPC encoder functions use queues 0:(nof_ldpc_enc_lcores - 1) + for (unsigned qid = 0, lastq = nof_ldpc_enc_lcores; qid != lastq; qid++) { + available_ldpc_enc_queue.try_push(qid); + } + // Hardware-accelerated LDPC decoder functions use queues nof_ldpc_enc_lcores:(nof_ldpc_enc_lcores + + // nof_ldpc_dec_lcores - 1) + for (unsigned qid = nof_ldpc_enc_lcores, lastq = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores; qid != lastq; qid++) { + available_ldpc_dec_queue.try_push(qid); + } + // Hardware-accelerated FFT functions use queues (nof_ldpc_enc_lcores + 
nof_ldpc_dec_lcores):(nof_ldpc_enc_lcores + + // nof_ldpc_dec_lcores + nof_fft_lcores - 1) + for (unsigned qid = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores, lastq = nof_vfs; qid != lastq; qid++) { + available_fft_queue.try_push(qid); + } } bbdev_acc::~bbdev_acc() @@ -38,57 +74,42 @@ int bbdev_acc::reserve_queue(::rte_bbdev_op_type op_type) { int queue_id = -1; - // Hardware-accelerated LDPC encoder functions use queues 0:(nof_ldpc_enc_lcores - 1) if (op_type == RTE_BBDEV_OP_LDPC_ENC) { - for (unsigned qid = 0, lastq = nof_ldpc_enc_lcores; qid != lastq; qid++) { - if (!queue_used.test(qid)) { - queue_id = qid; - queue_used.set(qid); - break; - } - } - } - // Hardware-accelerated LDPC decoder functions use queues nof_ldpc_enc_lcores:(nof_ldpc_enc_lcores + - // nof_ldpc_dec_lcores - 1) - else if (op_type == RTE_BBDEV_OP_LDPC_DEC) { - for (unsigned qid = nof_ldpc_enc_lcores, lastq = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores; qid != lastq; qid++) { - if (!queue_used.test(qid)) { - queue_id = qid; - queue_used.set(qid); - break; - } + // Try to get an available LDPC encoder queue. + std::optional qid = available_ldpc_enc_queue.try_pop(); + if (qid.has_value()) { + queue_id = qid.value(); } - } - // Hardware-accelerated FFT functions use queues (nof_ldpc_enc_lcores + nof_ldpc_dec_lcores):(nof_ldpc_enc_lcores + - // nof_ldpc_dec_lcores + nof_fft_lcores - 1) - else { - for (unsigned qid = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores, - lastq = nof_ldpc_enc_lcores + nof_ldpc_dec_lcores + nof_fft_lcores; - qid != lastq; - qid++) { - if (!queue_used.test(qid)) { - queue_id = qid; - queue_used.set(qid); - break; - } + } else if (op_type == RTE_BBDEV_OP_LDPC_DEC) { + // Try to get an available LDPC decoder queue. + std::optional qid = available_ldpc_dec_queue.try_pop(); + if (qid.has_value()) { + queue_id = qid.value(); } - } - - // Log failure to obtain a free queue. - if (queue_id < 0) { - if (op_type == RTE_BBDEV_OP_LDPC_ENC) { - logger.error("[bbdev] unable to reserve a free LDPC encoder queue in device {}.", id); - } else if (op_type == RTE_BBDEV_OP_LDPC_DEC) { - logger.error("[bbdev] unable to reserve a free LDPC decoder queue in device {}.", id); - } else { - logger.error("[bbdev] unable to reserve a free FFT queue in device {}.", id); + } else { + // Try to get an available FFT queue. + std::optional qid = available_fft_queue.try_pop(); + if (qid.has_value()) { + queue_id = qid.value(); } } return queue_id; } -void bbdev_acc::free_queue(unsigned queue_id) +void bbdev_acc::free_queue(::rte_bbdev_op_type op_type, unsigned queue_id) { - queue_used.reset(queue_id); + if (op_type == RTE_BBDEV_OP_LDPC_ENC) { + // Free a LDPC encoder queue. + while (!available_ldpc_enc_queue.try_push(queue_id)) { + } + } else if (op_type == RTE_BBDEV_OP_LDPC_DEC) { + // Free a LDPC decoder queue. + while (!available_ldpc_dec_queue.try_push(queue_id)) { + } + } else { + // Free a FFT queue. + while (!available_fft_queue.try_push(queue_id)) { + } + } } diff --git a/lib/hal/phy/upper/channel_processors/hw_accelerator_factories.cpp b/lib/hal/phy/upper/channel_processors/hw_accelerator_factories.cpp index 27eeaa2874..bf41ea9f9a 100644 --- a/lib/hal/phy/upper/channel_processors/hw_accelerator_factories.cpp +++ b/lib/hal/phy/upper/channel_processors/hw_accelerator_factories.cpp @@ -28,18 +28,29 @@ class hw_accelerator_pdsch_enc_factory_spec : public hw_accelerator_pdsch_enc_fa std::string acc_type; /// Interfacing to a bbdev-based hardware-accelerator. 
std::shared_ptr bbdev_accelerator; + /// Operation mode of the PDSCH encoder (CB = true, TB = false [default]). + bool cb_mode = false; + /// Maximum supported TB size in bytes (used to size the mbufs). + unsigned max_tb_size; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. + bool dedicated_queue = true; public: // Default constructor. explicit hw_accelerator_pdsch_enc_factory_spec(const hw_accelerator_pdsch_enc_configuration& accelerator_config) : - acc_type(accelerator_config.acc_type), bbdev_accelerator(std::move(accelerator_config.bbdev_accelerator)) + acc_type(accelerator_config.acc_type), + bbdev_accelerator(std::move(accelerator_config.bbdev_accelerator)), + cb_mode(accelerator_config.cb_mode), + max_tb_size(accelerator_config.max_tb_size), + dedicated_queue(accelerator_config.dedicated_queue) { } std::unique_ptr create() override { if (acc_type == "acc100") { - return std::make_unique(bbdev_accelerator); + return std::make_unique( + bbdev_accelerator, cb_mode, max_tb_size, dedicated_queue); } // Handle other accelerator types here. return {}; diff --git a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.cpp b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.cpp index cea29ef90e..85eb87af49 100644 --- a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.cpp +++ b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.cpp @@ -22,12 +22,10 @@ void hw_accelerator_pdsch_enc_acc100_impl::allocate_resources() int socket_id = bbdev_accelerator->get_socket_id(); // Create bbdev op pools for the accelerated LDPC encoder operations. - // Note that a single hardware-queue per lcore is assumed. - unsigned nof_ldpc_enc_cores = bbdev_accelerator->get_nof_ldpc_enc_cores(); // op pools require unique names. - std::string op_pool_name = fmt::format("enc_op_pool_{}_{}", device_id, queue_id); + std::string op_pool_name = fmt::format("enc_op_pool_{}_{}", device_id, id); op_pool = ::dpdk::create_bbdev_op_pool( - op_pool_name.c_str(), RTE_BBDEV_OP_LDPC_ENC, nof_ldpc_enc_cores, socket_id, bbdev_accelerator->get_logger()); + op_pool_name.c_str(), RTE_BBDEV_OP_LDPC_ENC, MAX_NOF_SEGMENTS, socket_id, bbdev_accelerator->get_logger()); // Create new mbuf pools for both input and output data for the hardware-accelerated LDPC encoder. // Note that a predefined headroom length is added on top of the size required for the data in the mbufs. Also, the @@ -41,14 +39,44 @@ void hw_accelerator_pdsch_enc_acc100_impl::allocate_resources() rm_mpool_cfg.mbuf_data_size = bbdev_accelerator->get_rm_mbuf_size().value() + RTE_PKTMBUF_HEADROOM; rm_mpool_cfg.n_mbuf = nof_mbuf; // mbuf pools require unique names. - std::string mbuf_pool_name = fmt::format("enc_in_mbuf_pool_{}_{}", device_id, queue_id); + std::string mbuf_pool_name = fmt::format("enc_in_mbuf_pool_{}_{}", device_id, id); in_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, msg_mpool_cfg, bbdev_accelerator->get_logger()); - mbuf_pool_name = fmt::format("enc_out_mbuf_pool_{}_{}", device_id, queue_id); + mbuf_pool_name = fmt::format("enc_out_mbuf_pool_{}_{}", device_id, id); out_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, rm_mpool_cfg, bbdev_accelerator->get_logger()); } +void hw_accelerator_pdsch_enc_acc100_impl::hw_reserve_queue() +{ + // Verify that no hardware-queue is reserved already. 
+ if (queue_id < 0) { + int qid = -1; + do { + qid = bbdev_accelerator->reserve_queue(RTE_BBDEV_OP_LDPC_ENC); + } while (qid < 0 && !dedicated_queue); + queue_id = qid; + + // HAL logging. + srslog::basic_logger& logger = bbdev_accelerator->get_logger(); + logger.info("[acc100] encoder id={}: reserved queue={}.", id, queue_id); + } +} + +void hw_accelerator_pdsch_enc_acc100_impl::hw_free_queue() +{ + // Verify that the hardware queue won't be requrired anymore. + if (!dedicated_queue || (dedicated_queue && queue_id > 0)) { + bbdev_accelerator->free_queue(RTE_BBDEV_OP_LDPC_ENC, queue_id); + + // HAL logging. + srslog::basic_logger& logger = bbdev_accelerator->get_logger(); + logger.info("[acc100] encoder id={}: freed queue={}.", id, queue_id); + + queue_id = -1; + } +} + void hw_accelerator_pdsch_enc_acc100_impl::hw_config(const hw_pdsch_encoder_configuration& config, unsigned cb_index) { // Save configuration. @@ -107,8 +135,11 @@ bool hw_accelerator_pdsch_enc_acc100_impl::hw_enqueue(span data, queue_id); // Enqueue the LDPC encoding operation. - enqueued = ::dpdk::enqueue_ldpc_enc_operation( - op[cb_index], 1, device_id, queue_id, bbdev_accelerator->get_logger()); // TBD: single operation enqueued. + enqueued = ::dpdk::enqueue_ldpc_enc_operation(op[cb_index], + 1, + device_id, + static_cast(queue_id), + bbdev_accelerator->get_logger()); // TBD: single operation enqueued. // Update the enqueued task counter. if (enqueued) { @@ -133,8 +164,11 @@ bool hw_accelerator_pdsch_enc_acc100_impl::hw_dequeue(span data, // Verify that the queue is not already emtpy and that the operation has not been dropped before trying to dequeue. if (nof_enqueued_op > 0 && !dropped) { // Dequeue processed operations from the hardware-accelerated LDPC encoder. - dequeued = ::dpdk::dequeue_ldpc_enc_operation( - op[segment_index], 1, device_id, queue_id, bbdev_accelerator->get_logger()); // TBD: single operation dequeued. + dequeued = ::dpdk::dequeue_ldpc_enc_operation(op[segment_index], + 1, + device_id, + static_cast(queue_id), + bbdev_accelerator->get_logger()); // TBD: single operation dequeued. // Read the returned results (if any). if (dequeued) { @@ -161,3 +195,13 @@ bool hw_accelerator_pdsch_enc_acc100_impl::hw_dequeue(span data, return dequeued; } + +bool hw_accelerator_pdsch_enc_acc100_impl::get_hw_cb_mode() const +{ + return cb_mode; +} + +unsigned hw_accelerator_pdsch_enc_acc100_impl::get_hw_max_tb_size() const +{ + return max_tb_size; +} diff --git a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.h b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.h index 640a80b3e2..48f73edf33 100644 --- a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.h +++ b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_acc100_impl.h @@ -27,12 +27,20 @@ namespace hal { /// Class representing the ACC100 implementation of PDSCH encoding. class hw_accelerator_pdsch_enc_acc100_impl : public hw_accelerator_pdsch_enc_impl { + /// Hardware-specific implementation of the reserve queue function. + void hw_reserve_queue() override; + /// Hardware-specific implementation of the free queue function. + void hw_free_queue() override; /// Hardware-specific implementation of the enqueue_operation function. bool hw_enqueue(span data, unsigned cb_index) override; /// Hardware-specific implementation of the dequeue_operation function. bool hw_dequeue(span data, span packed_data, unsigned segment_index) override; /// Hardware-specific configuration function. 
void hw_config(const hw_pdsch_encoder_configuration& config, unsigned cb_index) override; + /// Hardware-specific CB mode quering function. + bool get_hw_cb_mode() const override; + /// Hardware-specific maximum supported TB size quering function. + unsigned get_hw_max_tb_size() const override; /// Allocate the required resources from the bbdev-based hardware-accelerator. void allocate_resources(); @@ -40,6 +48,9 @@ class hw_accelerator_pdsch_enc_acc100_impl : public hw_accelerator_pdsch_enc_imp /// \file /// \brief Members specific to bbdev-accelerated LDPC encoder functions. + /// Unique ID of the current hardware-accelerated function. + unsigned id; + /// Pointer to a bbdev-based hardware-accelerator. std::shared_ptr bbdev_accelerator; @@ -47,7 +58,7 @@ class hw_accelerator_pdsch_enc_acc100_impl : public hw_accelerator_pdsch_enc_imp unsigned device_id; /// Private member to store the ID of the queue used by the hardware-accelerated LDPC encoder function. - uint16_t queue_id; + int queue_id; /// Indicates the number of encoding operations in the queue. unsigned nof_enqueued_op = 0; @@ -70,38 +81,59 @@ class hw_accelerator_pdsch_enc_acc100_impl : public hw_accelerator_pdsch_enc_imp /// Private member to store the configuration of the current operation. hw_pdsch_encoder_configuration enc_config; + /// Private member to store the operation mode. + bool cb_mode = false; + + /// Private member to store the maximum supported TB size (in bytes). + unsigned max_tb_size; + /// Private member to store the TB CRC (only for TB mode operation). static_vector tb_crc; /// Array flagging those encoding operations that will be dropped due to enqueueing errors. bounded_bitset drop_op; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. + bool dedicated_queue; + public: /// Constructor taking care of obtaining a bbdev-based hardware-accelerator queue and allocating the required /// resources. - hw_accelerator_pdsch_enc_acc100_impl(std::shared_ptr bbdev_accelerator_) : - bbdev_accelerator(std::move(bbdev_accelerator_)) + hw_accelerator_pdsch_enc_acc100_impl(std::shared_ptr bbdev_accelerator_, + bool cb_mode_, + unsigned max_tb_size_, + bool dedicated_queue_) : + bbdev_accelerator(std::move(bbdev_accelerator_)), + cb_mode(cb_mode_), + max_tb_size(max_tb_size_), + dedicated_queue(dedicated_queue_) { - int qid = bbdev_accelerator->reserve_queue(RTE_BBDEV_OP_LDPC_ENC); - srsran_assert(qid >= 0, "No free bbdev queues available."); - queue_id = static_cast(qid); + id = bbdev_accelerator->reserve_encoder(); device_id = bbdev_accelerator->get_device_id(); + // Reserve a hardware queue in case of dedicated use. + queue_id = -1; + if (dedicated_queue) { + hw_reserve_queue(); + srsran_assert(queue_id >= 0, "No free RTE_BBDEV_OP_LDPC_ENC queues available."); + } allocate_resources(); drop_op.resize(MAX_NOF_SEGMENTS); drop_op.reset(); + // HAL logging. srslog::basic_logger& logger = bbdev_accelerator->get_logger(); - logger.info("[acc100] new encoder: queue={}.", queue_id); + logger.info("[acc100] new encoder: id={}.", id); } - /// Destructor taking care of freeing the utilized resources and hardware-accelerator queue. + /// Destructor taking care of freeing the utilized resources. ~hw_accelerator_pdsch_enc_acc100_impl() { - bbdev_accelerator->free_queue(queue_id); + // Free the reserved hardware queue in case of dedicated use. + hw_free_queue(); // HAL logging. 
srslog::basic_logger& logger = bbdev_accelerator->get_logger(); - logger.info("[acc100] destroyed encoder: queue={}.", queue_id); + logger.info("[acc100] destroyed encoder: id={}.", id); } }; diff --git a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.cpp b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.cpp index 5b3b0f9b13..944ff02e3f 100644 --- a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.cpp +++ b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.cpp @@ -13,6 +13,16 @@ using namespace srsran; using namespace hal; +void hw_accelerator_pdsch_enc_impl::reserve_queue() +{ + hw_reserve_queue(); +} + +void hw_accelerator_pdsch_enc_impl::free_queue() +{ + hw_free_queue(); +} + bool hw_accelerator_pdsch_enc_impl::enqueue_operation(span data, span aux_data, unsigned cb_index) @@ -31,3 +41,13 @@ void hw_accelerator_pdsch_enc_impl::configure_operation(const hw_pdsch_encoder_c { hw_config(config, cb_index); } + +bool hw_accelerator_pdsch_enc_impl::get_cb_mode() const +{ + return get_hw_cb_mode(); +} + +unsigned hw_accelerator_pdsch_enc_impl::get_max_tb_size() const +{ + return get_hw_max_tb_size(); +} diff --git a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.h b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.h index 4d1bad278d..cdf67af306 100644 --- a/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.h +++ b/lib/hal/phy/upper/channel_processors/hw_accelerator_pdsch_enc_impl.h @@ -25,20 +25,36 @@ class hw_accelerator_pdsch_enc_impl : public hw_accelerator_pdsch_enc /// Default constructor. hw_accelerator_pdsch_enc_impl() = default; + // See hw_accelerator interface for the documentation. + void reserve_queue() override; + // See hw_accelerator interface for the documentation. + void free_queue() override; // See hw_accelerator interface for the documentation. bool enqueue_operation(span data, span aux_data = {}, unsigned cb_index = 0) override; // See hw_accelerator interface for the documentation. bool dequeue_operation(span data, span packed_data = {}, unsigned segment_index = 0) override; // See hw_accelerator interface for the documentation. void configure_operation(const hw_pdsch_encoder_configuration& config, unsigned cb_index = 0) override; + // See hw_accelerator interface for the documentation. + bool get_cb_mode() const override; + // See hw_accelerator interface for the documentation. + unsigned get_max_tb_size() const override; private: + /// Hardware-specific implementation of the reserve queue function. + virtual void hw_reserve_queue() = 0; + /// Hardware-specific implementation of the free queue function. + virtual void hw_free_queue() = 0; /// Hardware-specific implementation of the enqueue_operation function. virtual bool hw_enqueue(span data, unsigned cb_index) = 0; /// Hardware-specific implementation of the dequeue_operation function. virtual bool hw_dequeue(span data, span packed_data, unsigned segment_index) = 0; /// Hardware-specific configuration function. virtual void hw_config(const hw_pdsch_encoder_configuration& config, unsigned cb_index) = 0; + /// Hardware-specific CB mode quering function. + virtual bool get_hw_cb_mode() const = 0; + /// Hardware-specific maximum supported TB size quering function. 
+ virtual unsigned get_hw_max_tb_size() const = 0; }; } // namespace hal diff --git a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.cpp b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.cpp index 77fa2c8fac..837b577de2 100644 --- a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.cpp +++ b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_factories.cpp @@ -32,6 +32,8 @@ class hw_accelerator_pusch_dec_factory_spec : public hw_accelerator_pusch_dec_fa bool ext_softbuffer; /// Interfacing to an external HARQ buffer context repository. std::shared_ptr harq_buffer_context; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. + bool dedicated_queue; public: // Default constructor. @@ -39,7 +41,8 @@ class hw_accelerator_pusch_dec_factory_spec : public hw_accelerator_pusch_dec_fa acc_type(accelerator_config.acc_type), bbdev_accelerator(std::move(accelerator_config.bbdev_accelerator)), ext_softbuffer(accelerator_config.ext_softbuffer), - harq_buffer_context(accelerator_config.harq_buffer_context) + harq_buffer_context(accelerator_config.harq_buffer_context), + dedicated_queue(accelerator_config.dedicated_queue) { } @@ -47,7 +50,7 @@ class hw_accelerator_pusch_dec_factory_spec : public hw_accelerator_pusch_dec_fa { if (acc_type == "acc100") { return std::make_unique( - bbdev_accelerator, ext_softbuffer, harq_buffer_context); + bbdev_accelerator, ext_softbuffer, harq_buffer_context, dedicated_queue); } // Handle other accelerator types here. return {}; diff --git a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.cpp b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.cpp index adef66c679..1a26b099de 100644 --- a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.cpp +++ b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.cpp @@ -22,12 +22,10 @@ void hw_accelerator_pusch_dec_acc100_impl::allocate_resources() int socket_id = bbdev_accelerator->get_socket_id(); // Create bbdev op pools for the accelerated LDPC decoder operations. - // Note that a single hardware-queue per lcore is assumed. - unsigned nof_ldpc_dec_cores = bbdev_accelerator->get_nof_ldpc_dec_cores(); // op pools require unique names. - std::string op_pool_name = fmt::format("dec_op_pool_{}_{}", device_id, queue_id); + std::string op_pool_name = fmt::format("dec_op_pool_{}_{}", device_id, id); op_pool = ::dpdk::create_bbdev_op_pool( - op_pool_name.c_str(), RTE_BBDEV_OP_LDPC_DEC, nof_ldpc_dec_cores, socket_id, bbdev_accelerator->get_logger()); + op_pool_name.c_str(), RTE_BBDEV_OP_LDPC_DEC, MAX_NOF_SEGMENTS, socket_id, bbdev_accelerator->get_logger()); // Create new mbuf pools for both input and output data for the hardware-accelerated LDPC decoder. // Note that the buffers are sized taking into account that only CB mode is supported by the decoder. @@ -40,23 +38,53 @@ void hw_accelerator_pusch_dec_acc100_impl::allocate_resources() msg_mpool_cfg.n_mbuf = nof_mbuf; // mbuf pools require unique names. 
- std::string mbuf_pool_name = fmt::format("dec_in_mbuf_pool_{}_{}", device_id, queue_id); + std::string mbuf_pool_name = fmt::format("dec_in_mbuf_pool_{}_{}", device_id, id); in_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, rm_mpool_cfg, bbdev_accelerator->get_logger()); - mbuf_pool_name = fmt::format("harq_in_mbuf_pool_{}_{}", device_id, queue_id); + mbuf_pool_name = fmt::format("harq_in_mbuf_pool_{}_{}", device_id, id); harq_in_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, rm_mpool_cfg, bbdev_accelerator->get_logger()); - mbuf_pool_name = fmt::format("dec_out_mbuf_pool_{}_{}", device_id, queue_id); + mbuf_pool_name = fmt::format("dec_out_mbuf_pool_{}_{}", device_id, id); out_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, msg_mpool_cfg, bbdev_accelerator->get_logger()); - mbuf_pool_name = fmt::format("harq_out_mbuf_pool_{}_{}", device_id, queue_id); + mbuf_pool_name = fmt::format("harq_out_mbuf_pool_{}_{}", device_id, id); harq_out_mbuf_pool = ::dpdk::create_mbuf_pool(mbuf_pool_name.c_str(), socket_id, rm_mpool_cfg, bbdev_accelerator->get_logger()); } +void hw_accelerator_pusch_dec_acc100_impl::hw_reserve_queue() +{ + // Verify that no hardware-queue is reserved already. + if (queue_id < 0) { + int qid = -1; + do { + qid = bbdev_accelerator->reserve_queue(RTE_BBDEV_OP_LDPC_DEC); + } while (qid < 0 && !dedicated_queue); + queue_id = qid; + + // HAL logging. + srslog::basic_logger& logger = bbdev_accelerator->get_logger(); + logger.info("[acc100] decoder id={}: reserved queue={}.", id, queue_id); + } +} + +void hw_accelerator_pusch_dec_acc100_impl::hw_free_queue() +{ + // Verify that the hardware queue won't be requrired anymore. + if (!dedicated_queue || (dedicated_queue && queue_id > 0)) { + bbdev_accelerator->free_queue(RTE_BBDEV_OP_LDPC_DEC, queue_id); + + // HAL logging. + srslog::basic_logger& logger = bbdev_accelerator->get_logger(); + logger.info("[acc100] decoder id={}: freed queue={}.", id, queue_id); + + queue_id = -1; + } +} + void hw_accelerator_pusch_dec_acc100_impl::hw_config(const hw_pusch_decoder_configuration& config, unsigned cb_index) { // Save configuration. @@ -128,7 +156,7 @@ bool hw_accelerator_pusch_dec_acc100_impl::hw_enqueue(span data, enqueued = ::dpdk::enqueue_ldpc_dec_operation(op[cb_index], 1, device_id, - queue_id, + static_cast(queue_id), dec_config.new_data, ext_softbuffer, bbdev_accelerator->get_logger()); // TBD: single operation enqueued. @@ -162,7 +190,7 @@ bool hw_accelerator_pusch_dec_acc100_impl::hw_dequeue(span data, dequeued = ::dpdk::dequeue_ldpc_dec_operation(op[segment_index], 1, device_id, - queue_id, + static_cast(queue_id), bbdev_accelerator->get_logger()); // TBD: single operation dequeued. // Check if there are new results available from the hardware accelerator. diff --git a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.h b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.h index 8206d76608..8617c35a96 100644 --- a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.h +++ b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_acc100_impl.h @@ -28,6 +28,10 @@ namespace hal { /// Class representing the ACC100 implementation of PUSCH decoding. class hw_accelerator_pusch_dec_acc100_impl : public hw_accelerator_pusch_dec_impl { + /// Hardware-specific implementation of the reserve queue function. 
+ void hw_reserve_queue() override; + /// Hardware-specific implementation of the free queue function. + void hw_free_queue() override; /// Hardware-specific implementation of the enqueue_operation function. bool hw_enqueue(span data, span soft_data, unsigned cb_index) override; /// Hardware-specific implementation of the dequeue_operation function. @@ -47,6 +51,9 @@ class hw_accelerator_pusch_dec_acc100_impl : public hw_accelerator_pusch_dec_imp /// \file /// \brief Members specific to bbdev-accelerated LDPC decoder functions. + /// Unique ID of the current hardware-accelerated function. + unsigned id; + /// Pointer to a bbdev-based hardware-accelerator. std::shared_ptr bbdev_accelerator; @@ -54,7 +61,7 @@ class hw_accelerator_pusch_dec_acc100_impl : public hw_accelerator_pusch_dec_imp unsigned device_id; /// Private member to store the ID of the queue used by the hardware-accelerated LDPC decoder function. - uint16_t queue_id; + int queue_id; /// Indicates the number of decoding operations in the queue. unsigned nof_enqueued_op = 0; @@ -98,20 +105,29 @@ class hw_accelerator_pusch_dec_acc100_impl : public hw_accelerator_pusch_dec_imp /// HARQ context repository entry for the current decoding operation. std::vector harq_context_entries; + /// Indicates if the accelerated function uses a dedicated hardware queue or needs to reserve one for each operation. + bool dedicated_queue; + public: /// Constructor taking care of obtaining a bbdev-based hardware-accelerator queue and allocating the required /// resources. hw_accelerator_pusch_dec_acc100_impl(std::shared_ptr bbdev_accelerator_, bool ext_softbuffer_, - std::shared_ptr harq_buffer_context_) : + std::shared_ptr harq_buffer_context_, + bool dedicated_queue_) : bbdev_accelerator(std::move(bbdev_accelerator_)), ext_softbuffer(ext_softbuffer_), - harq_buffer_context(std::move(harq_buffer_context_)) + harq_buffer_context(std::move(harq_buffer_context_)), + dedicated_queue(dedicated_queue_) { - int qid = bbdev_accelerator->reserve_queue(RTE_BBDEV_OP_LDPC_DEC); - srsran_assert(qid >= 0, "No free bbdev queues available."); - queue_id = static_cast(qid); + id = bbdev_accelerator->reserve_decoder(); device_id = bbdev_accelerator->get_device_id(); + // Reserve a hardware queue in case of dedicated use. + queue_id = -1; + if (dedicated_queue) { + hw_reserve_queue(); + srsran_assert(queue_id >= 0, "No free RTE_BBDEV_OP_LDPC_DEC queues available."); + } allocate_resources(); drop_op.resize(MAX_NOF_SEGMENTS); drop_op.reset(); @@ -119,17 +135,18 @@ class hw_accelerator_pusch_dec_acc100_impl : public hw_accelerator_pusch_dec_imp // HAL logging. srslog::basic_logger& logger = bbdev_accelerator->get_logger(); - logger.info("[acc100] new decoder: queue={}.", queue_id); + logger.info("[acc100] new decoder: id={}.", id); } - /// Destructor taking care of freeing the utilized resources and hardware-accelerator queue. + /// Destructor taking care of freeing the utilized resources. ~hw_accelerator_pusch_dec_acc100_impl() { - bbdev_accelerator->free_queue(queue_id); + // Free the reserved hardware queue in case of dedicated use. + hw_free_queue(); // HAL logging. 
srslog::basic_logger& logger = bbdev_accelerator->get_logger(); - logger.info("[acc100] destroyed decoder: queue={}.", queue_id); + logger.info("[acc100] destroyed decoder: id={}.", id); } }; diff --git a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.cpp b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.cpp index d47f5c948e..627489b630 100644 --- a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.cpp +++ b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.cpp @@ -13,6 +13,16 @@ using namespace srsran; using namespace hal; +void hw_accelerator_pusch_dec_impl::reserve_queue() +{ + hw_reserve_queue(); +} + +void hw_accelerator_pusch_dec_impl::free_queue() +{ + hw_free_queue(); +} + bool hw_accelerator_pusch_dec_impl::enqueue_operation(span data, span soft_data, unsigned cb_index) diff --git a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.h b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.h index 0e5a71f74d..4352579d12 100644 --- a/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.h +++ b/lib/hal/phy/upper/channel_processors/pusch/hw_accelerator_pusch_dec_impl.h @@ -25,6 +25,10 @@ class hw_accelerator_pusch_dec_impl : public hw_accelerator_pusch_dec /// Default constructor. hw_accelerator_pusch_dec_impl() = default; + // See hw_accelerator interface for the documentation. + void reserve_queue() override; + // See hw_accelerator interface for the documentation. + void free_queue() override; // See hw_accelerator interface for the documentation. bool enqueue_operation(span data, span soft_data = {}, unsigned cb_index = 0) override; // See hw_accelerator interface for the documentation. @@ -40,6 +44,10 @@ class hw_accelerator_pusch_dec_impl : public hw_accelerator_pusch_dec bool is_external_harq_supported() const override; private: + /// Hardware-specific implementation of the reserve queue function. + virtual void hw_reserve_queue() = 0; + /// Hardware-specific implementation of the free queue function. + virtual void hw_free_queue() = 0; /// Hardware-specific implementation of the enqueue_operation function. virtual bool hw_enqueue(span data, span soft_data, unsigned cb_index) = 0; /// Hardware-specific implementation of the dequeue_operation function. 
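The hunks above extend the hardware-accelerated PDSCH encoder and PUSCH decoder with a reserve_queue()/free_queue() pair and a dedicated_queue flag: when the flag is set, a bbdev queue is reserved once at construction and kept for the lifetime of the object, while otherwise hw_reserve_queue() retries until one of the shared queues becomes free and the queue is meant to be given back once the operation completes. The following standalone sketch only illustrates that reservation pattern using toy, invented types (toy_queue_pool, toy_accelerator); it is not part of the patches and makes no DPDK/bbdev calls.

// Standalone sketch (not srsRAN code): the two hardware-queue policies introduced by this patch.
#include <cstdio>
#include <vector>

// Toy stand-in for the bbdev accelerator's queue bookkeeping.
struct toy_queue_pool {
  std::vector<bool> busy;

  explicit toy_queue_pool(unsigned n) : busy(n, false) {}

  // Returns a free queue id or -1 if none is available (mirrors reserve_queue() returning < 0).
  int reserve()
  {
    for (unsigned i = 0; i != busy.size(); ++i) {
      if (!busy[i]) {
        busy[i] = true;
        return static_cast<int>(i);
      }
    }
    return -1;
  }

  void free(int qid) { busy[qid] = false; }
};

// Toy stand-in for one accelerated encoder/decoder function.
struct toy_accelerator {
  toy_queue_pool& pool;
  bool            dedicated_queue;
  int             queue_id = -1;

  toy_accelerator(toy_queue_pool& p, bool dedicated) : pool(p), dedicated_queue(dedicated)
  {
    // Dedicated policy: reserve once at construction and keep the queue until destruction.
    if (dedicated_queue) {
      queue_id = pool.reserve();
    }
  }

  ~toy_accelerator()
  {
    if (queue_id >= 0) {
      pool.free(queue_id);
    }
  }

  void process_one_operation()
  {
    // Shared policy: reserve on demand, retrying until a queue frees up
    // (this mirrors the intent of the do/while loop in hw_reserve_queue() above).
    if (!dedicated_queue) {
      do {
        queue_id = pool.reserve();
      } while (queue_id < 0);
    }

    std::printf("enqueue + dequeue on queue %d\n", queue_id);

    // Shared policy: hand the queue back as soon as the operation completes.
    if (!dedicated_queue) {
      pool.free(queue_id);
      queue_id = -1;
    }
  }
};

int main()
{
  toy_queue_pool  pool(1);
  toy_accelerator shared_a(pool, false);
  toy_accelerator shared_b(pool, false);
  shared_a.process_one_operation();
  shared_b.process_one_operation(); // Both functions can take turns on the single shared queue.
  return 0;
}

In the benchmark and unit-test changes further below, the shared policy is selected with the new -w option ("Force shared hardware-queue use"), which sets dedicated_queue to false in the accelerator configuration.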
diff --git a/lib/phy/upper/channel_processors/channel_processor_factories.cpp b/lib/phy/upper/channel_processors/channel_processor_factories.cpp index 4799d54f9c..de8544346b 100644 --- a/lib/phy/upper/channel_processors/channel_processor_factories.cpp +++ b/lib/phy/upper/channel_processors/channel_processor_factories.cpp @@ -252,16 +252,12 @@ class pdsch_encoder_factory_sw : public pdsch_encoder_factory class pdsch_encoder_factory_hw : public pdsch_encoder_factory { private: - bool cb_mode; - unsigned max_tb_size; std::shared_ptr crc_factory; std::shared_ptr segmenter_factory; std::shared_ptr hw_encoder_factory; public: explicit pdsch_encoder_factory_hw(const pdsch_encoder_factory_hw_configuration& config) : - cb_mode(config.cb_mode), - max_tb_size(config.max_tb_size), crc_factory(std::move(config.crc_factory)), segmenter_factory(std::move(config.segmenter_factory)), hw_encoder_factory(std::move(config.hw_encoder_factory)) @@ -278,8 +274,7 @@ class pdsch_encoder_factory_hw : public pdsch_encoder_factory crc_factory->create(crc_generator_poly::CRC24A), crc_factory->create(crc_generator_poly::CRC24B), }; - return std::make_unique( - cb_mode, max_tb_size, crc, segmenter_factory->create(), hw_encoder_factory->create()); + return std::make_unique(crc, segmenter_factory->create(), hw_encoder_factory->create()); } }; diff --git a/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.cpp b/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.cpp index ed5fbb9cb5..dd818f59fd 100644 --- a/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.cpp +++ b/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.cpp @@ -24,10 +24,15 @@ void pdsch_encoder_hw_impl::encode(span codeword, const configuration& config) { // CB mode will be forced if TB mode is requested for a TB larger than the maximum supported size. + cb_mode = encoder->get_cb_mode(); + max_tb_size = encoder->get_max_tb_size(); if (!cb_mode && transport_block.size_bytes() > max_tb_size) { cb_mode = true; } + // Reserve a hardware-queue for the current encoding operation. + encoder->reserve_queue(); + // Set the TB encoding parameters (common to all CBs) as required by the hardware-accelerated PDSCH encoder. hal::hw_pdsch_encoder_configuration hw_cfg = {}; @@ -157,6 +162,9 @@ void pdsch_encoder_hw_impl::encode(span codeword, } } } + + // Free the hardware-queue utilized by completed encoding operation. + encoder->free_queue(); } /// \brief Computes the length of the rate-matched codeblock corresponding to each segment, diff --git a/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.h b/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.h index 7d7feafb1a..2aaa3e751b 100644 --- a/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.h +++ b/lib/phy/upper/channel_processors/pdsch_encoder_hw_impl.h @@ -42,21 +42,14 @@ class pdsch_encoder_hw_impl : public pdsch_encoder /// /// Sets up the internal components, namely LDPC segmenter, all the CRC calculators and the hardware accelerator. /// - /// \param[in] cb_mode Defines if the PDSCH encoder operates in CB mode (true) or TB mode (false). - /// \param[in] max_cb_size Defines the maximum supported TB size in bytes (CB mode will be forced for larger TBs). - /// Only used in TB mode. /// \param[in] seg Unique pointer to an LDPC segmenter. /// \param[in] crcs Structure with pointers to three CRC calculator objects, with generator polynomials of type /// \c /// CRC16, \c CRC24A and \c CRC24B. /// \param[in] hw Unique pointer to a hardware-accelerator. 
- pdsch_encoder_hw_impl(bool cb_mode_, - unsigned max_tb_size_, - sch_crc& c, + pdsch_encoder_hw_impl(sch_crc& c, std::unique_ptr seg, std::unique_ptr hw) : - cb_mode(cb_mode_), - max_tb_size(max_tb_size_), crc_set({std::move(c.crc16), std::move(c.crc24A), std::move(c.crc24B)}), segmenter(std::move(seg)), encoder(std::move(hw)) diff --git a/lib/phy/upper/channel_processors/pusch/pusch_decoder_hw_impl.cpp b/lib/phy/upper/channel_processors/pusch/pusch_decoder_hw_impl.cpp index 12bbccd5ad..182f94c118 100644 --- a/lib/phy/upper/channel_processors/pusch/pusch_decoder_hw_impl.cpp +++ b/lib/phy/upper/channel_processors/pusch/pusch_decoder_hw_impl.cpp @@ -125,6 +125,9 @@ void pusch_decoder_hw_impl::on_end_softbits() softbits_count, modulation_order); + // Reserve a hardware-queue for the current decoding operation. + decoder->reserve_queue(); + segmenter_config segmentation_config; segmentation_config.base_graph = current_config.base_graph; segmentation_config.rv = current_config.rv; @@ -385,6 +388,9 @@ void pusch_decoder_hw_impl::copy_tb_and_notify(pusch_decoder_result& stats, unsi softbuffer.unlock(); } + // Free the hardware-queue utilized by completed decoding operation. + decoder->free_queue(); + // In case there are multiple codeblocks and at least one has a corrupted codeblock CRC, nothing to do. // Finally report decoding result. diff --git a/tests/benchmarks/phy/upper/channel_processors/pdsch_encoder_hwacc_benchmark.cpp b/tests/benchmarks/phy/upper/channel_processors/pdsch_encoder_hwacc_benchmark.cpp index c9de2ad411..df779f0b18 100644 --- a/tests/benchmarks/phy/upper/channel_processors/pdsch_encoder_hwacc_benchmark.cpp +++ b/tests/benchmarks/phy/upper/channel_processors/pdsch_encoder_hwacc_benchmark.cpp @@ -44,10 +44,11 @@ static bounded_bitset dmrs_symbol_mask = {false, false, true, false, false, false, false, false, false, false, false, false, false, false}; #ifdef DPDK_FOUND -static bool cb_mode = false; -static std::string hal_log_level = "ERROR"; -static bool std_out_sink = true; -static std::string eal_arguments = ""; +static bool dedicated_queue = true; +static bool cb_mode = false; +static std::string hal_log_level = "ERROR"; +static bool std_out_sink = true; +static std::string eal_arguments = ""; #endif // DPDK_FOUND // Test profile structure, initialized with default profile values. @@ -70,9 +71,11 @@ static test_profile selected_profile = {}; static void usage(const char* prog) { - fmt::print("Usage: {} [-T X] [-e] [-i X] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); + fmt::print("Usage: {} [-T X] [-e] [-i X] [-w] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); fmt::print("\t-T PDSCH encoder type [generic,acc100][Default {}]\n", hwacc_encoder_type); #ifdef DPDK_FOUND + fmt::print("\t-w Force shared hardware-queue use [Default {}]\n", + dedicated_queue ? "dedicated_queue" : "shared_queue"); fmt::print("\t-x Force TB mode [Default {}]\n", cb_mode ? "cb_mode" : "tb_mode"); fmt::print("\t-y Force logging output written to a file [Default {}]\n", std_out_sink ? 
"std_out" : "file"); fmt::print("\t-z Set logging level for the HAL [Default {}]\n", hal_log_level); @@ -117,12 +120,15 @@ static std::string capture_eal_args(int* argc, char*** argv) static int parse_args(int argc, char** argv) { int opt = 0; - while ((opt = getopt(argc, argv, "T:xyz:h")) != -1) { + while ((opt = getopt(argc, argv, "T:wxyz:h")) != -1) { switch (opt) { case 'T': hwacc_encoder_type = std::string(optarg); break; #ifdef DPDK_FOUND + case 'w': + dedicated_queue = false; + break; case 'x': cb_mode = true; break; @@ -194,6 +200,9 @@ static std::shared_ptr create_hw_accelera hal::hw_accelerator_pdsch_enc_configuration hw_encoder_config; hw_encoder_config.acc_type = "acc100"; hw_encoder_config.bbdev_accelerator = bbdev_accelerator; + hw_encoder_config.cb_mode = cb_mode; + hw_encoder_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF; + hw_encoder_config.dedicated_queue = dedicated_queue; // ACC100 hardware-accelerator implementation. return create_hw_accelerator_pdsch_enc_factory(hw_encoder_config); @@ -215,10 +224,6 @@ static std::shared_ptr create_acc100_pdsch_encoder_factor // Set the hardware-accelerated PDSCH encoder configuration. pdsch_encoder_factory_hw_configuration encoder_hw_factory_config; -#ifdef DPDK_FOUND - encoder_hw_factory_config.cb_mode = cb_mode; - encoder_hw_factory_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF; -#endif // DPDK_FOUND encoder_hw_factory_config.crc_factory = crc_calc_factory; encoder_hw_factory_config.segmenter_factory = segmenter_factory; encoder_hw_factory_config.hw_encoder_factory = hw_encoder_factory; diff --git a/tests/benchmarks/phy/upper/channel_processors/pdsch_processor_benchmark.cpp b/tests/benchmarks/phy/upper/channel_processors/pdsch_processor_benchmark.cpp index 28b6abafeb..8bdcc4e484 100644 --- a/tests/benchmarks/phy/upper/channel_processors/pdsch_processor_benchmark.cpp +++ b/tests/benchmarks/phy/upper/channel_processors/pdsch_processor_benchmark.cpp @@ -97,10 +97,11 @@ static unsigned pending_count = 0; static unsigned finish_count = 0; #ifdef HWACC_PDSCH_ENABLED -static bool cb_mode = false; -static bool std_out_sink = true; -static std::string hal_log_level = "error"; -static std::string eal_arguments = ""; +static bool dedicated_queue = true; +static bool cb_mode = false; +static bool std_out_sink = true; +static std::string hal_log_level = "error"; +static std::string eal_arguments = ""; #endif // HWACC_PDSCH_ENABLED // Test profile structure, initialized with default profile values. @@ -269,7 +270,7 @@ static void usage(const char* prog) { fmt::print("Usage: {} [-m benchmark mode] [-R repetitions] [-B Batch size per thread] [-T number of threads] [-D " "LDPC type] [-M rate " - "matcher type] [-P profile] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", + "matcher type] [-P profile] [-w] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); fmt::print("\t-m Benchmark mode. [Default {}]\n", to_string(benchmark_mode)); fmt::print("\t\t {:<20}It does not print any result.\n", to_string(benchmark_modes::silent)); @@ -288,6 +289,8 @@ static void usage(const char* prog) fmt::print("\t\t {:<30}{}\n", profile.name, profile.description); } #ifdef HWACC_PDSCH_ENABLED + fmt::print("\t-w Force shared hardware-queue use [Default {}]\n", + dedicated_queue ? "dedicated_queue" : "shared_queue"); fmt::print("\t-x Force TB mode [Default {}]\n", cb_mode ? "cb_mode" : "tb_mode"); fmt::print("\t-y Force logging output written to a file [Default {}]\n", std_out_sink ? 
"std_out" : "file"); fmt::print("\t-z Set logging level for the HAL [Default {}]\n", hal_log_level); @@ -332,7 +335,7 @@ static std::string capture_eal_args(int* argc, char*** argv) static int parse_args(int argc, char** argv) { int opt = 0; - while ((opt = getopt(argc, argv, "R:T:B:D:P:m:t:xyz:h")) != -1) { + while ((opt = getopt(argc, argv, "R:T:B:D:P:m:t:wxyz:h")) != -1) { switch (opt) { case 'R': nof_repetitions = std::strtol(optarg, nullptr, 10); @@ -361,6 +364,9 @@ static int parse_args(int argc, char** argv) } break; #ifdef HWACC_PDSCH_ENABLED + case 'w': + dedicated_queue = false; + break; case 'x': cb_mode = true; break; @@ -499,6 +505,9 @@ static std::shared_ptr create_hw_accelera hal::hw_accelerator_pdsch_enc_configuration hw_encoder_config; hw_encoder_config.acc_type = "acc100"; hw_encoder_config.bbdev_accelerator = bbdev_accelerator; + hw_encoder_config.cb_mode = cb_mode; + hw_encoder_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF; + hw_encoder_config.dedicated_queue = dedicated_queue; // ACC100 hardware-accelerator implementation. return create_hw_accelerator_pdsch_enc_factory(hw_encoder_config); @@ -519,10 +528,6 @@ create_acc100_pdsch_encoder_factory(std::shared_ptr crc_ // Set the hardware-accelerated PDSCH encoder configuration. pdsch_encoder_factory_hw_configuration encoder_hw_factory_config; -#ifdef HWACC_PDSCH_ENABLED - encoder_hw_factory_config.cb_mode = cb_mode; - encoder_hw_factory_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF; -#endif // HWACC_PDSCH_ENABLED encoder_hw_factory_config.crc_factory = crc_calculator_factory; encoder_hw_factory_config.segmenter_factory = segmenter_factory; encoder_hw_factory_config.hw_encoder_factory = hw_encoder_factory; diff --git a/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_decoder_hwacc_benchmark.cpp b/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_decoder_hwacc_benchmark.cpp index a6ac621f35..f70bd6bc9b 100644 --- a/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_decoder_hwacc_benchmark.cpp +++ b/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_decoder_hwacc_benchmark.cpp @@ -49,10 +49,11 @@ static bounded_bitset dmrs_symbol_mask = {false, false, true, false, false, false, false, false, false, false, false, false, false, false}; #ifdef DPDK_FOUND -static bool test_harq = false; -static std::string hal_log_level = "ERROR"; -static bool std_out_sink = true; -static std::string eal_arguments = ""; +static bool dedicated_queue = true; +static bool test_harq = false; +static std::string hal_log_level = "ERROR"; +static bool std_out_sink = true; +static std::string eal_arguments = ""; #endif // DPDK_FOUND // Test profile structure, initialized with default profile values. @@ -75,11 +76,13 @@ static test_profile selected_profile = {}; static void usage(const char* prog) { - fmt::print("Usage: {} [-T X] [-e] [-i X] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); + fmt::print("Usage: {} [-T X] [-e] [-i X] [-w] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); fmt::print("\t-T Hardware-accelerated PUSCH decoder type [acc100][Default {}]\n", hwacc_decoder_type); fmt::print("\t-e Use LDPC decoder early stop [Default {}]\n", use_early_stop); fmt::print("\t-i Number of LDPC iterations [Default {}]\n", nof_ldpc_iterations); #ifdef DPDK_FOUND + fmt::print("\t-w Force shared hardware-queue use [Default {}]\n", + dedicated_queue ? 
"dedicated_queue" : "shared_queue"); fmt::print("\t-x Use the host's memory for the soft-buffer [Default {}]\n", !ext_softbuffer); fmt::print("\t-y Force logging output written to a file [Default {}]\n", std_out_sink ? "std_out" : "file"); fmt::print("\t-z Force DEBUG logging level for the HAL [Default {}]\n", hal_log_level); @@ -124,7 +127,7 @@ static std::string capture_eal_args(int* argc, char*** argv) static int parse_args(int argc, char** argv) { int opt = 0; - while ((opt = getopt(argc, argv, "T:ei:xyz:h")) != -1) { + while ((opt = getopt(argc, argv, "T:ei:wxyz:h")) != -1) { switch (opt) { case 'T': hwacc_decoder_type = std::string(optarg); @@ -136,6 +139,9 @@ static int parse_args(int argc, char** argv) nof_ldpc_iterations = strtol(optarg, nullptr, 10); break; #ifdef DPDK_FOUND + case 'w': + dedicated_queue = false; + break; case 'x': ext_softbuffer = false; break; @@ -220,6 +226,7 @@ static std::shared_ptr create_hw_accelera hw_decoder_config.bbdev_accelerator = bbdev_accelerator; hw_decoder_config.ext_softbuffer = ext_softbuffer; hw_decoder_config.harq_buffer_context = harq_buffer_context; + hw_decoder_config.dedicated_queue = dedicated_queue; // ACC100 hardware-accelerator implementation. return create_hw_accelerator_pusch_dec_factory(hw_decoder_config); diff --git a/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_processor_benchmark.cpp b/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_processor_benchmark.cpp index f361908aa1..6af8973bfe 100644 --- a/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_processor_benchmark.cpp +++ b/tests/benchmarks/phy/upper/channel_processors/pusch/pusch_processor_benchmark.cpp @@ -124,10 +124,11 @@ static std::atomic pending_count = {0}; static std::atomic finish_count = {0}; #ifdef HWACC_PUSCH_ENABLED -static bool ext_softbuffer = true; -static bool std_out_sink = true; -static std::string hal_log_level = "error"; -static std::string eal_arguments = ""; +static bool dedicated_queue = true; +static bool ext_softbuffer = true; +static bool std_out_sink = true; +static std::string hal_log_level = "error"; +static std::string eal_arguments = ""; #endif // HWACC_PUSCH_ENABLED // Test profile structure, initialized with default profile values. @@ -197,7 +198,7 @@ static void usage(const char* prog) { fmt::print("Usage: {} [-m benchmark mode] [-R repetitions] [-B Batch size per thread] [-T number of threads] [-D " "LDPC type] [-M rate " - "dematcher type] [-P profile] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", + "dematcher type] [-P profile] [-w] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); fmt::print("\t-m Benchmark mode. [Default {}]\n", to_string(benchmark_mode)); fmt::print("\t\t {:<20}It does not print any result.\n", to_string(benchmark_modes::silent)); @@ -220,6 +221,8 @@ static void usage(const char* prog) fmt::print("\t\t {:<40} {}\n", profile.name, profile.description); } #ifdef HWACC_PUSCH_ENABLED + fmt::print("\t-w Force shared hardware-queue use [Default {}]\n", + dedicated_queue ? "dedicated_queue" : "shared_queue"); fmt::print("\t-x Use the host's memory for the soft-buffer [Default {}]\n", !ext_softbuffer); fmt::print("\t-y Force logging output written to a file [Default {}]\n", std_out_sink ? 
"std_out" : "file"); fmt::print("\t-z Set logging level for the HAL [Default {}]\n", hal_log_level); @@ -265,7 +268,7 @@ static std::string capture_eal_args(int* argc, char*** argv) static int parse_args(int argc, char** argv) { int opt = 0; - while ((opt = getopt(argc, argv, "R:T:t:B:D:M:EP:m:xyz:h")) != -1) { + while ((opt = getopt(argc, argv, "R:T:t:B:D:M:EP:m:wxyz:h")) != -1) { switch (opt) { case 'R': nof_repetitions = std::strtol(optarg, nullptr, 10); @@ -300,6 +303,9 @@ static int parse_args(int argc, char** argv) } break; #ifdef HWACC_PUSCH_ENABLED + case 'w': + dedicated_queue = false; + break; case 'x': ext_softbuffer = false; break; @@ -447,6 +453,7 @@ static std::shared_ptr create_hw_accelera hw_decoder_config.bbdev_accelerator = bbdev_accelerator; hw_decoder_config.ext_softbuffer = ext_softbuffer; hw_decoder_config.harq_buffer_context = harq_buffer_context; + hw_decoder_config.dedicated_queue = dedicated_queue; // ACC100 hardware-accelerator implementation. return create_hw_accelerator_pusch_dec_factory(hw_decoder_config); diff --git a/tests/unittests/phy/upper/channel_processors/pdsch_encoder_test.cpp b/tests/unittests/phy/upper/channel_processors/pdsch_encoder_test.cpp index 65de306749..17fd621052 100644 --- a/tests/unittests/phy/upper/channel_processors/pdsch_encoder_test.cpp +++ b/tests/unittests/phy/upper/channel_processors/pdsch_encoder_test.cpp @@ -26,17 +26,20 @@ using namespace srsran::ldpc; static std::string encoder_type = "generic"; #ifdef HWACC_PDSCH_ENABLED -static bool cb_mode = false; -static std::string hal_log_level = "ERROR"; -static bool std_out_sink = true; -static std::string eal_arguments = ""; +static bool dedicated_queue = true; +static bool cb_mode = false; +static std::string hal_log_level = "ERROR"; +static bool std_out_sink = true; +static std::string eal_arguments = ""; #endif // HWACC_PDSCH_ENABLED static void usage(const char* prog) { - fmt::print("Usage: {} [-T X] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); + fmt::print("Usage: {} [-T X] [-w] [-x] [-y] [-z error|warning|info|debug] [-h] [eal_args ...]\n", prog); fmt::print("\t-T PDSCH encoder type [generic,acc100][Default {}]\n", encoder_type); #ifdef HWACC_PDSCH_ENABLED + fmt::print("\t-w Force shared hardware-queue use [Default {}]\n", + dedicated_queue ? "dedicated_queue" : "shared_queue"); fmt::print("\t-x Force TB mode [Default {}]\n", cb_mode ? "cb_mode" : "tb_mode"); fmt::print("\t-y Force logging output written to a file [Default {}]\n", std_out_sink ? "std_out" : "file"); fmt::print("\t-z Set logging level for the HAL [Default {}]\n", hal_log_level); @@ -81,12 +84,15 @@ static std::string capture_eal_args(int* argc, char*** argv) static void parse_args(int argc, char** argv) { int opt = 0; - while ((opt = getopt(argc, argv, "T:xyz:h")) != -1) { + while ((opt = getopt(argc, argv, "T:wxyz:h")) != -1) { switch (opt) { case 'T': encoder_type = std::string(optarg); break; #ifdef HWACC_PDSCH_ENABLED + case 'w': + dedicated_queue = false; + break; case 'x': cb_mode = true; break; @@ -157,6 +163,9 @@ static std::shared_ptr create_hw_accelera hal::hw_accelerator_pdsch_enc_configuration hw_encoder_config; hw_encoder_config.acc_type = "acc100"; hw_encoder_config.bbdev_accelerator = bbdev_accelerator; + hw_encoder_config.cb_mode = cb_mode; + hw_encoder_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF; + hw_encoder_config.dedicated_queue = dedicated_queue; // ACC100 hardware-accelerator implementation. 
   return create_hw_accelerator_pdsch_enc_factory(hw_encoder_config);
@@ -178,10 +187,6 @@ static std::shared_ptr create_acc100_pdsch_encoder_factor
   // Set the hardware-accelerated PDSCH encoder configuration.
   pdsch_encoder_factory_hw_configuration encoder_hw_factory_config;
-#ifdef HWACC_PDSCH_ENABLED
-  encoder_hw_factory_config.cb_mode = cb_mode;
-  encoder_hw_factory_config.max_tb_size = RTE_BBDEV_LDPC_E_MAX_MBUF;
-#endif // HWACC_PDSCH_ENABLED
   encoder_hw_factory_config.crc_factory = crc_calc_factory;
   encoder_hw_factory_config.segmenter_factory = segmenter_factory;
   encoder_hw_factory_config.hw_encoder_factory = hw_encoder_factory;
diff --git a/tests/unittests/phy/upper/channel_processors/pdsch_processor_vectortest.cpp b/tests/unittests/phy/upper/channel_processors/pdsch_processor_vectortest.cpp
index 63982e7fee..c5ca4e97d0 100644
--- a/tests/unittests/phy/upper/channel_processors/pdsch_processor_vectortest.cpp
+++ b/tests/unittests/phy/upper/channel_processors/pdsch_processor_vectortest.cpp
@@ -111,6 +111,9 @@ class PdschProcessorFixture : public ::testing::TestWithParam
create_hw_accelera
   hw_decoder_config.bbdev_accelerator = bbdev_accelerator;
   hw_decoder_config.ext_softbuffer = ext_softbuffer;
   hw_decoder_config.harq_buffer_context = harq_buffer_context;
+  hw_decoder_config.dedicated_queue = dedicated_queue;
   // ACC100 hardware-accelerator implementation.
   return create_hw_accelerator_pusch_dec_factory(hw_decoder_config);
diff --git a/tests/unittests/phy/upper/channel_processors/pusch/pusch_processor_vectortest.cpp b/tests/unittests/phy/upper/channel_processors/pusch/pusch_processor_vectortest.cpp
index e73f7ac3ea..10a946de8e 100644
--- a/tests/unittests/phy/upper/channel_processors/pusch/pusch_processor_vectortest.cpp
+++ b/tests/unittests/phy/upper/channel_processors/pusch/pusch_processor_vectortest.cpp
@@ -137,6 +137,7 @@ class PuschProcessorFixture : public ::testing::TestWithParam
Date: Mon, 17 Jun 2024 14:25:52 +0200
Subject: [PATCH 12/49] Silence a pair of clang-tidy warnings introducing too much noise in the codebase

---
 .clang-tidy | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.clang-tidy b/.clang-tidy
index 7e8be7d9cb..9ac1ace65b 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -25,8 +25,10 @@ Checks: '-*,
   -misc-include-cleaner,
   modernize-*,
   -modernize-avoid-c-arrays,
+  -modernize-use-nodiscard,
   -modernize-use-trailing-return-type,
   performance-*,
+  -performance-enum-size,
   readability-*,
   -readability-avoid-unconditional-preprocessor-if,
   -readability-function-cognitive-complexity,

From 25e21e7bca1a6ea90227c1ea5624bd055f8b672e Mon Sep 17 00:00:00 2001
From: Pedro Alvarez
Date: Thu, 13 Jun 2024 14:50:52 +0100
Subject: [PATCH 13/49] apps,cu: remove some TODOs from app

---
 apps/cu/cu.cpp | 28 ++---------------
 apps/cu/cu_appconfig.h | 45 ++++++---------------------
 apps/cu/cu_appconfig_cli11_schema.cpp | 6 ++--
 3 files changed, 14 insertions(+), 65 deletions(-)

diff --git a/apps/cu/cu.cpp b/apps/cu/cu.cpp
index 4cc42cfee9..9abce69a43 100644
--- a/apps/cu/cu.cpp
+++ b/apps/cu/cu.cpp
@@ -35,6 +35,7 @@
 #include "apps/cu/cu_worker_manager.h"
 #include "apps/services/metrics_log_helper.h"
 #include "apps/units/cu_cp/cu_cp_builder.h"
+#include "apps/units/cu_cp/cu_cp_config_translators.h"
 #include "apps/units/cu_cp/cu_cp_logger_registrator.h"
 #include "apps/units/cu_cp/cu_cp_unit_config.h"
 #include "apps/units/cu_cp/cu_cp_unit_config_cli11_schema.h"
@@ -59,7 +60,6 @@
 #include "apps/services/application_message_banners.h"
 #include "apps/services/application_tracer.h"
 #include
"apps/services/stdin_command_dispatcher.h" -#include "apps/units/cu_cp/cu_cp_config_translators.h" #include "cu_appconfig.h" #include @@ -119,7 +119,7 @@ static void initialize_log(const std::string& filename) srslog::init(); } -static void register_app_logs(const log_appconfig& log_cfg, +static void register_app_logs(const srs_cu::log_appconfig& log_cfg, const cu_cp_unit_logger_config& cu_cp_loggers, const cu_up_unit_logger_config& cu_up_loggers) { @@ -191,12 +191,8 @@ int main(int argc, char** argv) // Set the callback for the app calling all the autoderivation functions. app.callback([&app, &cu_cp_config]() { // Create the PLMN and TAC list from the cells. - // TODO remove hard-coded value std::vector plmns; std::vector tacs; - plmns.emplace_back("00101"); - tacs.emplace_back(7); - autoderive_cu_cp_parameters_after_parsing(app, cu_cp_config, std::move(plmns), std::move(tacs)); }); @@ -296,24 +292,16 @@ int main(int argc, char** argv) // Create manager of timers for CU-CP and CU-UP, which will be // driven by the system timer slot ticks. - // TODO revisit how to use the system timer timer source. timer_manager app_timers{256}; timer_manager* cu_timers = &app_timers; // Create time source that ticks the timers io_timer_source time_source{app_timers, *epoll_broker, std::chrono::milliseconds{1}}; - // Set up the JSON log channel used by metrics. - // TODO metrics. Do we have any CU-CP or CU-UP JSON metrics? - // Create N2 Client Gateway. std::unique_ptr n2_client = srs_cu_cp::create_n2_connection_client( generate_n2_client_config(cu_cp_config.amf_cfg, *cu_cp_dlt_pcaps.ngap, *epoll_broker)); - // E2AP configuration. - // Create E2AP GW remote connector. - // TODO This seems to be used in the DU only? - // Create CU-CP config. cu_cp_build_dependencies cu_cp_dependencies; cu_cp_dependencies.cu_cp_executor = workers.cu_cp_exec; @@ -325,12 +313,6 @@ int main(int argc, char** argv) auto cu_cp_obj_and_cmds = build_cu_cp(cu_cp_config, cu_cp_dependencies); srs_cu_cp::cu_cp& cu_cp_obj = *cu_cp_obj_and_cmds.unit; - // TODO: Remove JSON sink and refactor console_helper to not require it upon construction - // Set up the JSON log channel used by metrics. - srslog::sink& json_sink = srslog::fetch_udp_sink("127.0.9.9", 61234, srslog::create_json_formatter()); - srslog::log_channel& json_channel = srslog::fetch_log_channel("JSON_channel", json_sink, {}); - json_channel.set_enabled(false); - // Create console helper object for commands and metrics printing. app_services::stdin_command_dispatcher command_parser(*epoll_broker, cu_cp_obj_and_cmds.commands); @@ -354,9 +336,6 @@ int main(int argc, char** argv) cu_f1c_gw->attach_cu_cp(cu_cp_obj.get_f1c_handler()); // Create and start CU-UP - // TODO right now build_cu_up() depends on the worker_manager, but the worker manager - // depends on multiple configurations of the DU/RU. As such, temporarly we avoid the - // function and create things direclty here. std::unique_ptr cu_up_obj = app_build_cu_up(cu_up_config, workers, cu_cfg.nru_cfg.bind_addr, @@ -394,9 +373,6 @@ int main(int argc, char** argv) srslog::flush(); - // Clean cgroups - // TODO required for CU? 
- return 0; } diff --git a/apps/cu/cu_appconfig.h b/apps/cu/cu_appconfig.h index b865504c6e..28af379a24 100644 --- a/apps/cu/cu_appconfig.h +++ b/apps/cu/cu_appconfig.h @@ -10,12 +10,11 @@ #pragma once -#include "apps/services/os_sched_affinity_manager.h" -#include "srsran/support/executors/unique_thread.h" -#include +#include "apps/gnb/gnb_appconfig.h" #include namespace srsran { +namespace srs_cu { /// Configuration of logging functionalities. struct log_appconfig { @@ -34,36 +33,6 @@ struct log_appconfig { std::string tracing_filename; }; -/// CPU affinities configuration for the gNB app. -struct cpu_affinities_appconfig { - /// CPUs isolation. - std::optional isolated_cpus; - /// Low priority workers CPU affinity mask. - os_sched_affinity_config low_priority_cpu_cfg = {sched_affinity_mask_types::low_priority, - {}, - sched_affinity_mask_policy::mask}; -}; - -/// Non real time thread configuration for the gNB. -struct non_rt_threads_appconfig { - /// Number of non real time threads for processing of CP and UP data in the upper layers - unsigned nof_non_rt_threads = 4; -}; - -/// Expert threads configuration of the CU app. -struct expert_threads_appconfig { - /// Non real time thread configuration of the gNB app. - non_rt_threads_appconfig non_rt_threads; -}; - -/// Expert configuration of the gNB app. -struct expert_execution_appconfig { - /// gNB CPU affinities. - cpu_affinities_appconfig affinities; - /// Expert thread configuration of the gNB app. - expert_threads_appconfig threads; -}; - /// NR-U configuration struct cu_nru_appconfig { std::string bind_addr = "127.0.10.1"; // Bind address used by the F1-U interface @@ -75,20 +44,24 @@ struct cu_f1ap_appconfig { /// F1-C bind address std::string bind_address = "127.0.10.1"; }; +} // namespace srs_cu /// Monolithic gnb application configuration. struct cu_appconfig { /// Logging configuration. - log_appconfig log_cfg; + srs_cu::log_appconfig log_cfg; /// Expert configuration. expert_execution_appconfig expert_execution_cfg; /// NR-U - cu_nru_appconfig nru_cfg; + srs_cu::cu_nru_appconfig nru_cfg; /// F1AP - cu_f1ap_appconfig f1ap_cfg; + srs_cu::cu_f1ap_appconfig f1ap_cfg; + + /// Buffer pool configuration. + buffer_pool_appconfig buffer_pool_config; /// TODO fill in the rest of the configuration }; diff --git a/apps/cu/cu_appconfig_cli11_schema.cpp b/apps/cu/cu_appconfig_cli11_schema.cpp index d5ae9e4c96..1c90ea37dc 100644 --- a/apps/cu/cu_appconfig_cli11_schema.cpp +++ b/apps/cu/cu_appconfig_cli11_schema.cpp @@ -16,7 +16,7 @@ using namespace srsran; // TODO this is common between DU and CU. 
-static void configure_cli11_log_args(CLI::App& app, log_appconfig& log_params) +static void configure_cli11_log_args(CLI::App& app, srs_cu::log_appconfig& log_params) { auto level_check = [](const std::string& value) -> std::string { if (value == "info" || value == "debug" || value == "warning" || value == "error") { @@ -83,12 +83,12 @@ static void configure_cli11_log_args(CLI::App& app, log_appconfig& log_params) }); } -static void configure_cli11_f1ap_args(CLI::App& app, cu_f1ap_appconfig& f1ap_params) +static void configure_cli11_f1ap_args(CLI::App& app, srs_cu::cu_f1ap_appconfig& f1ap_params) { add_option(app, "--bind_address", f1ap_params.bind_address, "F1-C bind address")->capture_default_str(); } -static void configure_cli11_nru_args(CLI::App& app, cu_nru_appconfig& nru_cfg) +static void configure_cli11_nru_args(CLI::App& app, srs_cu::cu_nru_appconfig& nru_cfg) { add_option(app, "--bind_addr", From 910c1a68339a93fc41c29f3935aa9f4ccba53ebb Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Thu, 13 Jun 2024 15:07:26 +0100 Subject: [PATCH 14/49] apps,cu: add buffer pool configuration --- apps/cu/cu.cpp | 2 +- apps/cu/cu_appconfig_cli11_schema.cpp | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/apps/cu/cu.cpp b/apps/cu/cu.cpp index 9abce69a43..ca0fd2022f 100644 --- a/apps/cu/cu.cpp +++ b/apps/cu/cu.cpp @@ -226,7 +226,7 @@ int main(int argc, char** argv) // TODO // Setup size of byte buffer pool. - // TODO byte_buffer_pool + init_byte_buffer_segment_pool(cu_cfg.buffer_pool_config.nof_segments, cu_cfg.buffer_pool_config.segment_size); // Log build info cu_logger.info("Built in {} mode using {}", get_build_mode(), get_build_info()); diff --git a/apps/cu/cu_appconfig_cli11_schema.cpp b/apps/cu/cu_appconfig_cli11_schema.cpp index 1c90ea37dc..4c3a93f6eb 100644 --- a/apps/cu/cu_appconfig_cli11_schema.cpp +++ b/apps/cu/cu_appconfig_cli11_schema.cpp @@ -98,6 +98,14 @@ static void configure_cli11_nru_args(CLI::App& app, srs_cu::cu_nru_appconfig& nr add_option(app, "--udp_max_rx_msgs", nru_cfg.udp_rx_max_msgs, "Maximum amount of messages RX in a single syscall"); } +static void configure_cli11_buffer_pool_args(CLI::App& app, buffer_pool_appconfig& config) +{ + app.add_option("--nof_segments", config.nof_segments, "Number of segments allocated by the buffer pool") + ->capture_default_str(); + app.add_option("--segment_size", config.segment_size, "Size of each buffer pool segment in bytes") + ->capture_default_str(); +} + void srsran::configure_cli11_with_cu_appconfig_schema(CLI::App& app, cu_appconfig& cu_parsed_cfg) { cu_appconfig& cu_cfg = cu_parsed_cfg; @@ -115,4 +123,8 @@ void srsran::configure_cli11_with_cu_appconfig_schema(CLI::App& app, cu_appconfi CLI::App* cu_up_subcmd = add_subcommand(app, "cu_up", "CU-UP parameters")->configurable(); CLI::App* nru_subcmd = add_subcommand(*cu_up_subcmd, "nru", "NR-U parameters")->configurable(); configure_cli11_nru_args(*nru_subcmd, cu_parsed_cfg.nru_cfg); + + // Buffer pool section. 
+ CLI::App* buffer_pool_subcmd = app.add_subcommand("buffer_pool", "Buffer pool configuration")->configurable(); + configure_cli11_buffer_pool_args(*buffer_pool_subcmd, cu_parsed_cfg.buffer_pool_config); } From eb1028aeae16b13247955269a1dd8b7ebd034ea6 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Mon, 17 Jun 2024 12:45:49 +0100 Subject: [PATCH 15/49] apps,gnb,du: make DU pcap container contain RLC and MAC pcaps --- apps/du/du.cpp | 27 +++++------- apps/gnb/gnb.cpp | 27 ++++-------- apps/units/flexible_du/du_high/pcap_factory.h | 43 +++++++++---------- 3 files changed, 40 insertions(+), 57 deletions(-) diff --git a/apps/du/du.cpp b/apps/du/du.cpp index 0a713d5f6e..5b9f3e1ec5 100644 --- a/apps/du/du.cpp +++ b/apps/du/du.cpp @@ -9,7 +9,6 @@ */ #include "srsran/gtpu/gtpu_config.h" -#include "srsran/pcap/mac_pcap.h" #include "srsran/support/build_info/build_info.h" #include "srsran/support/cpu_features.h" #include "srsran/support/event_tracing.h" @@ -42,13 +41,13 @@ #include -#include "../units/flexible_du/du_high/pcap_factory.h" #include "apps/services/application_message_banners.h" #include "apps/services/application_tracer.h" #include "apps/services/core_isolation_manager.h" #include "apps/services/metrics_plotter_json.h" #include "apps/services/metrics_plotter_stdout.h" #include "apps/services/stdin_command_dispatcher.h" +#include "apps/units/flexible_du/du_high/pcap_factory.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_cli11_schema.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_config_validator.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_logger_registrator.h" @@ -248,16 +247,12 @@ int main(int argc, char** argv) io_broker_config io_broker_cfg(low_prio_cpu_mask); std::unique_ptr epoll_broker = create_io_broker(io_broker_type::epoll, io_broker_cfg); - srsran::modules::flexible_du::du_dlt_pcaps du_dlt_pcaps = - modules::flexible_du::create_dlt_pcaps(du_unit_cfg.du_high_cfg.config.pcaps, workers); - std::unique_ptr mac_p = - modules::flexible_du::create_mac_pcap(du_unit_cfg.du_high_cfg.config.pcaps, workers); - std::unique_ptr rlc_p = - modules::flexible_du::create_rlc_pcap(du_unit_cfg.du_high_cfg.config.pcaps, workers); + srsran::modules::flexible_du::du_pcaps du_pcaps = + modules::flexible_du::create_pcaps(du_unit_cfg.du_high_cfg.config.pcaps, workers); // Instantiate F1-C client gateway. std::unique_ptr f1c_gw = create_f1c_client_gateway( - du_cfg.f1ap_cfg.cu_cp_address, du_cfg.f1ap_cfg.bind_address, *epoll_broker, *du_dlt_pcaps.f1ap); + du_cfg.f1ap_cfg.cu_cp_address, du_cfg.f1ap_cfg.bind_address, *epoll_broker, *du_pcaps.f1ap); // Create manager of timers for DU, which will be driven by the PHY slot ticks. timer_manager app_timers{256}; @@ -266,7 +261,7 @@ int main(int argc, char** argv) // TODO: Simplify this and use factory. 
gtpu_demux_creation_request du_f1u_gtpu_msg = {}; du_f1u_gtpu_msg.cfg.warn_on_drop = true; - du_f1u_gtpu_msg.gtpu_pcap = du_dlt_pcaps.f1u.get(); + du_f1u_gtpu_msg.gtpu_pcap = du_pcaps.f1u.get(); std::unique_ptr du_f1u_gtpu_demux = create_gtpu_demux(du_f1u_gtpu_msg); udp_network_gateway_config du_f1u_gw_config = {}; du_f1u_gw_config.bind_address = du_cfg.nru_cfg.bind_address; @@ -277,7 +272,7 @@ int main(int argc, char** argv) *epoll_broker, workers.get_du_high_executor_mapper(0).ue_mapper().mac_ul_pdu_executor(to_du_ue_index(0))); std::unique_ptr du_f1u_conn = - srs_du::create_split_f1u_gw({du_f1u_gw.get(), du_f1u_gtpu_demux.get(), *du_dlt_pcaps.f1u, GTPU_PORT}); + srs_du::create_split_f1u_gw({du_f1u_gw.get(), du_f1u_gtpu_demux.get(), *du_pcaps.f1u, GTPU_PORT}); // Set up the JSON log channel used by metrics. srslog::sink& json_sink = @@ -297,7 +292,7 @@ int main(int argc, char** argv) srsran::sctp_network_connector_config e2_du_nw_config = generate_e2ap_nw_config(du_cfg, E2_DU_PPID); // Create E2AP GW remote connector. - e2_gateway_remote_connector e2_gw{*epoll_broker, e2_du_nw_config, *du_dlt_pcaps.e2ap}; + e2_gateway_remote_connector e2_gw{*epoll_broker, e2_du_nw_config, *du_pcaps.e2ap}; // Create metrics log helper. metrics_log_helper metrics_logger(srslog::fetch_basic_logger("METRICS")); @@ -310,8 +305,8 @@ int main(int argc, char** argv) *f1c_gw, *du_f1u_conn, app_timers, - *mac_p, - *rlc_p, + *du_pcaps.mac, + *du_pcaps.rlc, metrics_stdout, metrics_json, metrics_logger, @@ -345,9 +340,7 @@ int main(int argc, char** argv) } du_logger.info("Closing PCAP files..."); - mac_p->close(); - rlc_p->close(); - du_dlt_pcaps.close(); + du_pcaps.close(); du_logger.info("PCAP files successfully closed."); du_logger.info("Stopping executors..."); diff --git a/apps/gnb/gnb.cpp b/apps/gnb/gnb.cpp index 9fc16162e0..87c6af63c4 100644 --- a/apps/gnb/gnb.cpp +++ b/apps/gnb/gnb.cpp @@ -8,8 +8,6 @@ * */ -#include "srsran/pcap/dlt_pcap.h" -#include "srsran/pcap/rlc_pcap.h" #include "srsran/support/build_info/build_info.h" #include "srsran/support/cpu_features.h" #include "srsran/support/event_tracing.h" @@ -55,15 +53,15 @@ #include -#include "../units/cu_cp/pcap_factory.h" -#include "../units/cu_up/pcap_factory.h" -#include "../units/flexible_du/du_high/pcap_factory.h" #include "apps/services/application_message_banners.h" #include "apps/services/core_isolation_manager.h" #include "apps/services/metrics_plotter_json.h" #include "apps/services/metrics_plotter_stdout.h" #include "apps/units/cu_cp/cu_cp_builder.h" +#include "apps/units/cu_cp/pcap_factory.h" #include "apps/units/cu_up/cu_up_builder.h" +#include "apps/units/cu_up/pcap_factory.h" +#include "apps/units/flexible_du/du_high/pcap_factory.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_cli11_schema.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_config_validator.h" #include "apps/units/flexible_du/split_dynamic/dynamic_du_unit_logger_registrator.h" @@ -303,13 +301,8 @@ int main(int argc, char** argv) modules::cu_cp::create_dlt_pcap(cu_cp_config.pcap_cfg, *workers.get_executor_getter()); srsran::modules::cu_up::cu_up_dlt_pcaps cu_up_dlt_pcaps = modules::cu_up::create_dlt_pcaps(cu_up_config.pcap_cfg, *workers.get_executor_getter()); - - srsran::modules::flexible_du::du_dlt_pcaps du_dlt_pcaps = - modules::flexible_du::create_dlt_pcaps(du_unit_cfg.du_high_cfg.config.pcaps, workers); - std::unique_ptr mac_p = - modules::flexible_du::create_mac_pcap(du_unit_cfg.du_high_cfg.config.pcaps, workers); - std::unique_ptr rlc_p = 
- modules::flexible_du::create_rlc_pcap(du_unit_cfg.du_high_cfg.config.pcaps, workers); + srsran::modules::flexible_du::du_pcaps du_pcaps = + modules::flexible_du::create_pcaps(du_unit_cfg.du_high_cfg.config.pcaps, workers); std::unique_ptr f1c_gw = create_f1c_local_connector(f1c_local_connector_config{*cu_cp_dlt_pcaps.f1ap}); @@ -351,7 +344,7 @@ int main(int argc, char** argv) srsran::sctp_network_connector_config e2_du_nw_config = generate_e2ap_nw_config(gnb_cfg, E2_DU_PPID); // Create E2AP GW remote connector. - e2_gateway_remote_connector e2_gw{*epoll_broker, e2_du_nw_config, *du_dlt_pcaps.e2ap}; + e2_gateway_remote_connector e2_gw{*epoll_broker, e2_du_nw_config, *du_pcaps.e2ap}; // Create CU-CP config. cu_cp_build_dependencies cu_cp_dependencies; @@ -401,8 +394,8 @@ int main(int argc, char** argv) *f1c_gw, *f1u_conn->get_f1u_du_gateway(), app_timers, - *mac_p, - *rlc_p, + *du_pcaps.mac, + *du_pcaps.rlc, metrics_stdout, metrics_json, metrics_logger, @@ -450,11 +443,9 @@ int main(int argc, char** argv) } gnb_logger.info("Closing PCAP files..."); - mac_p->close(); - rlc_p->close(); cu_cp_dlt_pcaps.close(); cu_up_dlt_pcaps.close(); - du_dlt_pcaps.close(); + du_pcaps.close(); gnb_logger.info("PCAP files successfully closed."); gnb_logger.info("Stopping executors..."); diff --git a/apps/units/flexible_du/du_high/pcap_factory.h b/apps/units/flexible_du/du_high/pcap_factory.h index bd5694b1e9..5aef3218a3 100644 --- a/apps/units/flexible_du/du_high/pcap_factory.h +++ b/apps/units/flexible_du/du_high/pcap_factory.h @@ -19,22 +19,29 @@ namespace srsran { namespace modules { namespace flexible_du { -struct du_dlt_pcaps { +struct du_pcaps { + // DLT PCAPs std::unique_ptr f1ap; std::unique_ptr f1u; std::unique_ptr e2ap; + + // MAC and RLC PCAPs + std::unique_ptr mac; + std::unique_ptr rlc; void close() { f1ap.reset(); f1u.reset(); e2ap.reset(); + mac.reset(); + rlc.reset(); } }; /// Creates the DLT PCAP of the DU. -inline du_dlt_pcaps create_dlt_pcaps(const du_high_unit_pcap_config& pcap_cfg, worker_manager& workers) +inline du_pcaps create_pcaps(const du_high_unit_pcap_config& pcap_cfg, worker_manager& workers) { - du_dlt_pcaps pcaps; + du_pcaps pcaps; pcaps.f1ap = pcap_cfg.f1ap.enabled ? create_f1ap_pcap(pcap_cfg.f1ap.filename, workers.get_executor("pcap_exec")) : create_null_dlt_pcap(); @@ -45,33 +52,25 @@ inline du_dlt_pcaps create_dlt_pcaps(const du_high_unit_pcap_config& pcap_cfg, w pcaps.e2ap = pcap_cfg.e2ap.enabled ? create_e2ap_pcap(pcap_cfg.e2ap.filename, workers.get_executor("pcap_exec")) : create_null_dlt_pcap(); - return pcaps; -} - -/// Creates the MAC PCAP of the DU. -inline std::unique_ptr create_mac_pcap(const du_high_unit_pcap_config& pcap_cfg, worker_manager& workers) -{ if (pcap_cfg.mac.type != "dlt" && pcap_cfg.mac.type != "udp") { report_error("Invalid type for MAC PCAP. type={}\n", pcap_cfg.mac.type); } - return pcap_cfg.mac.enabled ? create_mac_pcap(pcap_cfg.mac.filename, - pcap_cfg.mac.type == "dlt" ? mac_pcap_type::dlt : mac_pcap_type::udp, - workers.get_executor("mac_pcap_exec")) - : create_null_mac_pcap(); -} + pcaps.mac = pcap_cfg.mac.enabled + ? create_mac_pcap(pcap_cfg.mac.filename, + pcap_cfg.mac.type == "dlt" ? mac_pcap_type::dlt : mac_pcap_type::udp, + workers.get_executor("mac_pcap_exec")) + : create_null_mac_pcap(); -/// Creates the RLC PCAP of the DU. 
-inline std::unique_ptr create_rlc_pcap(const du_high_unit_pcap_config& pcap_cfg, worker_manager& workers) -{ if (pcap_cfg.rlc.rb_type != "all" && pcap_cfg.rlc.rb_type != "srb" && pcap_cfg.rlc.rb_type != "drb") { report_error("Invalid rb_type for RLC PCAP. rb_type={}\n", pcap_cfg.rlc.rb_type); } - return pcap_cfg.rlc.enabled ? create_rlc_pcap(pcap_cfg.rlc.filename, - workers.get_executor("rlc_pcap_exec"), - pcap_cfg.rlc.rb_type != "drb", - pcap_cfg.rlc.rb_type != "srb") - : create_null_rlc_pcap(); + pcaps.rlc = pcap_cfg.rlc.enabled ? create_rlc_pcap(pcap_cfg.rlc.filename, + workers.get_executor("rlc_pcap_exec"), + pcap_cfg.rlc.rb_type != "drb", + pcap_cfg.rlc.rb_type != "srb") + : create_null_rlc_pcap(); + return pcaps; } } // namespace flexible_du From e47a8fb9c0b57de2e4318d2ec631e8f940b9890e Mon Sep 17 00:00:00 2001 From: Supreeth Herle Date: Wed, 12 Jun 2024 14:18:16 +0200 Subject: [PATCH 16/49] sched: configure only 1 ZP CSI-RS resource to use in all cases with nof. ports less than or equal to 4 --- lib/scheduler/config/csi_helper.cpp | 136 +++++++++++++++------------- 1 file changed, 71 insertions(+), 65 deletions(-) diff --git a/lib/scheduler/config/csi_helper.cpp b/lib/scheduler/config/csi_helper.cpp index 481f23708b..8a100021b0 100644 --- a/lib/scheduler/config/csi_helper.cpp +++ b/lib/scheduler/config/csi_helper.cpp @@ -151,24 +151,20 @@ bool srsran::csi_helper::derive_valid_csi_rs_slot_offsets(csi_builder_params& static zp_csi_rs_resource make_default_zp_csi_rs_resource(const csi_builder_params& params) { - zp_csi_rs_resource res{}; - res.id = static_cast(0); - res.res_mapping.nof_ports = params.nof_ports; - if (params.nof_ports == 1) { - res.res_mapping.fd_alloc.resize(12); - res.res_mapping.fd_alloc.set(8, true); - res.res_mapping.cdm = csi_rs_cdm_type::no_CDM; - } else if (params.nof_ports == 2) { - res.res_mapping.fd_alloc.resize(6); - res.res_mapping.fd_alloc.set(4, true); - res.res_mapping.cdm = csi_rs_cdm_type::fd_CDM2; - } else if (params.nof_ports == 4) { - res.res_mapping.fd_alloc.resize(3); - res.res_mapping.fd_alloc.set(2, true); - res.res_mapping.cdm = csi_rs_cdm_type::fd_CDM2; - } else { + if (params.nof_ports > 4) { report_error("Unsupported number of antenna ports={}", params.nof_ports); } + + zp_csi_rs_resource res{}; + res.id = static_cast(0); + // [Implementation-defined] The reason for using row 4 of Table 7.4.1.5.3-1 in TS 38.221 even in case of nof. ports < + // 4 is due to some RUs not supporting more than 1 ZP CSI-RS resource per symbol. Also, the specification does not + // restrict from using row 4 even in case of nof. ports < 4. + // Freq Alloc -> Row4. + res.res_mapping.nof_ports = 4; + res.res_mapping.fd_alloc.resize(3); + res.res_mapping.fd_alloc.set(2, true); + res.res_mapping.cdm = csi_rs_cdm_type::fd_CDM2; res.res_mapping.first_ofdm_symbol_in_td = 8; res.res_mapping.freq_density = csi_rs_freq_density_type::one; res.res_mapping.freq_band_rbs = get_csi_freq_occupation_rbs(params.nof_rbs, params.nof_rbs); @@ -181,38 +177,15 @@ static zp_csi_rs_resource make_default_zp_csi_rs_resource(const csi_builder_para std::vector srsran::csi_helper::make_periodic_zp_csi_rs_resource_list(const csi_builder_params& params) { - std::vector list; - - if (params.nof_ports == 1) { - // 4 zp-CSI-RS resources at different frequency locations. 
- list.resize(4, make_default_zp_csi_rs_resource(params)); - - for (unsigned res_id = 0; res_id != list.size(); ++res_id) { - list[res_id].id = static_cast(res_id); - list[res_id].res_mapping.fd_alloc.reset(); - list[res_id].res_mapping.fd_alloc.set(8 + res_id, true); - } - - } else if (params.nof_ports == 2) { - // 2 zp-CSI-RS resources at different frequency locations. - list.resize(2, make_default_zp_csi_rs_resource(params)); - - for (unsigned res_id = 0; res_id != list.size(); ++res_id) { - list[res_id].id = static_cast(res_id); - list[res_id].res_mapping.fd_alloc.reset(); - list[res_id].res_mapping.fd_alloc.set(4 + res_id, true); - } - - } else if (params.nof_ports == 4) { - // 1 zp-CSI-RS resource. - - list.resize(1, make_default_zp_csi_rs_resource(params)); - list[0].id = static_cast(0); - - } else { - report_error("Unsupported number of antenna ports"); + if (params.nof_ports > 4) { + report_error("Unsupported number of antenna ports {}", params.nof_ports); } + std::vector list; + // 1 zp-CSI-RS resource. + list.resize(1, make_default_zp_csi_rs_resource(params)); + list[0].id = static_cast(0); + for (auto& res : list) { res.offset = params.zp_csi_slot_offset; res.period = params.csi_rs_period; @@ -223,23 +196,15 @@ srsran::csi_helper::make_periodic_zp_csi_rs_resource_list(const csi_builder_para zp_csi_rs_resource_set srsran::csi_helper::make_periodic_zp_csi_rs_resource_set(const csi_builder_params& params) { - zp_csi_rs_resource_set zp_set{}; - - zp_set.id = static_cast(0); - - if (params.nof_ports == 1) { - zp_set.zp_csi_rs_res_list = {static_cast(0), - static_cast(1), - static_cast(2), - static_cast(3)}; - } else if (params.nof_ports == 2) { - zp_set.zp_csi_rs_res_list = {static_cast(0), static_cast(1)}; - } else if (params.nof_ports == 4) { - zp_set.zp_csi_rs_res_list = {static_cast(0)}; - } else { + if (params.nof_ports > 4) { report_error("Unsupported number of antenna ports {}", params.nof_ports); } + zp_csi_rs_resource_set zp_set{}; + + zp_set.id = static_cast(0); + zp_set.zp_csi_rs_res_list = {static_cast(0)}; + return zp_set; } @@ -396,8 +361,48 @@ static std::vector make_nzp_csi_rs_resource_sets() return sets; } +/// \brief Returns the subcarrier location given the bit location set in \c frequencyDomainAllocation of CSI-RS-Resource +/// and size of the \c frequencyDomainAllocation bitmap. +static unsigned get_subcarrier_location_from_fd_alloc_bit_location(int fd_alloc_bit_location, + uint8_t fd_alloc_bitmap_size) +{ + // See TS 38.211, clause 7.4.1.5.3. 
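+  // The switch below maps the bit location to a subcarrier offset using a row-dependent scaling (1, 1, 4 and 2
+  // subcarriers per bit for bitmap sizes 4, 12, 3 and 6, respectively). For example, the default ZP CSI-RS resource
+  // configured above sets bit 2 of a 3-bit (row 4) bitmap, which maps to subcarrier 4 * 2 = 8.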
+ switch (fd_alloc_bitmap_size) { + case 4: { + srsran_assert(fd_alloc_bit_location < 4, + "Invalid bit location={} in frequency domain allocation of CSI-RS for row 1", + fd_alloc_bit_location); + return fd_alloc_bit_location; + } + case 12: { + srsran_assert(fd_alloc_bit_location < 12, + "Invalid bit location={} in frequency domain allocation of CSI-RS for row 2", + fd_alloc_bit_location); + return fd_alloc_bit_location; + } + case 3: { + srsran_assert(fd_alloc_bit_location < 3, + "Invalid bit location={} in frequency domain allocation of CSI-RS for row 4", + fd_alloc_bit_location); + return 4 * fd_alloc_bit_location; + } + case 6: { + srsran_assert(fd_alloc_bit_location < 6, + "Invalid bit location={} in frequency domain allocation of CSI-RS for row other", + fd_alloc_bit_location); + return 2 * fd_alloc_bit_location; + } + default: + report_fatal_error("Invalid CSI-RS row"); + } +} + static std::vector make_csi_im_resources(const csi_builder_params& params) { + if (params.nof_ports > 4) { + report_error("Unsupported number of antenna ports={}", params.nof_ports); + } + std::vector res(1); // Make CSI-IM resource match in REs, symbols, slots with zp-CSI-RS> @@ -407,11 +412,12 @@ static std::vector make_csi_im_resources(const csi_builder_para res[0].res_id = static_cast(0); res[0].csi_im_res_element_pattern.emplace(); res[0].csi_im_res_element_pattern->pattern_type = csi_im_resource::csi_im_resource_element_pattern_type::pattern1; - res[0].csi_im_res_element_pattern->subcarrier_location = zp0.res_mapping.fd_alloc.find_lowest() * params.nof_ports; - res[0].csi_im_res_element_pattern->symbol_location = zp0.res_mapping.first_ofdm_symbol_in_td; - res[0].freq_band_rbs = zp0.res_mapping.freq_band_rbs; - res[0].csi_res_period = *zp0.period; - res[0].csi_res_offset = *zp0.offset; + res[0].csi_im_res_element_pattern->subcarrier_location = get_subcarrier_location_from_fd_alloc_bit_location( + zp0.res_mapping.fd_alloc.find_lowest(), zp0.res_mapping.fd_alloc.size()); + res[0].csi_im_res_element_pattern->symbol_location = zp0.res_mapping.first_ofdm_symbol_in_td; + res[0].freq_band_rbs = zp0.res_mapping.freq_band_rbs; + res[0].csi_res_period = *zp0.period; + res[0].csi_res_offset = *zp0.offset; return res; } From d367e601d9e2951a1fedac8ba4f6032e435fe946 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 15:03:16 +0200 Subject: [PATCH 17/49] Revert "support: make TSAN options always included" This reverts commit 9046ff9a076933fc2d0dcdf3c55500556f38bcbd. 
--- apps/gnb/gnb.cpp | 1 + .../srsran/support/tsan_options.h | 9 ++++++--- lib/support/CMakeLists.txt | 20 +++++++++---------- 3 files changed, 16 insertions(+), 14 deletions(-) rename lib/support/tsan_options.cpp => include/srsran/support/tsan_options.h (95%) diff --git a/apps/gnb/gnb.cpp b/apps/gnb/gnb.cpp index 87c6af63c4..e19d7a24bd 100644 --- a/apps/gnb/gnb.cpp +++ b/apps/gnb/gnb.cpp @@ -42,6 +42,7 @@ #include "apps/gnb/adapters/e2_gateway_remote_connector.h" #include "apps/services/e2_metric_connector_manager.h" +#include "srsran/support/tsan_options.h" #include "apps/units/cu_cp/cu_cp_config_translators.h" #include "apps/units/cu_cp/cu_cp_logger_registrator.h" diff --git a/lib/support/tsan_options.cpp b/include/srsran/support/tsan_options.h similarity index 95% rename from lib/support/tsan_options.cpp rename to include/srsran/support/tsan_options.h index 86bd6ed36f..2e75c9924b 100644 --- a/lib/support/tsan_options.cpp +++ b/include/srsran/support/tsan_options.h @@ -10,7 +10,7 @@ * */ -#ifdef __SANITIZE_THREAD__ +#pragma once // Options taken from Mozilla project // abort_on_error=1 - Causes TSan to abort instead of using exit(). @@ -24,7 +24,9 @@ // allocations the same way we would handle them with a regular allocator and // also uncovers potential bugs that might occur in these situations. +#ifdef __cplusplus extern "C" { +#endif const char* __tsan_default_options() { @@ -45,6 +47,7 @@ const char* __tsan_default_suppressions() "race:libusb*\n" "race:libuhd*\n"; } -} -#endif // __SANITIZE_THREAD__ +#ifdef __cplusplus +} +#endif \ No newline at end of file diff --git a/lib/support/CMakeLists.txt b/lib/support/CMakeLists.txt index 6038ff698f..076e10a0c0 100644 --- a/lib/support/CMakeLists.txt +++ b/lib/support/CMakeLists.txt @@ -11,25 +11,23 @@ add_subdirectory(network) add_subdirectory(version) set(SOURCES + backtrace.cpp + math_utils.cpp executors/priority_task_queue.cpp executors/priority_task_worker.cpp - executors/task_execution_manager.cpp + executors/unique_thread.cpp executors/task_worker.cpp executors/task_worker_pool.cpp - executors/unique_thread.cpp - backtrace.cpp + executors/task_execution_manager.cpp + timers.cpp bit_encoding.cpp - byte_buffer.cpp - byte_buffer_chain.cpp config_yaml.cpp - cpu_architecture_info.cpp - event_tracing.cpp signal_handling.cpp + event_tracing.cpp sysinfo.cpp - timers.cpp - math_utils.cpp - tsan_options.cpp - ) + byte_buffer.cpp + byte_buffer_chain.cpp + cpu_architecture_info.cpp) add_library(srsran_support STATIC ${SOURCES}) target_link_libraries(srsran_support srsran_network ${CMAKE_THREAD_LIBS_INIT} ${YAMLCPP_LIBRARY} From 40c6607e74e3888608f740c406aa3efa0f5d63b7 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 15:19:51 +0200 Subject: [PATCH 18/49] gnb: comment the TSAN options include --- apps/gnb/gnb.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/apps/gnb/gnb.cpp b/apps/gnb/gnb.cpp index e19d7a24bd..42c28846f9 100644 --- a/apps/gnb/gnb.cpp +++ b/apps/gnb/gnb.cpp @@ -42,7 +42,12 @@ #include "apps/gnb/adapters/e2_gateway_remote_connector.h" #include "apps/services/e2_metric_connector_manager.h" + +// Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. +// This helps prevent false alarms from the thread sanitizer. 
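+// The included header defines __tsan_default_options() and __tsan_default_suppressions(), which the TSAN runtime
+// reads at start-up (e.g. abort_on_error=1 and the race suppressions for libusb/libuhd declared there).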
+#ifdef __SANITIZE_THREAD__ #include "srsran/support/tsan_options.h" +#endif // __SANITIZE_THREAD__ #include "apps/units/cu_cp/cu_cp_config_translators.h" #include "apps/units/cu_cp/cu_cp_logger_registrator.h" From 732678c3efd36839ae15244d7178f71febc4b981 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 15:35:33 +0200 Subject: [PATCH 19/49] du: add TSAN options to DU --- apps/du/du.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/apps/du/du.cpp b/apps/du/du.cpp index 5b9f3e1ec5..8af9af19f2 100644 --- a/apps/du/du.cpp +++ b/apps/du/du.cpp @@ -39,6 +39,12 @@ #include "apps/gnb/adapters/e2_gateway_remote_connector.h" #include "apps/services/e2_metric_connector_manager.h" +// Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. +// This helps prevent false alarms from the thread sanitizer. +#ifdef __SANITIZE_THREAD__ +#include "srsran/support/tsan_options.h" +#endif // __SANITIZE_THREAD__ + #include #include "apps/services/application_message_banners.h" From 65e1c30c23a4a85b13a8bd75c84d48b8c0635006 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 15:36:59 +0200 Subject: [PATCH 20/49] support: reorder source files --- lib/support/CMakeLists.txt | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/support/CMakeLists.txt b/lib/support/CMakeLists.txt index 076e10a0c0..f9ab5a1822 100644 --- a/lib/support/CMakeLists.txt +++ b/lib/support/CMakeLists.txt @@ -11,23 +11,24 @@ add_subdirectory(network) add_subdirectory(version) set(SOURCES - backtrace.cpp - math_utils.cpp executors/priority_task_queue.cpp executors/priority_task_worker.cpp - executors/unique_thread.cpp + executors/task_execution_manager.cpp executors/task_worker.cpp executors/task_worker_pool.cpp - executors/task_execution_manager.cpp - timers.cpp + executors/unique_thread.cpp + backtrace.cpp bit_encoding.cpp + byte_buffer.cpp + byte_buffer_chain.cpp config_yaml.cpp - signal_handling.cpp + cpu_architecture_info.cpp event_tracing.cpp + signal_handling.cpp sysinfo.cpp - byte_buffer.cpp - byte_buffer_chain.cpp - cpu_architecture_info.cpp) + timers.cpp + math_utils.cpp + ) add_library(srsran_support STATIC ${SOURCES}) target_link_libraries(srsran_support srsran_network ${CMAKE_THREAD_LIBS_INIT} ${YAMLCPP_LIBRARY} From 021e62c88adeda110ae9517ebd3ffb3a34a12932 Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 16:53:03 +0200 Subject: [PATCH 21/49] gnb,du: always include tsan options --- apps/du/du.cpp | 2 -- apps/gnb/gnb.cpp | 2 -- 2 files changed, 4 deletions(-) diff --git a/apps/du/du.cpp b/apps/du/du.cpp index 8af9af19f2..249963e308 100644 --- a/apps/du/du.cpp +++ b/apps/du/du.cpp @@ -41,9 +41,7 @@ // Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. // This helps prevent false alarms from the thread sanitizer. -#ifdef __SANITIZE_THREAD__ #include "srsran/support/tsan_options.h" -#endif // __SANITIZE_THREAD__ #include diff --git a/apps/gnb/gnb.cpp b/apps/gnb/gnb.cpp index 42c28846f9..d09e5567c6 100644 --- a/apps/gnb/gnb.cpp +++ b/apps/gnb/gnb.cpp @@ -45,9 +45,7 @@ // Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. // This helps prevent false alarms from the thread sanitizer. 
-#ifdef __SANITIZE_THREAD__ #include "srsran/support/tsan_options.h" -#endif // __SANITIZE_THREAD__ #include "apps/units/cu_cp/cu_cp_config_translators.h" #include "apps/units/cu_cp/cu_cp_logger_registrator.h" From 0359ffdc1d359cafe506d9dcecaa2079a76d964c Mon Sep 17 00:00:00 2001 From: Xavier Arteaga Date: Mon, 17 Jun 2024 17:07:01 +0200 Subject: [PATCH 22/49] gnb,du: make TSAN options comment more verbose --- apps/du/du.cpp | 2 +- apps/gnb/gnb.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/du/du.cpp b/apps/du/du.cpp index 249963e308..2bcd1419a6 100644 --- a/apps/du/du.cpp +++ b/apps/du/du.cpp @@ -40,7 +40,7 @@ #include "apps/services/e2_metric_connector_manager.h" // Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. -// This helps prevent false alarms from the thread sanitizer. +// This include is not unused - it helps prevent false alarms from the thread sanitizer. #include "srsran/support/tsan_options.h" #include diff --git a/apps/gnb/gnb.cpp b/apps/gnb/gnb.cpp index d09e5567c6..a1e7093c20 100644 --- a/apps/gnb/gnb.cpp +++ b/apps/gnb/gnb.cpp @@ -44,7 +44,7 @@ #include "apps/services/e2_metric_connector_manager.h" // Include ThreadSanitizer (TSAN) options if thread sanitization is enabled. -// This helps prevent false alarms from the thread sanitizer. +// This include is not unused - it helps prevent false alarms from the thread sanitizer. #include "srsran/support/tsan_options.h" #include "apps/units/cu_cp/cu_cp_config_translators.h" From 2022acc8b4812a24b71ea271aabd24f112b55715 Mon Sep 17 00:00:00 2001 From: sauka Date: Thu, 13 Jun 2024 12:34:41 +0300 Subject: [PATCH 23/49] ofh: add start_symbol to prach context --- lib/ofh/support/prach_context_repository.h | 27 +++-- .../ofh_uplink_request_handler_impl.cpp | 98 ++++++++++++------- ...ata_flow_uplane_uplink_prach_impl_test.cpp | 4 +- ...h_uplane_prach_data_flow_notifier_test.cpp | 6 +- ...ane_prach_symbol_data_flow_writer_test.cpp | 12 +-- 5 files changed, 95 insertions(+), 52 deletions(-) diff --git a/lib/ofh/support/prach_context_repository.h b/lib/ofh/support/prach_context_repository.h index 874cde17f7..f9ca14e6b0 100644 --- a/lib/ofh/support/prach_context_repository.h +++ b/lib/ofh/support/prach_context_repository.h @@ -21,6 +21,7 @@ #include "srsran/srslog/srslog.h" #include #include +#include namespace srsran { namespace ofh { @@ -57,7 +58,8 @@ class prach_context prach_context() = default; /// Constructs an uplink PRACH context with the given PRACH buffer and PRACH buffer context. - prach_context(const prach_buffer_context& context, prach_buffer& buffer) : context_info({context, &buffer}) + prach_context(const prach_buffer_context& context, prach_buffer& buffer, std::optional start_symbol_) : + context_info({context, &buffer}) { srsran_assert(context.nof_fd_occasions == 1, "Only supporting one frequency domain occasion"); srsran_assert(context.nof_td_occasions == 1, "Only supporting one time domain occasion"); @@ -70,7 +72,8 @@ class prach_context freq_mapping_info = prach_frequency_mapping_get(preamble_info.scs, context.pusch_scs); - nof_symbols = preamble_info.nof_symbols; + nof_symbols = preamble_info.nof_symbols; + start_symbol = start_symbol_.value_or(context_info.context.start_symbol); // Initialize statistics. for (unsigned i = 0; i != nof_symbols; ++i) { @@ -104,7 +107,14 @@ class prach_context /// Writes the given IQ buffer corresponding to the given symbol and port. 
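/// The given symbol index is rebased onto the stored start symbol; for long preambles, an index below the start
/// symbol (some RUs always report symbolId 0 in that case) is used without rebasing.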
void write_iq(unsigned port, unsigned symbol, unsigned re_start, span iq_buffer) { - symbol -= context_info.context.start_symbol; + if (is_long_preamble(context_info.context.format)) { + // Some RUs always set PRACH symbolId to 0 when long format is used ignoring the value indicated in C-Plane. + if (symbol >= start_symbol) { + symbol -= start_symbol; + } + } else { + symbol -= start_symbol; + } srsran_assert(context_info.buffer, "No valid PRACH buffer in the context"); srsran_assert(symbol < nof_symbols, "Invalid symbol index"); @@ -155,6 +165,8 @@ class prach_context prach_frequency_mapping_information freq_mapping_info; /// Number of OFDM symbols used by the stored PRACH. unsigned nof_symbols; + /// OFDM symbol index within the slot marking the start of PRACH preamble after the cyclic prefix. + unsigned start_symbol; }; /// PRACH context repository. @@ -191,11 +203,14 @@ class prach_context_repository } /// Adds the given entry to the repository at slot. - void add(const prach_buffer_context& context, prach_buffer& buffer_, slot_point slot = slot_point()) + void add(const prach_buffer_context& context, + prach_buffer& buffer_, + std::optional start_symbol, + std::optional slot) { std::lock_guard lock(mutex); - slot_point current_slot = slot.valid() ? slot : context.slot; + slot_point current_slot = slot.value_or(context.slot); if (logger) { if (!entry(current_slot).empty()) { @@ -206,7 +221,7 @@ class prach_context_repository } } - entry(current_slot) = prach_context(context, buffer_); + entry(current_slot) = prach_context(context, buffer_, start_symbol); } /// Function to write the uplink PRACH buffer. diff --git a/lib/ofh/transmitter/ofh_uplink_request_handler_impl.cpp b/lib/ofh/transmitter/ofh_uplink_request_handler_impl.cpp index 3809a7e980..8cdb18bc90 100644 --- a/lib/ofh/transmitter/ofh_uplink_request_handler_impl.cpp +++ b/lib/ofh/transmitter/ofh_uplink_request_handler_impl.cpp @@ -55,6 +55,56 @@ uplink_request_handler_impl::uplink_request_handler_impl(const uplink_request_ha srsran_assert(frame_pool, "Invalid frame pool"); } +/// Determines slot index where U-Plane packet is expected for long format PRACH. +static slot_point get_long_prach_length_slots(const prach_buffer_context& context) +{ + static constexpr unsigned nof_symbols_per_slot = get_nsymb_per_slot(cyclic_prefix::NORMAL); + srsran_assert(is_long_preamble(context.format), "Long PRACH format expected"); + + // Get preamble information. + prach_preamble_information preamble_info = get_prach_preamble_long_info(context.format); + + double pusch_symbol_duration_msec = + static_cast(SUBFRAME_DURATION_MSEC) / + static_cast(get_nof_slots_per_subframe(context.pusch_scs) * nof_symbols_per_slot); + + double len_msecs = (preamble_info.cp_length.to_seconds() + preamble_info.symbol_length().to_seconds()) * 1000; + unsigned nof_symbols = std::ceil(len_msecs / pusch_symbol_duration_msec); + + unsigned prach_length_slots = + std::ceil(static_cast(context.start_symbol + nof_symbols) / static_cast(nof_symbols_per_slot)); + + // Subtract one to account for the current slot. + return (context.slot + (prach_length_slots - 1)); +} + +/// \brief Determine PRACH start symbol index. +/// +/// Determine startSymbolId as the last symbol that starts right at or before the PRACH preamble (after the cyclic +/// prefix). According to O-RAN.WG4.CUS.0.R003 section 4.4.3 if the SCS value provided by "frameStructure" is less than +/// 15 kHz (e.g. 
for long preamble PRACH formats), then the symbol timing used to determine startSymbolId is based on +/// the numerology of 15 kHz SCS. +/// +/// \param context PRACH context storing PRACH time and frequency mapping parameters. +/// \return OFDM symbol index that marks the start of PRACH preamble after the cyclic prefix. +static unsigned get_prach_start_symbol(const prach_buffer_context& context) +{ + // Get preamble information (assuming only a single occasion is supported). + prach_preamble_information preamble_info = + is_long_preamble(context.format) + ? get_prach_preamble_long_info(context.format) + : get_prach_preamble_short_info(context.format, to_ra_subcarrier_spacing(context.pusch_scs), true); + + subcarrier_spacing scs = is_short_preamble(context.format) ? static_cast(preamble_info.scs) + : subcarrier_spacing::kHz15; + + double symbol_duration_sec = + phy_time_unit::from_units_of_kappa((144U + 2048U) >> to_numerology_value(scs)).to_seconds(); + double cp_length_sec = preamble_info.cp_length.to_seconds(); + + return context.start_symbol + unsigned(cp_length_sec / symbol_duration_sec); +} + void uplink_request_handler_impl::handle_prach_occasion(const prach_buffer_context& context, prach_buffer& buffer) { logger.debug("Registering PRACH context entry for slot '{}' and sector#{}", context.slot, context.sector); @@ -66,36 +116,26 @@ void uplink_request_handler_impl::handle_prach_occasion(const prach_buffer_conte // Open Fronthaul parameters timeOffset and cpLength are expressed in multiple of \f$T_s\f$ units. static constexpr double ref_srate_Hz = 30.72e6; - // Get preamble information (assuming only single occasion is supported). - prach_preamble_information preamble_info = - is_long_preamble(context.format) - ? get_prach_preamble_long_info(context.format) - : get_prach_preamble_short_info(context.format, to_ra_subcarrier_spacing(context.pusch_scs), true); - // Store the context in the repository, use correct slot index for long format accounting for PRACH duration. if (is_short_preamble(context.format)) { - ul_prach_repo->add(context, buffer); + ul_prach_repo->add(context, buffer, std::nullopt, std::nullopt); } else { - static constexpr unsigned nof_symbols_per_slot = get_nsymb_per_slot(cyclic_prefix::NORMAL); - - double symbol_duration_msec = - static_cast(SUBFRAME_DURATION_MSEC) / - static_cast(get_nof_slots_per_subframe(context.pusch_scs) * nof_symbols_per_slot); - - double len_msecs = (preamble_info.cp_length.to_seconds() + preamble_info.symbol_length().to_seconds()) * 1000; - unsigned nof_symbols = std::ceil(len_msecs / symbol_duration_msec); + // Determine slot index where the PRACH U-Plane is expected. + slot_point slot = get_long_prach_length_slots(context); + // Determine PRACH start symbol. + unsigned start_symbol = get_prach_start_symbol(context); - unsigned prach_length_slots = - std::ceil(static_cast(context.start_symbol + nof_symbols) / static_cast(nof_symbols_per_slot)); - - // Subtract one to account for the current slot. - slot_point slot = context.slot + (prach_length_slots - 1); - ul_prach_repo->add(context, buffer, slot); + ul_prach_repo->add(context, buffer, start_symbol, slot); } if (!is_prach_cp_enabled) { return; } + // Get preamble information (assuming only single occasion is supported). + prach_preamble_information preamble_info = + is_long_preamble(context.format) + ? get_prach_preamble_long_info(context.format) + : get_prach_preamble_short_info(context.format, to_ra_subcarrier_spacing(context.pusch_scs), true); // Get frequency mapping information. 
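  // Note: the resulting RB count is scaled further below by K, the ratio between the PUSCH SCS and the PRACH SCS,
  // when filling the C-Plane PRACH context (e.g. a 30 kHz PUSCH SCS with the 1.25 kHz PRACH SCS of format 0 gives
  // K = 24).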
prach_frequency_mapping_information freq_mapping_info = @@ -106,24 +146,12 @@ void uplink_request_handler_impl::handle_prach_occasion(const prach_buffer_conte preamble_info.cp_length += phy_time_unit::from_units_of_kappa(16); } unsigned cp_length = preamble_info.cp_length.to_samples(ref_srate_Hz); - - // Determine startSymbolId as the last symbol that starts right at or before the PRACH preamble (after the cyclic - // prefix). According to O-RAN.WG4.CUS.0.R003 section 4.4.3 if the SCS value provided by "frameStructure" is less than - // 15 kHz (e.g. for long preamble PRACH formats), then the symbol timing used to determine startSymbolId is based on - // the numerology of 15 kHz SCS. - subcarrier_spacing scs = is_short_preamble(context.format) ? static_cast(preamble_info.scs) - : subcarrier_spacing::kHz15; - - unsigned pusch_symbol_duration = - phy_time_unit::from_units_of_kappa((144U + 2048U) >> to_numerology_value(scs)).to_samples(ref_srate_Hz); - unsigned prach_start_symbol = context.start_symbol + (cp_length / pusch_symbol_duration); - - unsigned K = (1000 * scs_to_khz(context.pusch_scs)) / ra_scs_to_Hz(preamble_info.scs); + unsigned K = (1000 * scs_to_khz(context.pusch_scs)) / ra_scs_to_Hz(preamble_info.scs); data_flow_cplane_scheduling_prach_context cp_prach_context; cp_prach_context.slot = context.slot; cp_prach_context.nof_repetitions = preamble_info.nof_symbols; - cp_prach_context.start_symbol = prach_start_symbol; + cp_prach_context.start_symbol = get_prach_start_symbol(context); cp_prach_context.prach_scs = preamble_info.scs; cp_prach_context.scs = context.pusch_scs; cp_prach_context.prach_nof_rb = freq_mapping_info.nof_rb_ra * K; diff --git a/tests/unittests/ofh/receiver/ofh_data_flow_uplane_uplink_prach_impl_test.cpp b/tests/unittests/ofh/receiver/ofh_data_flow_uplane_uplink_prach_impl_test.cpp index 5b747e9cab..9ab09251eb 100644 --- a/tests/unittests/ofh/receiver/ofh_data_flow_uplane_uplink_prach_impl_test.cpp +++ b/tests/unittests/ofh/receiver/ofh_data_flow_uplane_uplink_prach_impl_test.cpp @@ -79,7 +79,7 @@ class data_flow_uplane_uplink_prach_impl_fixture : public ::testing::TestWithPar buffer_context.pusch_scs = srsran::subcarrier_spacing::kHz30; buffer_context.start_symbol = 0; - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); results.uplane_results.params.slot = slot; results.uplane_results.params.symbol_id = 0; @@ -98,7 +98,7 @@ class data_flow_uplane_uplink_prach_impl_fixture : public ::testing::TestWithPar // Fill the contexts ul_cplane_context_repo_ptr->add(slot, eaxc, context); - prach_context_repo->add(buffer_context, buffer); + prach_context_repo->add(buffer_context, buffer, std::nullopt, std::nullopt); } data_flow_uplane_uplink_prach_impl_config get_config() diff --git a/tests/unittests/ofh/receiver/ofh_uplane_prach_data_flow_notifier_test.cpp b/tests/unittests/ofh/receiver/ofh_uplane_prach_data_flow_notifier_test.cpp index e3d8fa9f70..05c89aa8d4 100644 --- a/tests/unittests/ofh/receiver/ofh_uplane_prach_data_flow_notifier_test.cpp +++ b/tests/unittests/ofh/receiver/ofh_uplane_prach_data_flow_notifier_test.cpp @@ -47,7 +47,7 @@ TEST(ofh_uplane_prach_data_flow_notifier, unwritten_buffer_does_not_notify) context.pusch_scs = srsran::subcarrier_spacing::kHz30; context.start_symbol = 0; - repo->add(context, buffer); + repo->add(context, buffer, std::nullopt, std::nullopt); sender.notify_prach(slot); ASSERT_FALSE(repo->get(slot).empty()); @@ -74,7 +74,7 @@ TEST(ofh_uplane_prach_data_flow_notifier, 
completed_long_prach_buffer_triggers_n context.start_symbol = 0; static_vector samples(839); - repo->add(context, buffer); + repo->add(context, buffer, std::nullopt, std::nullopt); ASSERT_FALSE(repo->get(slot).empty()); // Fill the grid. @@ -108,7 +108,7 @@ TEST(ofh_uplane_prach_data_flow_notifier, completed_short_prach_buffer_triggers_ context.start_symbol = 0; static_vector samples(139); - repo->add(context, buffer); + repo->add(context, buffer, std::nullopt, std::nullopt); ASSERT_FALSE(repo->get(slot).empty()); // Fill the grid. diff --git a/tests/unittests/ofh/receiver/ofh_uplane_prach_symbol_data_flow_writer_test.cpp b/tests/unittests/ofh/receiver/ofh_uplane_prach_symbol_data_flow_writer_test.cpp index 2296ea59f0..6b3d6d2758 100644 --- a/tests/unittests/ofh/receiver/ofh_uplane_prach_symbol_data_flow_writer_test.cpp +++ b/tests/unittests/ofh/receiver/ofh_uplane_prach_symbol_data_flow_writer_test.cpp @@ -48,7 +48,7 @@ class ofh_uplane_prach_symbol_data_flow_writer_fixture : public ::testing::TestW buffer_context.pusch_scs = srsran::subcarrier_spacing::kHz30; buffer_context.start_symbol = 0; - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); results.params.slot = slot; results.params.symbol_id = 0; @@ -107,7 +107,7 @@ TEST_P(ofh_uplane_prach_symbol_data_flow_writer_fixture, decoded_prbs_before_pra buffer_context.pusch_scs = subcarrier_spacing::kHz60; buffer_context.format = prach_format_type::zero; unsigned nof_symbols_ = 1U; - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); auto& section = results.sections.back(); section.nof_prbs = 11; @@ -129,7 +129,7 @@ TEST_P(ofh_uplane_prach_symbol_data_flow_writer_fixture, prbs_at_the_beginning_w buffer_context.pusch_scs = subcarrier_spacing::kHz60; buffer_context.format = prach_format_type::zero; unsigned nof_symbols_ = 1U; - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); auto& section = results.sections.back(); section.nof_prbs = 1; @@ -153,7 +153,7 @@ TEST_P(ofh_uplane_prach_symbol_data_flow_writer_fixture, 60kHz_long_format_one_m preamble_length = 839; nof_symbols = 1U; buffer = prach_buffer_dummy(nof_symbols, is_long_preamble(buffer_context.format)); - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); auto& section = results.sections.back(); section.nof_prbs = 81; @@ -176,7 +176,7 @@ TEST_P(ofh_uplane_prach_symbol_data_flow_writer_fixture, 60kHz_long_format_one_m preamble_length = 839; nof_symbols = 1U; buffer = prach_buffer_dummy(nof_symbols, is_long_preamble(buffer_context.format)); - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); auto& section = results.sections.back(); section.nof_prbs = 96; @@ -218,7 +218,7 @@ TEST_P(ofh_uplane_prach_symbol_data_flow_writer_fixture, decoded_prbs_with_start buffer = prach_buffer_dummy(nof_symbols, is_long_preamble(buffer_context.format)); // Offset the start symbol. buffer_context.start_symbol = 2; - repo->add(buffer_context, buffer); + repo->add(buffer_context, buffer, std::nullopt, std::nullopt); auto& section = results.sections.back(); section.nof_prbs = (format == prach_format_type::zero) ? 
72 : 12; From dc588dd157ed66c79ceedc8417cbff2dbccadb1b Mon Sep 17 00:00:00 2001 From: asaezper Date: Fri, 7 Jun 2024 17:43:36 +0200 Subject: [PATCH 24/49] ci: force x86-64-v3 in package builds to increase scalability --- .gitlab/ci/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 0be9a2430b..44bdb406bb 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -18,7 +18,6 @@ variables: AMD64_AVX512_TAG: amd64-avx2-avx512 ARM64_TAG: arm64 - AMD64_LIB_BUILDER_TAG: on-prem-amd64 AMD64_VIAVI_BUILDER_TAG: on-prem-amd64-avx2-avx512 INFRASTRUCTURE_TAG: @@ -921,7 +920,8 @@ package: KUBERNETES_MEMORY_LIMIT: 12Gi DEB_BUILD_OPTIONS: parallel=${KUBERNETES_CPU_LIMIT} MAKEFLAGS: -j${KUBERNETES_CPU_LIMIT} - tags: ["${AMD64_LIB_BUILDER_TAG}"] + extraopts: -DAUTO_DETECT_ISA=False -DCMAKE_CXX_FLAGS="-march=x86-64-v3" + tags: ["${AMD64_AVX2_TAG}"] parallel: matrix: - OS_VERSION: "20.04" @@ -938,7 +938,7 @@ install-package: variables: PROJECT_NAME: srsran-project RELEASE_VERSION: "99.9" - tags: ["${AMD64_LIB_BUILDER_TAG}"] + tags: ["${AMD64_AVX2_TAG}"] script: - gnb --version parallel: @@ -1916,7 +1916,7 @@ basic package: variables: <<: *package_variables OS_VERSION: "24.04" - tags: ["${AMD64_LIB_BUILDER_TAG}"] + tags: ["${AMD64_AVX2_TAG}"] needs: [] basic tsan: From 468b0da3fdf57797120054a177562da5266e4a77 Mon Sep 17 00:00:00 2001 From: asaezper Date: Fri, 7 Jun 2024 18:31:50 +0200 Subject: [PATCH 25/49] ci: build basic avx512 using x86-64-v4 flag --- .gitlab/ci/build.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 44bdb406bb..4e5a450bb3 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -17,9 +17,7 @@ variables: AMD64_AVX2_TAG: amd64-avx2 AMD64_AVX512_TAG: amd64-avx2-avx512 ARM64_TAG: arm64 - - AMD64_VIAVI_BUILDER_TAG: on-prem-amd64-avx2-avx512 - + INFRASTRUCTURE_TAG: description: Computer architecture and supported instruction sets options: @@ -1977,8 +1975,8 @@ basic avx512 dpdk: ENABLE_ZEROMQ: "False" ENABLE_DPDK: "True" DPDK_VERSION: "23.11" - AUTO_DETECT_ISA: "True" - ENABLE_AVX512: "True" + AUTO_DETECT_ISA: "False" + BUILD_ARGS: -DCMAKE_CXX_FLAGS="-march=x86-64-v4" FORCE_DEBUG_INFO: "True" ASSERT_LEVEL: MINIMAL SAVE_ARTIFACTS: "True" @@ -1986,7 +1984,7 @@ basic avx512 dpdk: KUBERNETES_CPU_LIMIT: 14 KUBERNETES_MEMORY_REQUEST: 20Gi KUBERNETES_MEMORY_LIMIT: 20Gi - tags: ["${AMD64_VIAVI_BUILDER_TAG}"] + tags: ["${AMD64_AVX512_TAG}"] artifacts: <<: *build_artifacts expire_in: 3 day From e6a29835301b51c7edb5b0e8767bc69a31b5fe94 Mon Sep 17 00:00:00 2001 From: asaezper Date: Mon, 10 Jun 2024 15:27:45 +0200 Subject: [PATCH 26/49] ci,libs: build alternative uhd and dpdk with specific flags --- .gitlab/ci/build.yml | 25 +-- .gitlab/ci/builders.yml | 189 ++++++++++++++------ .gitlab/ci/e2e/.env | 4 +- docker/scripts/builder.sh | 9 + docker/scripts/install_dpdk_dependencies.sh | 4 +- 5 files changed, 162 insertions(+), 69 deletions(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 4e5a450bb3..bf10a527d3 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -417,9 +417,9 @@ variables: ENABLE_DPDK: "True" ASSERT_LEVEL: PARANOID AUTO_DETECT_ISA: "False" - DPDK_VERSION: "23.11" + DPDK_VERSION: "23.11_avx2" BUILD_ARGS: -DCMAKE_CXX_FLAGS="-march=x86-64-v3" - tags: ["${AMD64_TAG}"] + tags: ["${AMD64_AVX2_TAG}"] .smoke valgrind: extends: .build_and_unit @@ -1144,18 +1144,18 @@ ubuntu dpdk: ASSERT_LEVEL: PARANOID 
AUTO_DETECT_ISA: "True" ENABLE_AVX512: "False" - tags: ["${AMD64_TAG}"] + tags: ["${AMD64_AVX2_TAG}"] parallel: matrix: - OS: ubuntu-24.04 COMPILER: [gcc, clang] - DPDK_VERSION: ["23.11"] + DPDK_VERSION: ["23.11_avx2"] - OS: ubuntu-23.10 COMPILER: [gcc, clang] - DPDK_VERSION: ["22.11.3", "23.11"] + DPDK_VERSION: ["22.11.3_avx2", "23.11_avx2"] - OS: ubuntu-22.04 COMPILER: [gcc, clang] - DPDK_VERSION: ["22.11.3", "23.11"] + DPDK_VERSION: ["22.11.3_avx2", "23.11_avx2"] - OS: ubuntu-20.04 COMPILER: [gcc, clang] DPDK_VERSION: ["22.11.3", "23.11"] @@ -1397,7 +1397,7 @@ build uhd alt: TEST_MODE: none ASSERT_LEVEL: PARANOID AUTO_DETECT_ISA: "False" - tags: ["${AMD64_TAG}"] + tags: ["${AMD64_AVX2_TAG}"] parallel: matrix: - OS: ubuntu-24.04 @@ -1723,7 +1723,6 @@ rhel-8 arm neon: ENABLE_ZEROMQ: "False" ENABLE_DPDK: "True" COMPILER: [gcc, clang] - DPDK_VERSION: ["22.11.3", "23.11"] ubuntu-20.04 amd64 avx2 dpdk: extends: .build_and_unit @@ -1737,6 +1736,7 @@ ubuntu-20.04 amd64 avx2 dpdk: matrix: - OS: ubuntu-20.04 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3", "23.11"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "False" @@ -1752,6 +1752,7 @@ ubuntu-20.04 amd64 avx512 dpdk: matrix: - OS: ubuntu-20.04 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3", "23.11"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "True" @@ -1767,6 +1768,7 @@ ubuntu-22.04 amd64 avx2 dpdk: matrix: - OS: ubuntu-22.04 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3_avx2", "23.11_avx2"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "False" @@ -1782,6 +1784,7 @@ ubuntu-22.04 amd64 avx512 dpdk: matrix: - OS: ubuntu-22.04 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3_avx512", "23.11_avx512"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "True" @@ -1797,6 +1800,7 @@ ubuntu-23.10 amd64 avx2 dpdk: matrix: - OS: ubuntu-23.10 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3_avx2", "23.11_avx2"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "False" @@ -1812,6 +1816,7 @@ ubuntu-23.10 amd64 avx512 dpdk: matrix: - OS: ubuntu-23.10 <<: *basic_combinations_dpdk + DPDK_VERSION: ["22.11.3_avx512", "23.11_avx512"] AUTO_DETECT_ISA: "True" ENABLE_AVX512: "True" @@ -1827,7 +1832,7 @@ ubuntu-24.04 amd64 avx2 dpdk: matrix: - OS: ubuntu-24.04 <<: *basic_combinations_dpdk - DPDK_VERSION: "23.11" + DPDK_VERSION: "23.11_avx2" AUTO_DETECT_ISA: "True" ENABLE_AVX512: "False" @@ -1843,7 +1848,7 @@ ubuntu-24.04 amd64 avx512 dpdk: matrix: - OS: ubuntu-24.04 <<: *basic_combinations_dpdk - DPDK_VERSION: "23.11" + DPDK_VERSION: "23.11_avx512" AUTO_DETECT_ISA: "True" ENABLE_AVX512: "True" diff --git a/.gitlab/ci/builders.yml b/.gitlab/ci/builders.yml index 5cbbc4f32c..6c5519e2f1 100644 --- a/.gitlab/ci/builders.yml +++ b/.gitlab/ci/builders.yml @@ -46,17 +46,16 @@ tox python in builder: ################################################################################ # UHD builder ################################################################################ -ubuntu-uhd-builder: +.ubuntu-uhd-builder: stage: dependencies image: ubuntu:${os_version} rules: - if: $ON_MR - tags: - - on-prem-${arch_name} variables: os_version: "" arch_name: "" uhd_version: "" + target_arch: "" GIT_STRATEGY: none KUBERNETES_CPU_REQUEST: 4 KUBERNETES_CPU_LIMIT: 4 @@ -84,7 +83,7 @@ ubuntu-uhd-builder: - | build_uhd() { docker/scripts/install_uhd_dependencies.sh build - docker/scripts/build_uhd.sh ${uhd_version} native ${KUBERNETES_CPU_REQUEST} + docker/scripts/build_uhd.sh ${uhd_version} ${target_arch} ${KUBERNETES_CPU_REQUEST} mkdir -p ${CI_PROJECT_DIR}/.gitlab/ci/builders/uhd cp -r 
/opt/uhd/${uhd_version} ${CI_PROJECT_DIR}/.gitlab/ci/builders/uhd/${uhd_version} } @@ -111,33 +110,63 @@ ubuntu-uhd-builder: expire_in: 8 hours parallel: matrix: - - os_version: "20.04" - arch_name: [amd64, arm64] + - &uhd_2004_matrix + os_version: "20.04" uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0", "4.1.0.5"] # uhd 3.15.0.0 default - - os_version: "22.04" - arch_name: [amd64, arm64] + - &uhd_2204_matrix + os_version: "22.04" uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0"] # "4.1.0.5" default - - os_version: "23.10" - arch_name: [amd64, arm64] + - &uhd_2310_matrix + os_version: "23.10" uhd_version: ["4.6.0.0"] # "4.4.0.0" default # - os_version: "24.04" - # arch_name: [amd64, arm64] # uhd_version: [] # "4.6.0.0" default +ubuntu-uhd-builder no isa: + extends: .ubuntu-uhd-builder + tags: + - amd64 + variables: + arch_name: amd64 + target_arch: x86-64 # x86-64-vX not supported in compilers' versions + parallel: + matrix: + - *uhd_2004_matrix + +ubuntu-uhd-builder avx2: + extends: .ubuntu-uhd-builder + tags: + - amd64-avx2 + variables: + arch_name: amd64 + target_arch: x86-64-v3 + parallel: + matrix: + - *uhd_2204_matrix + - *uhd_2310_matrix + +ubuntu-uhd-builder arm64: + extends: .ubuntu-uhd-builder + tags: + - arm64 + variables: + arch_name: arm64 + target_arch: armv8.2-a+crypto+fp16+dotprod + ################################################################################ # DPDK builder ################################################################################ -ubuntu-dpdk-builder: +.ubuntu-dpdk-builder: stage: dependencies image: ubuntu:${os_version} rules: - if: $ON_MR - tags: - - on-prem-${arch_name} variables: os_version: "" arch_name: "" dpdk_version: "" + extra_arch_name: "" + target_arch: "" GIT_STRATEGY: none KUBERNETES_CPU_REQUEST: 6 KUBERNETES_CPU_LIMIT: 6 @@ -152,7 +181,7 @@ ubuntu-dpdk-builder: DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --no-install-recommends curl apt-transport-https ca-certificates xz-utils cd ${CI_PROJECT_DIR} - http_code=$(curl -w "%{http_code}" --header "PRIVATE-TOKEN: $CODEBOT_TOKEN" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/dpdk/ubuntu-${os_version}-${arch_name}-${dpdk_version}/dpdk.tar.gz" -o output.tar.gz) + http_code=$(curl -w "%{http_code}" --header "PRIVATE-TOKEN: $CODEBOT_TOKEN" "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/dpdk/ubuntu-${os_version}-${arch_name}-${dpdk_version}${extra_arch_name}/dpdk.tar.gz" -o output.tar.gz) if [[ $http_code == "200" ]]; then tar -xf output.tar.gz return 0 @@ -164,18 +193,20 @@ ubuntu-dpdk-builder: # Install dependencies and compile - | build_dpdk() { + unset PIP_EXTRA_INDEX_URL + unset PIP_INDEX_URL docker/scripts/install_dpdk_dependencies.sh build - docker/scripts/build_dpdk.sh ${dpdk_version} native ${KUBERNETES_CPU_REQUEST} + docker/scripts/build_dpdk.sh ${dpdk_version} ${target_arch} ${KUBERNETES_CPU_REQUEST} mkdir -p ${CI_PROJECT_DIR}/.gitlab/ci/builders/dpdk - cp -r /opt/dpdk/${dpdk_version} ${CI_PROJECT_DIR}/.gitlab/ci/builders/dpdk/${dpdk_version} + cp -r /opt/dpdk/${dpdk_version} ${CI_PROJECT_DIR}/.gitlab/ci/builders/dpdk/${dpdk_version}${extra_arch_name} } # Publish compiled version to the registry - | publish_to_registry() { cd ${CI_PROJECT_DIR} - tar -czf dpdk.tar.gz .gitlab/ci/builders/dpdk/${dpdk_version} - curl --fail --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file dpdk.tar.gz "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/dpdk/ubuntu-${os_version}-${arch_name}-${dpdk_version}/dpdk.tar.gz" + tar -czf dpdk.tar.gz 
.gitlab/ci/builders/dpdk/${dpdk_version}${extra_arch_name} + curl --fail --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file dpdk.tar.gz "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/dpdk/ubuntu-${os_version}-${arch_name}-${dpdk_version}${extra_arch_name}/dpdk.tar.gz" } script: - | @@ -192,19 +223,66 @@ ubuntu-dpdk-builder: expire_in: 8 hours parallel: matrix: - - os_version: "20.04" - arch_name: [amd64, arm64] + - &dpdk_2004_matrix + os_version: "20.04" dpdk_version: ["21.08", "22.11.3", "23.11"] - - os_version: "22.04" - arch_name: [amd64, arm64] + - &dpdk_2204_matrix + os_version: "22.04" dpdk_version: ["21.08", "22.11.3", "23.11"] - - os_version: "23.10" - arch_name: [amd64, arm64] + - &dpdk_2310_matrix + os_version: "23.10" dpdk_version: ["22.11.3", "23.11"] - - os_version: "24.04" - arch_name: [amd64, arm64] + - &dpdk_2404_matrix + os_version: "24.04" dpdk_version: ["23.11"] +ubuntu-dpdk-builder no isa: + extends: .ubuntu-dpdk-builder + tags: + - amd64-avx2 + variables: + arch_name: amd64 + target_arch: x86-64 + parallel: + matrix: + - *dpdk_2004_matrix + +ubuntu-dpdk-builder avx2: + extends: .ubuntu-dpdk-builder + tags: + - amd64-avx2 + variables: + arch_name: amd64 + extra_arch_name: _avx2 + target_arch: x86-64-v3 + parallel: + matrix: + - *dpdk_2204_matrix + - *dpdk_2310_matrix + - *dpdk_2404_matrix + +ubuntu-dpdk-builder avx512: + extends: .ubuntu-dpdk-builder + tags: + - amd64-avx2-avx512 + variables: + arch_name: amd64 + extra_arch_name: _avx512 + target_arch: x86-64-v4 + parallel: + matrix: + - *dpdk_2204_matrix + - *dpdk_2310_matrix + - *dpdk_2404_matrix + +ubuntu-dpdk-builder arm64: + extends: .ubuntu-dpdk-builder + tags: + - arm64 + variables: + arch_name: arm64 + target_arch: armv8.2-a+crypto+fp16+dotprod + ################################################################################ # Common ################################################################################ @@ -323,17 +401,15 @@ image-build-publish [ubuntu, 20.04, amd64]: PLATFORM: amd64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder no isa parallel: matrix: - os_version: "20.04" - arch_name: amd64 uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0", "4.1.0.5"] - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder no isa parallel: matrix: - os_version: "20.04" - arch_name: amd64 dpdk_version: ["21.08", "22.11.3", "23.11"] alternative-tag [ubuntu, 20.04, amd64]: @@ -357,17 +433,15 @@ image-build-publish [ubuntu, 20.04, arm64]: PLATFORM: arm64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder arm64 parallel: matrix: - os_version: "20.04" - arch_name: arm64 uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0", "4.1.0.5"] - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder arm64 parallel: matrix: - os_version: "20.04" - arch_name: arm64 dpdk_version: ["21.08", "22.11.3", "23.11"] alternative-tag [ubuntu, 20.04, arm64]: @@ -409,17 +483,20 @@ image-build-publish [ubuntu, 22.04, amd64]: PLATFORM: amd64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder avx2 parallel: matrix: - os_version: "22.04" - arch_name: amd64 uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0"] - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder avx2 + parallel: + matrix: + - os_version: "22.04" + dpdk_version: ["21.08", "22.11.3", "23.11"] + - job: ubuntu-dpdk-builder avx512 parallel: matrix: - os_version: "22.04" - arch_name: amd64 dpdk_version: ["21.08", "22.11.3", "23.11"] alternative-tag [ubuntu, 22.04, amd64]: @@ -443,17 +520,15 
@@ image-build-publish [ubuntu, 22.04, arm64]: PLATFORM: arm64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder arm64 parallel: matrix: - os_version: "22.04" - arch_name: arm64 uhd_version: ["4.6.0.0", "4.4.0.0", "4.3.0.0"] - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder arm64 parallel: matrix: - os_version: "22.04" - arch_name: arm64 dpdk_version: ["21.08", "22.11.3", "23.11"] alternative-tag [ubuntu, 22.04, arm64]: @@ -495,17 +570,20 @@ image-build-publish [ubuntu, 23.10, amd64]: PLATFORM: amd64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder avx2 parallel: matrix: - os_version: "23.10" - arch_name: amd64 uhd_version: ["4.6.0.0"] # "4.4.0.0" default - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder avx2 + parallel: + matrix: + - os_version: "23.10" + dpdk_version: ["22.11.3", "23.11"] + - job: ubuntu-dpdk-builder avx512 parallel: matrix: - os_version: "23.10" - arch_name: amd64 dpdk_version: ["22.11.3", "23.11"] alternative-tag [ubuntu, 23.10, amd64]: @@ -529,17 +607,15 @@ image-build-publish [ubuntu, 23.10, arm64]: PLATFORM: arm64 needs: - builder version - - job: ubuntu-uhd-builder + - job: ubuntu-uhd-builder arm64 parallel: matrix: - os_version: "23.10" - arch_name: arm64 uhd_version: ["4.6.0.0"] # "4.4.0.0" default - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder arm64 parallel: matrix: - os_version: "23.10" - arch_name: arm64 dpdk_version: ["22.11.3", "23.11"] alternative-tag [ubuntu, 23.10, arm64]: @@ -581,11 +657,15 @@ image-build-publish [ubuntu, 24.04, amd64]: PLATFORM: amd64 needs: - builder version - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder avx2 + parallel: + matrix: + - os_version: "24.04" + dpdk_version: ["23.11"] + - job: ubuntu-dpdk-builder avx512 parallel: matrix: - os_version: "24.04" - arch_name: amd64 dpdk_version: ["23.11"] alternative-tag [ubuntu, 24.04, amd64]: @@ -609,11 +689,10 @@ image-build-publish [ubuntu, 24.04, arm64]: PLATFORM: arm64 needs: - builder version - - job: ubuntu-dpdk-builder + - job: ubuntu-dpdk-builder arm64 parallel: matrix: - os_version: "24.04" - arch_name: arm64 dpdk_version: ["23.11"] alternative-tag [ubuntu, 24.04, arm64]: diff --git a/.gitlab/ci/e2e/.env b/.gitlab/ci/e2e/.env index f69dc75531..41ff6d6aee 100644 --- a/.gitlab/ci/e2e/.env +++ b/.gitlab/ci/e2e/.env @@ -1,12 +1,12 @@ SRSGNB_REGISTRY_URI=registry.gitlab.com/softwareradiosystems/srsgnb RETINA_REGISTRY_PREFIX=registry.gitlab.com/softwareradiosystems/ci/retina -RETINA_VERSION=0.49.11 +RETINA_VERSION=0.49.12 UBUNTU_VERSION=24.04 AMARISOFT_VERSION=2023-09-08 SRSUE_VERSION=23.11 OPEN5GS_VERSION=2.7.0 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin METRICS_SERVER_VERSION=1.7.2 -DPDK_VERSION=23.11 +DPDK_VERSION=23.11_avx512 ZMQ_HOSTLABEL_0=kubernetes.io/hostname=k8s-worker-vm2 ZMQ_HOSTLABEL_1=kubernetes.io/hostname=k8s-worker-vm2 diff --git a/docker/scripts/builder.sh b/docker/scripts/builder.sh index 0626adabce..c95210e3a3 100755 --- a/docker/scripts/builder.sh +++ b/docker/scripts/builder.sh @@ -166,6 +166,15 @@ fi # Setup DPDK if [[ -n "$DPDK_VERSION" ]]; then [ ! 
-d "/opt/dpdk/$DPDK_VERSION" ] && echo "DPDK version not found" && exit 1 + # Create alias for _avx2 / _avx512 versions + if [[ $DPDK_VERSION == *_* ]]; then + DPDK_VERSION_BASE=${DPDK_VERSION%_*} + if [ -e "/opt/dpdk/$DPDK_VERSION_BASE" ]; then + rm -Rf "/opt/dpdk/$DPDK_VERSION_BASE" + fi + ln -s "/opt/dpdk/$DPDK_VERSION" "/opt/dpdk/$DPDK_VERSION_BASE" + DPDK_VERSION=$DPDK_VERSION_BASE + fi export DPDK_DIR="/opt/dpdk/$DPDK_VERSION" echo "DPDK_DIR set to $DPDK_DIR" fi diff --git a/docker/scripts/install_dpdk_dependencies.sh b/docker/scripts/install_dpdk_dependencies.sh index 6e7760bc14..058da46211 100755 --- a/docker/scripts/install_dpdk_dependencies.sh +++ b/docker/scripts/install_dpdk_dependencies.sh @@ -29,12 +29,12 @@ main() { DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --no-install-recommends \ curl apt-transport-https ca-certificates xz-utils \ python3-pip ninja-build g++ git build-essential pkg-config libnuma-dev libfdt-dev pciutils - pip3 install --break-system-packages meson pyelftools + pip3 install meson pyelftools || pip3 install --break-system-packages meson pyelftools fi if [[ "$mode" == "all" || "$mode" == "run" ]]; then DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --no-install-recommends \ python3-pip libnuma-dev pciutils libfdt-dev - pip3 install --break-system-packages pyelftools + pip3 install pyelftools || pip3 install --break-system-packages pyelftools fi else echo "OS $ID not supported" From bcfcef454983004a9ff3b88dc5647a344338341e Mon Sep 17 00:00:00 2001 From: asaezper Date: Mon, 17 Jun 2024 19:07:39 +0200 Subject: [PATCH 27/49] ci: update run viavi pipeline script --- .gitlab/run_viavi_pipeline.py | 130 ++++++++++++++-------------------- 1 file changed, 52 insertions(+), 78 deletions(-) diff --git a/.gitlab/run_viavi_pipeline.py b/.gitlab/run_viavi_pipeline.py index 0745b36ec0..ff7ed6b21e 100644 --- a/.gitlab/run_viavi_pipeline.py +++ b/.gitlab/run_viavi_pipeline.py @@ -1,21 +1,43 @@ import argparse + try: import gitlab except ImportError: print("Install Gitlab Python library: https://pypi.org/project/python-gitlab/") exit(1) + def main(): """ Entrypoint runner. """ parser = argparse.ArgumentParser(description="Viavi CI runner.") - parser.add_argument("--token", required=True, help="Gitlab private token: https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token") + parser.add_argument( + "--token", + required=True, + help="Gitlab private token: https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token", + ) parser.add_argument("--branch", required=True, help="srsgnb git branch.") - parser.add_argument("--campaign-path", required=True, help="Campaign path. E.g: C:\\ci\\CI 4x4 ORAN-FH.xml") - parser.add_argument("--testname", required=True, help='Testname in the campaign. E.g: "32UE static DL + UL UDP - Dell"') - parser.add_argument("--timeout", required=True, help="Timeout in seconds for the test") - parser.add_argument("--loglevel", default="warning", required=False, choices=["info", "warning", "error"], help="srsgnb loglevel") + parser.add_argument( + "--campaign-path", + required=True, + help="Campaign path. E.g: C:\\ci\\CI 4x4 ORAN-FH.xml", + ) + parser.add_argument( + "--testname", + required=True, + help='Testname in the campaign. 
E.g: "32UE static DL + UL UDP - Dell"', + ) + parser.add_argument( + "--timeout", required=True, help="Timeout in seconds for the test" + ) + parser.add_argument( + "--loglevel", + default="warning", + required=False, + choices=["info", "warning", "error"], + help="srsgnb loglevel", + ) args = parser.parse_args() private_token = args.token @@ -25,86 +47,38 @@ def main(): timeout = args.timeout loglevel = args.loglevel - INFRASTRUCTURE_TAG = "on-prem-amd64-avx2-avx512" + INFRASTRUCTURE_TAG = "amd64-avx2-avx512" OS_NAME = "ubuntu-24.04" COMPILER = "gcc" TESTMODE = "none" - MAKE_ARGS = "-j7" - BUILD_ARGS = "-DCMAKE_BUILD_TYPE=Release -DAUTO_DETECT_ISA=True -DENABLE_UHD=False -DENABLE_DPDK=True -DENABLE_ZEROMQ=False -DENABLE_AVX512=True" - DPDK_VERSION = "23.11" + MAKE_ARGS = "-j6" + BUILD_ARGS = '-DCMAKE_BUILD_TYPE=Release -DFORCE_DEBUG_INFO=True -DENABLE_UHD=False -DENABLE_DPDK=True -DENABLE_ZEROMQ=False -DAUTO_DETECT_ISA=False -DCMAKE_CXX_FLAGS="-march=x86-64-v4"' + DPDK_VERSION = "23.11_avx512" TESTBED = "viavi" MARKERS = "viavi_manual" - PYARGS = f"--viavi-manual-campaign-filename \"{campaign_name}\" --viavi-manual-test-name \"{test_name}\" --viavi-manual-test-timeout {timeout}" - RETINA_ARGS = "gnb.all.pcap=False gnb.all.enable_metrics=False" + PYARGS = f'--viavi-manual-campaign-filename "{campaign_name}" --viavi-manual-test-name "{test_name}" --viavi-manual-test-timeout {timeout} --retina-pod-timeout 900' + RETINA_ARGS = "gnb.all.pcap=True gnb.all.rlc_enable=True gnb.all.rlc_rb_type=srb" variables = [ - { - 'key': 'INFRASTRUCTURE_TAG', - 'value': INFRASTRUCTURE_TAG - }, - { - 'key': 'OS', - 'value': OS_NAME - }, - { - 'key': 'COMPILER', - 'value': COMPILER - }, - { - 'key': 'TEST_MODE', - 'value': TESTMODE - }, - { - 'key': 'BUILD_ARGS', - 'value': BUILD_ARGS - }, - { - 'key': 'MAKE_ARGS', - 'value': MAKE_ARGS - }, - { - 'key': 'UHD_VERSION', - 'value': '' - }, - { - 'key': 'DPDK_VERSION', - 'value': DPDK_VERSION - }, - { - 'key': 'TESTBED', - 'value': TESTBED - }, - { - 'key': 'MARKERS', - 'value': MARKERS - }, - { - 'key': 'KEYWORDS', - 'value': '' - }, - { - 'key': 'PYTEST_ARGS', - 'value': PYARGS - }, - { - 'key': 'RETINA_ARGS', - 'value': RETINA_ARGS - }, - { - 'key': 'E2E_LOG_LEVEL', - 'value': loglevel - }, - { - 'key': 'GROUP', - 'value': 'viavi', - }, - { - 'key': "PIPELINE_DESCRIPTION", - 'value': "Viavi manual test" - } + {"key": "INFRASTRUCTURE_TAG", "value": INFRASTRUCTURE_TAG}, + {"key": "OS", "value": OS_NAME}, + {"key": "COMPILER", "value": COMPILER}, + {"key": "TEST_MODE", "value": TESTMODE}, + {"key": "BUILD_ARGS", "value": BUILD_ARGS}, + {"key": "MAKE_ARGS", "value": MAKE_ARGS}, + {"key": "UHD_VERSION", "value": ""}, + {"key": "DPDK_VERSION", "value": DPDK_VERSION}, + {"key": "TESTBED", "value": TESTBED}, + {"key": "MARKERS", "value": MARKERS}, + {"key": "KEYWORDS", "value": ""}, + {"key": "PYTEST_ARGS", "value": PYARGS}, + {"key": "RETINA_ARGS", "value": RETINA_ARGS}, + {"key": "E2E_LOG_LEVEL", "value": loglevel}, + {"key": "GROUP", "value": "viavi"}, + {"key": "PIPELINE_DESCRIPTION", "value": "Viavi manual test"}, ] print(f"Creating Viavi pipeline for branch {branch}...") @@ -112,9 +86,9 @@ def main(): print(f" - DPDK_VERSION {DPDK_VERSION}") print(f" - OS {OS_NAME}") - gl = gitlab.Gitlab('https://gitlab.com', private_token=private_token) - project = gl.projects.get('softwareradiosystems/srsgnb') - pipeline = project.pipelines.create({'ref': branch, 'variables': variables}) + gl = gitlab.Gitlab("https://gitlab.com", private_token=private_token) + project = 
gl.projects.get("softwareradiosystems/srsgnb") + pipeline = project.pipelines.create({"ref": branch, "variables": variables}) pipeline_url = pipeline.web_url From baf394f9b6e4ea480b5dceabc7a9dd561f45c8d1 Mon Sep 17 00:00:00 2001 From: qarlosalberto Date: Mon, 17 Jun 2024 22:01:46 +0200 Subject: [PATCH 28/49] ci: resume criteria in viavi test --- tests/e2e/tests/steps/kpis.py | 7 +- tests/e2e/tests/steps/stub.py | 2 +- tests/e2e/tests/viavi.py | 124 +++++++++++++++++++++++++++++----- 3 files changed, 113 insertions(+), 20 deletions(-) diff --git a/tests/e2e/tests/steps/kpis.py b/tests/e2e/tests/steps/kpis.py index b66e2a7380..95c31f5b1c 100644 --- a/tests/e2e/tests/steps/kpis.py +++ b/tests/e2e/tests/steps/kpis.py @@ -81,8 +81,11 @@ def get_kpis( dl_nof_ko_aggregate += ue_info.dl_nof_ko kpis.nof_ko_aggregate += ue_info.ul_nof_ko + ue_info.dl_nof_ko - kpis.ul_bler_aggregate = ul_nof_ko_aggregate / (ul_nof_ok_aggregate + ul_nof_ko_aggregate) - kpis.dl_bler_aggregate = dl_nof_ko_aggregate / (dl_nof_ok_aggregate + dl_nof_ko_aggregate) + tota_ul_ko_ok = ul_nof_ok_aggregate + ul_nof_ko_aggregate + total_dl_ko_ok = dl_nof_ok_aggregate + dl_nof_ko_aggregate + + kpis.ul_bler_aggregate = 0 if not tota_ul_ko_ok else ul_nof_ko_aggregate / tota_ul_ko_ok + kpis.dl_bler_aggregate = 0 if not total_dl_ko_ok else dl_nof_ko_aggregate / total_dl_ko_ok # UE for ue in ue_array: diff --git a/tests/e2e/tests/steps/stub.py b/tests/e2e/tests/steps/stub.py index 9c4721c17a..cf940c3285 100644 --- a/tests/e2e/tests/steps/stub.py +++ b/tests/e2e/tests/steps/stub.py @@ -714,7 +714,7 @@ def _get_metrics_msg(stub: RanStub, name: str, fail_if_kos: bool = False) -> str nof_kos = 0 for ue_info in metrics.ue_array: nof_kos = ue_info.dl_nof_ko + ue_info.ul_nof_ko - if nof_kos: + if nof_kos and fail_if_kos: return f"{name} has {nof_kos} KOs / retrxs" return "" diff --git a/tests/e2e/tests/viavi.py b/tests/e2e/tests/viavi.py index ecdb83ccdb..ab186f50d2 100644 --- a/tests/e2e/tests/viavi.py +++ b/tests/e2e/tests/viavi.py @@ -27,6 +27,8 @@ from retina.protocol.gnb_pb2 import GNBStartInfo from retina.protocol.gnb_pb2_grpc import GNBStub from retina.viavi.client import CampaignStatusEnum, Viavi +from rich.console import Console +from rich.table import Table from .steps.configuration import configure_metric_server_for_gnb from .steps.kpis import get_kpis, KPIs @@ -58,6 +60,19 @@ class _ViaviConfiguration: warning_as_errors: bool = True +# pylint: disable=too-many-instance-attributes +@dataclass +class _ViaviResult: + """ + Viavi result + """ + + criteria_name: str + expected: float + current: float + is_ok: bool + + def load_yaml_config(config_filename: str) -> List[_ViaviConfiguration]: """ Load yaml config @@ -117,6 +132,7 @@ def viavi_manual_test_timeout(request): @mark.viavi_manual # pylint: disable=too-many-arguments, too-many-locals def test_viavi_manual( + capsys: pytest.CaptureFixture[str], # Retina retina_manager: RetinaTestManager, retina_data: RetinaTestData, @@ -142,6 +158,7 @@ def test_viavi_manual( ) _test_viavi( + capsys=capsys, # Retina retina_manager=retina_manager, retina_data=retina_data, @@ -178,6 +195,7 @@ def test_viavi_manual( ) # pylint: disable=too-many-arguments, too-many-locals def test_viavi( + capsys: pytest.CaptureFixture[str], # Retina retina_manager: RetinaTestManager, retina_data: RetinaTestData, @@ -199,6 +217,7 @@ def test_viavi( Runs a test using Viavi """ _test_viavi( + capsys=capsys, # Retina retina_manager=retina_manager, retina_data=retina_data, @@ -232,6 +251,7 @@ def test_viavi( ) # pylint: 
disable=too-many-arguments, too-many-locals def test_viavi_debug( + capsys: pytest.CaptureFixture[str], # Retina retina_manager: RetinaTestManager, retina_data: RetinaTestData, @@ -253,6 +273,7 @@ def test_viavi_debug( Runs a test using Viavi """ _test_viavi( + capsys=capsys, # Retina retina_manager=retina_manager, retina_data=retina_data, @@ -274,6 +295,7 @@ def test_viavi_debug( # pylint: disable=too-many-arguments, too-many-locals def _test_viavi( + capsys: pytest.CaptureFixture[str], # Retina retina_manager: RetinaTestManager, retina_data: RetinaTestData, @@ -384,7 +406,7 @@ def _test_viavi( logging.info("Folder with Viavi report: %s", report_folder) logging.info("Downloading Viavi report") viavi.download_directory(report_folder, Path(test_log_folder).joinpath("viavi")) - check_metrics_criteria(test_declaration, gnb, viavi, metrics_summary, test_declaration.fail_if_kos) + check_metrics_criteria(test_declaration, gnb, viavi, metrics_summary, test_declaration.fail_if_kos, capsys) except HTTPError: logging.error("Viavi Reports could not be downloaded") @@ -398,6 +420,7 @@ def check_metrics_criteria( viavi: Viavi, metrics_summary: Optional[MetricsSummary], fail_if_kos: bool, + capsys: pytest.CaptureFixture[str], ): """ Check pass/fail criteria @@ -409,42 +432,109 @@ def check_metrics_criteria( viavi_failure_manager = viavi.get_test_failures() kpis: KPIs = get_kpis(gnb, viavi_failure_manager=viavi_failure_manager, metrics_summary=metrics_summary) - is_ok &= check_and_print_criteria( - "DL bitrate", kpis.dl_brate_aggregate, test_configuration.expected_dl_bitrate, operator.gt, False + criteria_result: List[_ViaviResult] = [] + criteria_dl_brate_aggregate = check_criteria( + kpis.dl_brate_aggregate, test_configuration.expected_dl_bitrate, operator.gt ) - is_ok &= check_and_print_criteria( - "UL bitrate", kpis.ul_brate_aggregate, test_configuration.expected_ul_bitrate, operator.gt, False + criteria_result.append( + _ViaviResult( + "DL bitrate", test_configuration.expected_dl_bitrate, kpis.dl_brate_aggregate, criteria_dl_brate_aggregate + ) ) - is_ok &= ( - check_and_print_criteria("Number of KOs and/or retrxs", kpis.nof_ko_aggregate, 0, operator.eq, not fail_if_kos) - or not fail_if_kos + + criteria_ul_brate_aggregate = check_criteria( + kpis.ul_brate_aggregate, test_configuration.expected_ul_bitrate, operator.gt + ) + criteria_result.append( + _ViaviResult( + "UL bitrate", test_configuration.expected_ul_bitrate, kpis.ul_brate_aggregate, criteria_ul_brate_aggregate + ) + ) + + criteria_nof_ko_aggregate = check_criteria(kpis.nof_ko_aggregate, 0, operator.eq) or not fail_if_kos + criteria_result.append( + _ViaviResult("Number of KOs and/or retrxs", 0, kpis.nof_ko_aggregate, criteria_nof_ko_aggregate) ) # Check procedure table viavi_failure_manager.print_failures(_OMIT_VIAVI_FAILURE_LIST) - is_ok &= viavi_failure_manager.get_number_of_failures(_OMIT_VIAVI_FAILURE_LIST) == 0 + criteria_procedure_table = viavi_failure_manager.get_number_of_failures(_OMIT_VIAVI_FAILURE_LIST) == 0 + criteria_result.append( + _ViaviResult( + "Procedure table", + 0, + viavi_failure_manager.get_number_of_failures(_OMIT_VIAVI_FAILURE_LIST), + criteria_procedure_table, + ) + ) + is_ok = ( + criteria_dl_brate_aggregate + and criteria_ul_brate_aggregate + and criteria_nof_ko_aggregate + and criteria_procedure_table + ) + + create_table(criteria_result, capsys) if not is_ok: - pytest.fail("Test didn't pass all the criteria") + criteria_errors_str = [] + for criteria in criteria_result: + if not criteria.is_ok: + 
criteria_errors_str.append(criteria.criteria_name) + pytest.fail("Test didn't pass the following criteria: " + ", ".join(criteria_errors_str)) + + +def create_table(results: List[_ViaviResult], capsys): + """ + Create a table with the results + """ + table = Table(title="Viavi Results") + + table.add_column("Criteria Name", justify="left", style="cyan", no_wrap=True) + table.add_column("Expected", justify="right", style="magenta") + table.add_column("Result", justify="right", style="magenta") + table.add_column("Pass", justify="center", style="magenta") + + for result in results: + row_style = "green" if result.is_ok else "red" + table.add_row( + result.criteria_name, + f"{get_str_number_criteria(result.expected)}", + f"{get_str_number_criteria(result.current)}", + "✅" if result.is_ok else "❌", + style=row_style, + ) + console = Console() + # Capture the table to print it in the console + with console.capture() as capture: + console.print(table) + output = "\n" + capture.get() -def check_and_print_criteria( - name: str, + # Disable temporarily the capsys to print the table + with capsys.disabled(): + logging.info(output) + + +def check_criteria( current: float, expected: float, operator_method: Callable[[float, float], bool], - force_log_info: bool = False, ) -> bool: """ - Check and print criteria + Check criteria """ is_ok = operator_method(current, expected) - (logging.info if is_ok or force_log_info else logging.error)( - f"{name} expected: {expected:.2e}, actual: {current:.2e}" - ) return is_ok +def get_str_number_criteria(number_criteria: float) -> str: + """ + Get string number criteria + """ + return f"{number_criteria:.2e}" if abs(number_criteria) > 1000 else str(number_criteria) + + def get_viavi_configuration_from_testname(campaign_filename: str, test_name: str, timeout: int) -> _ViaviConfiguration: """ Get Viavi configuration from dict From 041e762e98cdad8d2680f8f029f99a5693c033bd Mon Sep 17 00:00:00 2001 From: Andre Puschmann Date: Wed, 12 Jun 2024 13:53:17 +0200 Subject: [PATCH 29/49] configs: add baseline cu/du split configs DU will use USRP --- configs/cu.yml | 16 ++++++++++++++ configs/du_rf_b200_tdd_n78_20mhz.yml | 33 ++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 configs/cu.yml create mode 100644 configs/du_rf_b200_tdd_n78_20mhz.yml diff --git a/configs/cu.yml b/configs/cu.yml new file mode 100644 index 0000000000..ae635529fb --- /dev/null +++ b/configs/cu.yml @@ -0,0 +1,16 @@ +# Example config for a locally deployed CU listening on the localhost interface for a DU connection +amf: + addr: 127.0.1.100 + bind_addr: 127.0.10.2 + +cu_cp: + f1ap: + bind_address: 127.0.10.1 + +log: + filename: /tmp/cu.log + all_level: warning + +pcap: + ngap_enable: false + ngap_filename: /tmp/cu_ngap.pcap \ No newline at end of file diff --git a/configs/du_rf_b200_tdd_n78_20mhz.yml b/configs/du_rf_b200_tdd_n78_20mhz.yml new file mode 100644 index 0000000000..7bda39ed29 --- /dev/null +++ b/configs/du_rf_b200_tdd_n78_20mhz.yml @@ -0,0 +1,33 @@ +# Example config for a DU with one 20MHz TDD cell using a USRP as RF frontend. 
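+# It pairs with the cu.yml example above: the CU binds its F1-AP interface to
+# 127.0.10.1, which is used below as cu_cp_addr, while this DU binds 127.0.10.2,
+# so both applications can run on the same host over the loopback interface.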
+f1ap: + cu_cp_addr: 127.0.10.1 + bind_addr: 127.0.10.2 + +ru_sdr: + device_driver: uhd + device_args: type=b200,num_recv_frames=64,num_send_frames=64 + srate: 23.04 + otw_format: sc12 + tx_gain: 80 + rx_gain: 40 + +cell_cfg: + dl_arfcn: 650000 + band: 78 + channel_bandwidth_MHz: 20 + common_scs: 30 + plmn: "00101" + tac: 7 + pci: 1 + +log: + filename: /tmp/du.log + all_level: warning + +pcap: + mac_enable: false + mac_filename: /tmp/du_mac.pcap + f1ap_enable: false + f1ap_filename: /tmp/du_f1ap.pcap + f1u_enable: false + f1u_filename: /tmp/du_f1u.pcap \ No newline at end of file From e1e880d493123837cbf3a5cda2e063741c09ed0a Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Mon, 17 Jun 2024 16:01:02 +0100 Subject: [PATCH 30/49] cu: pass gtpu teid pool to pdu session manager to avoid teid re-use --- lib/cu_up/cu_up_impl.cpp | 8 +- lib/cu_up/cu_up_impl.h | 1 + lib/cu_up/pdu_session_manager_impl.cpp | 210 +++++++++--------- lib/cu_up/pdu_session_manager_impl.h | 1 + lib/cu_up/ue_context.h | 2 + lib/cu_up/ue_manager.cpp | 3 + lib/cu_up/ue_manager.h | 2 + .../cu_up/pdu_session_manager_test.h | 2 + tests/unittests/cu_up/ue_manager_test.cpp | 3 + 9 files changed, 122 insertions(+), 110 deletions(-) diff --git a/lib/cu_up/cu_up_impl.cpp b/lib/cu_up/cu_up_impl.cpp index fd50a8d3e5..f368108925 100644 --- a/lib/cu_up/cu_up_impl.cpp +++ b/lib/cu_up/cu_up_impl.cpp @@ -67,7 +67,12 @@ cu_up::cu_up(const cu_up_configuration& config_) : cfg(config_), main_ctrl_loop( // Connect GTPU GW adapter to NG-U session in order to send UL PDUs. gtpu_gw_adapter.connect_network_gateway(*ngu_session); - // Create TEID allocator + // Create N3 TEID allocator + gtpu_allocator_creation_request n3_alloc_msg = {}; + n3_alloc_msg.max_nof_teids = MAX_NOF_UES * MAX_NOF_PDU_SESSIONS; + n3_teid_allocator = create_gtpu_allocator(n3_alloc_msg); + + // Create F1-U TEID allocator gtpu_allocator_creation_request f1u_alloc_msg = {}; f1u_alloc_msg.max_nof_teids = MAX_NOF_UES * MAX_NOF_PDU_SESSIONS; f1u_teid_allocator = create_gtpu_allocator(f1u_alloc_msg); @@ -86,6 +91,7 @@ cu_up::cu_up(const cu_up_configuration& config_) : cfg(config_), main_ctrl_loop( *cfg.f1u_gateway, gtpu_gw_adapter, *ngu_demux, + *n3_teid_allocator, *f1u_teid_allocator, *cfg.ue_exec_pool, *cfg.gtpu_pcap, diff --git a/lib/cu_up/cu_up_impl.h b/lib/cu_up/cu_up_impl.h index e74d01397e..c8be983aef 100644 --- a/lib/cu_up/cu_up_impl.h +++ b/lib/cu_up/cu_up_impl.h @@ -78,6 +78,7 @@ class cu_up final : public cu_up_interface std::unique_ptr ngu_session; std::unique_ptr ngu_demux; std::unique_ptr ngu_echo; + std::unique_ptr n3_teid_allocator; std::unique_ptr f1u_teid_allocator; std::unique_ptr ue_mng; diff --git a/lib/cu_up/pdu_session_manager_impl.cpp b/lib/cu_up/pdu_session_manager_impl.cpp index cc02d13e8a..983b8caab4 100644 --- a/lib/cu_up/pdu_session_manager_impl.cpp +++ b/lib/cu_up/pdu_session_manager_impl.cpp @@ -33,6 +33,7 @@ pdu_session_manager_impl::pdu_session_manager_impl(ue_index_t timer_factory ue_ul_timer_factory_, timer_factory ue_ctrl_timer_factory_, f1u_cu_up_gateway& f1u_gw_, + gtpu_teid_pool& n3_teid_allocator_, gtpu_teid_pool& f1u_teid_allocator_, gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier_, gtpu_demux_ctrl& gtpu_rx_demux_, @@ -61,7 +62,106 @@ pdu_session_manager_impl::pdu_session_manager_impl(ue_index_t gtpu_pcap(gtpu_pcap_), f1u_gw(f1u_gw_) { - (void)ue_ctrl_exec; +} + +pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_pdu_session_res_to_setup_item& session) +{ + pdu_session_setup_result 
pdu_session_result = {}; + pdu_session_result.success = false; + pdu_session_result.pdu_session_id = session.pdu_session_id; + pdu_session_result.cause = e1ap_cause_radio_network_t::unspecified; + + if (pdu_sessions.find(session.pdu_session_id) != pdu_sessions.end()) { + logger.log_error("PDU Session with {} already exists", session.pdu_session_id); + return pdu_session_result; + } + + if (pdu_sessions.size() >= MAX_NUM_PDU_SESSIONS_PER_UE) { + logger.log_error("PDU Session for {} cannot be created. Max number of PDU sessions reached", + session.pdu_session_id); + return pdu_session_result; + } + + pdu_sessions.emplace(session.pdu_session_id, std::make_unique(session, gtpu_rx_demux)); + std::unique_ptr& new_session = pdu_sessions.at(session.pdu_session_id); + const auto& ul_tunnel_info = new_session->ul_tunnel_info; + + // Get uplink transport address + logger.log_debug("PDU session uplink tunnel info: {} teid={} addr={}", + session.pdu_session_id, + ul_tunnel_info.gtp_teid.value(), + ul_tunnel_info.tp_address); + + // Allocate local TEID + // TODO + // new_session->local_teid = allocate_local_teid(new_session->pdu_session_id); + + // Advertise either local or external IP address of N3 interface + const std::string& n3_addr = net_config.n3_ext_addr.empty() || net_config.n3_ext_addr == "auto" + ? net_config.n3_bind_addr + : net_config.n3_ext_addr; + pdu_session_result.gtp_tunnel = + up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), new_session->local_teid); + + // Create SDAP entity + sdap_entity_creation_message sdap_msg = {ue_index, session.pdu_session_id, &new_session->sdap_to_gtpu_adapter}; + new_session->sdap = create_sdap(sdap_msg); + + // Create GTPU entity + gtpu_tunnel_ngu_creation_message msg = {}; + msg.ue_index = ue_index; + msg.cfg.tx.peer_teid = int_to_gtpu_teid(ul_tunnel_info.gtp_teid.value()); + msg.cfg.tx.peer_addr = ul_tunnel_info.tp_address.to_string(); + msg.cfg.tx.peer_port = net_config.upf_port; + msg.cfg.rx.local_teid = new_session->local_teid; + msg.cfg.rx.t_reordering = n3_config.gtpu_reordering_timer; + msg.cfg.rx.warn_expired_t_reordering = n3_config.warn_on_drop; + msg.rx_lower = &new_session->gtpu_to_sdap_adapter; + msg.tx_upper = >pu_tx_notifier; + msg.gtpu_pcap = >pu_pcap; + msg.ue_dl_timer_factory = ue_dl_timer_factory; + new_session->gtpu = create_gtpu_tunnel_ngu(msg); + + // Connect adapters + new_session->sdap_to_gtpu_adapter.connect_gtpu(*new_session->gtpu->get_tx_lower_layer_interface()); + new_session->gtpu_to_sdap_adapter.connect_sdap(new_session->sdap->get_sdap_tx_sdu_handler()); + + // Register tunnel at demux + if (!gtpu_rx_demux.add_tunnel( + new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { + logger.log_error( + "PDU Session {} cannot be created. TEID {} already exists", session.pdu_session_id, new_session->local_teid); + return pdu_session_result; + } + + // Handle DRB setup + for (const e1ap_drb_to_setup_item_ng_ran& drb_to_setup : session.drb_to_setup_list_ng_ran) { + drb_setup_result drb_result = handle_drb_to_setup_item(*new_session, drb_to_setup); + pdu_session_result.drb_setup_results.push_back(drb_result); + } + + // Ref: TS 38.463 Sec. 
8.3.1.2: + // For each PDU session for which the Security Indication IE is included in the PDU Session Resource To Setup List + // IE of the BEARER CONTEXT SETUP REQUEST message, and the Integrity Protection Indication IE or Confidentiality + // Protection Indication IE is set to "preferred", then the gNB-CU-UP should, if supported, perform user plane + // integrity protection or ciphering, respectively, for the concerned PDU session and shall notify whether it + // performed the user plane integrity protection or ciphering by including the Integrity Protection Result IE or + // Confidentiality Protection Result IE, respectively, in the PDU Session Resource Setup List IE of the BEARER + // CONTEXT SETUP RESPONSE message. + if (security_result_required(session.security_ind)) { + pdu_session_result.security_result = security_result_t{}; + auto& sec_res = pdu_session_result.security_result.value(); + sec_res.integrity_protection_result = + session.security_ind.integrity_protection_ind == integrity_protection_indication_t::not_needed + ? integrity_protection_result_t::not_performed + : integrity_protection_result_t::performed; + sec_res.confidentiality_protection_result = + session.security_ind.confidentiality_protection_ind == confidentiality_protection_indication_t::not_needed + ? confidentiality_protection_result_t::not_performed + : confidentiality_protection_result_t::performed; + } + pdu_session_result.success = true; + return pdu_session_result; } drb_setup_result pdu_session_manager_impl::handle_drb_to_setup_item(pdu_session& new_session, @@ -242,105 +342,6 @@ drb_setup_result pdu_session_manager_impl::handle_drb_to_setup_item(pdu_session& return drb_result; } -pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_pdu_session_res_to_setup_item& session) -{ - pdu_session_setup_result pdu_session_result = {}; - pdu_session_result.success = false; - pdu_session_result.pdu_session_id = session.pdu_session_id; - pdu_session_result.cause = e1ap_cause_radio_network_t::unspecified; - - if (pdu_sessions.find(session.pdu_session_id) != pdu_sessions.end()) { - logger.log_error("PDU Session with {} already exists", session.pdu_session_id); - return pdu_session_result; - } - - if (pdu_sessions.size() >= MAX_NUM_PDU_SESSIONS_PER_UE) { - logger.log_error("PDU Session for {} cannot be created. Max number of PDU sessions reached", - session.pdu_session_id); - return pdu_session_result; - } - - pdu_sessions.emplace(session.pdu_session_id, std::make_unique(session, gtpu_rx_demux)); - std::unique_ptr& new_session = pdu_sessions.at(session.pdu_session_id); - const auto& ul_tunnel_info = new_session->ul_tunnel_info; - - // Get uplink transport address - logger.log_debug("PDU session uplink tunnel info: {} teid={} addr={}", - session.pdu_session_id, - ul_tunnel_info.gtp_teid.value(), - ul_tunnel_info.tp_address); - - // Allocate local TEID - new_session->local_teid = allocate_local_teid(new_session->pdu_session_id); - - // Advertise either local or external IP address of N3 interface - const std::string& n3_addr = net_config.n3_ext_addr.empty() || net_config.n3_ext_addr == "auto" - ? 
net_config.n3_bind_addr - : net_config.n3_ext_addr; - pdu_session_result.gtp_tunnel = - up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), new_session->local_teid); - - // Create SDAP entity - sdap_entity_creation_message sdap_msg = {ue_index, session.pdu_session_id, &new_session->sdap_to_gtpu_adapter}; - new_session->sdap = create_sdap(sdap_msg); - - // Create GTPU entity - gtpu_tunnel_ngu_creation_message msg = {}; - msg.ue_index = ue_index; - msg.cfg.tx.peer_teid = int_to_gtpu_teid(ul_tunnel_info.gtp_teid.value()); - msg.cfg.tx.peer_addr = ul_tunnel_info.tp_address.to_string(); - msg.cfg.tx.peer_port = net_config.upf_port; - msg.cfg.rx.local_teid = new_session->local_teid; - msg.cfg.rx.t_reordering = n3_config.gtpu_reordering_timer; - msg.cfg.rx.warn_expired_t_reordering = n3_config.warn_on_drop; - msg.rx_lower = &new_session->gtpu_to_sdap_adapter; - msg.tx_upper = >pu_tx_notifier; - msg.gtpu_pcap = >pu_pcap; - msg.ue_dl_timer_factory = ue_dl_timer_factory; - new_session->gtpu = create_gtpu_tunnel_ngu(msg); - - // Connect adapters - new_session->sdap_to_gtpu_adapter.connect_gtpu(*new_session->gtpu->get_tx_lower_layer_interface()); - new_session->gtpu_to_sdap_adapter.connect_sdap(new_session->sdap->get_sdap_tx_sdu_handler()); - - // Register tunnel at demux - if (!gtpu_rx_demux.add_tunnel( - new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { - logger.log_error( - "PDU Session {} cannot be created. TEID {} already exists", session.pdu_session_id, new_session->local_teid); - return pdu_session_result; - } - - // Handle DRB setup - for (const e1ap_drb_to_setup_item_ng_ran& drb_to_setup : session.drb_to_setup_list_ng_ran) { - drb_setup_result drb_result = handle_drb_to_setup_item(*new_session, drb_to_setup); - pdu_session_result.drb_setup_results.push_back(drb_result); - } - - // Ref: TS 38.463 Sec. 8.3.1.2: - // For each PDU session for which the Security Indication IE is included in the PDU Session Resource To Setup List - // IE of the BEARER CONTEXT SETUP REQUEST message, and the Integrity Protection Indication IE or Confidentiality - // Protection Indication IE is set to "preferred", then the gNB-CU-UP should, if supported, perform user plane - // integrity protection or ciphering, respectively, for the concerned PDU session and shall notify whether it - // performed the user plane integrity protection or ciphering by including the Integrity Protection Result IE or - // Confidentiality Protection Result IE, respectively, in the PDU Session Resource Setup List IE of the BEARER - // CONTEXT SETUP RESPONSE message. - if (security_result_required(session.security_ind)) { - pdu_session_result.security_result = security_result_t{}; - auto& sec_res = pdu_session_result.security_result.value(); - sec_res.integrity_protection_result = - session.security_ind.integrity_protection_ind == integrity_protection_indication_t::not_needed - ? integrity_protection_result_t::not_performed - : integrity_protection_result_t::performed; - sec_res.confidentiality_protection_result = - session.security_ind.confidentiality_protection_ind == confidentiality_protection_indication_t::not_needed - ? 
confidentiality_protection_result_t::not_performed - : confidentiality_protection_result_t::performed; - } - pdu_session_result.success = true; - return pdu_session_result; -} - pdu_session_modification_result pdu_session_manager_impl::modify_pdu_session(const e1ap_pdu_session_res_to_modify_item& session, bool new_tnl_info_required) @@ -560,12 +561,3 @@ size_t pdu_session_manager_impl::get_nof_pdu_sessions() { return pdu_sessions.size(); } - -gtpu_teid_t pdu_session_manager_impl::allocate_local_teid(pdu_session_id_t pdu_session_id) -{ - // Local TEID is the concatenation of the unique UE index and the PDU session ID - uint32_t local_teid = ue_index; - local_teid <<= 8U; - local_teid |= pdu_session_id_to_uint(pdu_session_id); - return gtpu_teid_t{local_teid}; -} diff --git a/lib/cu_up/pdu_session_manager_impl.h b/lib/cu_up/pdu_session_manager_impl.h index 9aa7630efa..20ab6172ea 100644 --- a/lib/cu_up/pdu_session_manager_impl.h +++ b/lib/cu_up/pdu_session_manager_impl.h @@ -41,6 +41,7 @@ class pdu_session_manager_impl final : public pdu_session_manager_ctrl timer_factory ue_ul_timer_factory_, timer_factory ue_ctrl_timer_factory_, f1u_cu_up_gateway& f1u_gw_, + gtpu_teid_pool& n3_teid_allocator_, gtpu_teid_pool& f1u_teid_allocator_, gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier_, gtpu_demux_ctrl& gtpu_rx_demux_, diff --git a/lib/cu_up/ue_context.h b/lib/cu_up/ue_context.h index 4e44abc7cc..a7fe893f1a 100644 --- a/lib/cu_up/ue_context.h +++ b/lib/cu_up/ue_context.h @@ -45,6 +45,7 @@ class ue_context : public pdu_session_manager_ctrl timer_factory ue_ul_timer_factory_, timer_factory ue_ctrl_timer_factory_, f1u_cu_up_gateway& f1u_gw_, + gtpu_teid_pool& n3_teid_allocator_, gtpu_teid_pool& f1u_teid_allocator_, gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier_, gtpu_demux_ctrl& gtpu_rx_demux_, @@ -64,6 +65,7 @@ class ue_context : public pdu_session_manager_ctrl ue_ul_timer_factory_, ue_ctrl_timer_factory_, f1u_gw_, + n3_teid_allocator_, f1u_teid_allocator_, gtpu_tx_notifier_, gtpu_rx_demux_, diff --git a/lib/cu_up/ue_manager.cpp b/lib/cu_up/ue_manager.cpp index 778db4b9a4..d00f76a604 100644 --- a/lib/cu_up/ue_manager.cpp +++ b/lib/cu_up/ue_manager.cpp @@ -20,6 +20,7 @@ ue_manager::ue_manager(network_interface_config& net_config_, f1u_cu_up_gateway& f1u_gw_, gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier_, gtpu_demux_ctrl& gtpu_rx_demux_, + gtpu_teid_pool& n3_teid_allocator_, gtpu_teid_pool& f1u_teid_allocator_, cu_up_executor_pool& exec_pool_, dlt_pcap& gtpu_pcap_, @@ -30,6 +31,7 @@ ue_manager::ue_manager(network_interface_config& net_config_, f1u_gw(f1u_gw_), gtpu_tx_notifier(gtpu_tx_notifier_), gtpu_rx_demux(gtpu_rx_demux_), + n3_teid_allocator(n3_teid_allocator_), f1u_teid_allocator(f1u_teid_allocator_), exec_pool(exec_pool_), gtpu_pcap(gtpu_pcap_), @@ -79,6 +81,7 @@ ue_context* ue_manager::add_ue(const ue_context_cfg& ue_cfg) ue_ul_timer_factory, ue_ctrl_timer_factory, f1u_gw, + n3_teid_allocator, f1u_teid_allocator, gtpu_tx_notifier, gtpu_rx_demux, diff --git a/lib/cu_up/ue_manager.h b/lib/cu_up/ue_manager.h index c358e10089..94fd753512 100644 --- a/lib/cu_up/ue_manager.h +++ b/lib/cu_up/ue_manager.h @@ -31,6 +31,7 @@ class ue_manager : public ue_manager_ctrl f1u_cu_up_gateway& f1u_gw_, gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier_, gtpu_demux_ctrl& gtpu_rx_demux_, + gtpu_teid_pool& n3_teid_allocator_, gtpu_teid_pool& f1u_teid_allocator_, cu_up_executor_pool& exec_pool_, dlt_pcap& gtpu_pcap_, @@ -55,6 +56,7 @@ class ue_manager : public 
ue_manager_ctrl f1u_cu_up_gateway& f1u_gw; gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier; gtpu_demux_ctrl& gtpu_rx_demux; + gtpu_teid_pool& n3_teid_allocator; gtpu_teid_pool& f1u_teid_allocator; cu_up_executor_pool& exec_pool; dlt_pcap& gtpu_pcap; diff --git a/tests/unittests/cu_up/pdu_session_manager_test.h b/tests/unittests/cu_up/pdu_session_manager_test.h index 70727cb207..c547f6a356 100644 --- a/tests/unittests/cu_up/pdu_session_manager_test.h +++ b/tests/unittests/cu_up/pdu_session_manager_test.h @@ -59,6 +59,7 @@ class pdu_session_manager_test_base timers_factory, timers_factory, *f1u_gw, + *n3_allocator, *f1u_allocator, *gtpu_tx_notifier, *gtpu_rx_demux, @@ -83,6 +84,7 @@ class pdu_session_manager_test_base std::unique_ptr gtpu_tx_notifier; dummy_inner_f1u_bearer f1u_bearer; std::unique_ptr f1u_gw; + std::unique_ptr n3_allocator; std::unique_ptr f1u_allocator; std::unique_ptr pdu_session_mng; null_dlt_pcap gtpu_pcap; diff --git a/tests/unittests/cu_up/ue_manager_test.cpp b/tests/unittests/cu_up/ue_manager_test.cpp index c1efd7169e..9245585221 100644 --- a/tests/unittests/cu_up/ue_manager_test.cpp +++ b/tests/unittests/cu_up/ue_manager_test.cpp @@ -28,6 +28,7 @@ class ue_manager_test : public ::testing::Test // create required objects gtpu_rx_demux = std::make_unique(); + gtpu_n3_allocator = std::make_unique(); gtpu_f1u_allocator = std::make_unique(); gtpu_tx_notifier = std::make_unique(); f1u_gw = std::make_unique(f1u_bearer); @@ -45,6 +46,7 @@ class ue_manager_test : public ::testing::Test *f1u_gw, *gtpu_tx_notifier, *gtpu_rx_demux, + *gtpu_n3_allocator, *gtpu_f1u_allocator, *cu_up_exec_mapper, gtpu_pcap, @@ -58,6 +60,7 @@ class ue_manager_test : public ::testing::Test } std::unique_ptr gtpu_rx_demux; + std::unique_ptr gtpu_n3_allocator; std::unique_ptr gtpu_f1u_allocator; std::unique_ptr gtpu_tx_notifier; std::unique_ptr e1ap; From c0b2d1f6da68dab1cea581f19ed06117da05c8c2 Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Mon, 17 Jun 2024 17:34:43 +0100 Subject: [PATCH 31/49] cu: pass teid allocator to pdu_session so that pdu_session can de-allocated it's TEID on destruction --- include/srsran/gtpu/gtpu_teid_pool.h | 4 +-- lib/cu_up/cu_up_impl.h | 9 ++---- lib/cu_up/pdu_session.h | 20 ++++++++---- lib/cu_up/pdu_session_manager_impl.cpp | 31 ++++++++++++------- lib/cu_up/pdu_session_manager_impl.h | 1 + lib/gtpu/gtpu_teid_pool_impl.h | 17 ++++++---- tests/unittests/cu_up/cu_up_test.cpp | 2 +- tests/unittests/cu_up/cu_up_test_helpers.h | 2 +- .../cu_up/pdu_session_manager_test.cpp | 6 ++-- .../cu_up/pdu_session_manager_test.h | 1 + 10 files changed, 56 insertions(+), 37 deletions(-) diff --git a/include/srsran/gtpu/gtpu_teid_pool.h b/include/srsran/gtpu/gtpu_teid_pool.h index fb59f40661..e0d5c70685 100644 --- a/include/srsran/gtpu/gtpu_teid_pool.h +++ b/include/srsran/gtpu/gtpu_teid_pool.h @@ -28,9 +28,9 @@ class gtpu_teid_pool SRSRAN_NODISCARD virtual expected request_teid() = 0; - SRSRAN_NODISCARD virtual bool release_teid(gtpu_teid_t teid) = 0; + virtual bool release_teid(gtpu_teid_t teid) = 0; - virtual bool full() const = 0; + [[nodiscard]] virtual bool full() const = 0; virtual uint32_t get_max_nof_teids() = 0; }; diff --git a/lib/cu_up/cu_up_impl.h b/lib/cu_up/cu_up_impl.h index c8be983aef..317df0906c 100644 --- a/lib/cu_up/cu_up_impl.h +++ b/lib/cu_up/cu_up_impl.h @@ -17,16 +17,12 @@ #include "srsran/cu_up/cu_up.h" #include "srsran/cu_up/cu_up_configuration.h" #include "srsran/e1ap/cu_up/e1ap_cu_up.h" -#include "srsran/gateways/udp_network_gateway.h" #include 
"srsran/gtpu/gtpu_echo.h" #include "srsran/gtpu/gtpu_teid_pool.h" #include "srsran/support/async/fifo_async_task_scheduler.h" -#include "srsran/support/executors/task_executor.h" #include -#include -namespace srsran { -namespace srs_cu_up { +namespace srsran::srs_cu_up { class cu_up final : public cu_up_interface { @@ -96,5 +92,4 @@ class cu_up final : public cu_up_interface unique_timer statistics_report_timer; }; -} // namespace srs_cu_up -} // namespace srsran +} // namespace srsran::srs_cu_up diff --git a/lib/cu_up/pdu_session.h b/lib/cu_up/pdu_session.h index 9aa75f5e77..b844be79cb 100644 --- a/lib/cu_up/pdu_session.h +++ b/lib/cu_up/pdu_session.h @@ -14,6 +14,7 @@ #include "adapters/sdap_adapters.h" #include "drb_context.h" #include "srsran/gtpu/gtpu_demux.h" +#include "srsran/gtpu/gtpu_teid_pool.h" #include "srsran/gtpu/gtpu_tunnel_ngu.h" #include "srsran/ran/up_transport_layer_info.h" @@ -24,13 +25,16 @@ namespace srs_cu_up { /// \brief Context for PDU session with session-wide parameters and all contained DRBs. struct pdu_session { - pdu_session(const e1ap_pdu_session_res_to_setup_item& session, gtpu_demux_ctrl& gtpu_rx_demux_) : + pdu_session(const e1ap_pdu_session_res_to_setup_item& session, + gtpu_demux_ctrl& gtpu_rx_demux_, + gtpu_teid_pool& n3_teid_allocator_) : pdu_session_id(session.pdu_session_id), session_type(session.pdu_session_type), snssai(session.snssai), security_ind(session.security_ind), ul_tunnel_info(session.ng_ul_up_tnl_info), - gtpu_rx_demux(gtpu_rx_demux_) + gtpu_rx_demux(gtpu_rx_demux_), + n3_teid_allocator(n3_teid_allocator_) { if (session.pdu_session_res_dl_ambr.has_value()) { pdu_session_res_ambr = session.pdu_session_res_dl_ambr.value(); @@ -41,7 +45,10 @@ struct pdu_session { void stop() { if (not stopped) { - gtpu_rx_demux.remove_tunnel(local_teid); + if (local_teid.has_value()) { + gtpu_rx_demux.remove_tunnel(local_teid.value()); + n3_teid_allocator.release_teid(local_teid.value()); + } gtpu->stop(); @@ -70,9 +77,10 @@ struct pdu_session { uint64_t pdu_session_res_ambr = 0; // Tunneling info used by all DRBs/QoS flows in this PDU session - up_transport_layer_info ul_tunnel_info; // the peer GTP-U address and TEID - gtpu_teid_t local_teid; // the local teid used by the gNB for this PDU session - gtpu_demux_ctrl& gtpu_rx_demux; // The demux entity to register/remove the tunnel. + up_transport_layer_info ul_tunnel_info; // the peer GTP-U address and TEID + std::optional local_teid; // the local teid used by the gNB for this PDU session + gtpu_demux_ctrl& gtpu_rx_demux; // The demux entity to register/remove the tunnel. 
+ gtpu_teid_pool& n3_teid_allocator; // Pool to de-allocate TEID on release drb_context* default_drb = nullptr; // non-owning pointer to default DRB, if any diff --git a/lib/cu_up/pdu_session_manager_impl.cpp b/lib/cu_up/pdu_session_manager_impl.cpp index 983b8caab4..19976dec00 100644 --- a/lib/cu_up/pdu_session_manager_impl.cpp +++ b/lib/cu_up/pdu_session_manager_impl.cpp @@ -53,6 +53,7 @@ pdu_session_manager_impl::pdu_session_manager_impl(ue_index_t ue_ul_timer_factory(ue_ul_timer_factory_), ue_ctrl_timer_factory(ue_ctrl_timer_factory_), gtpu_tx_notifier(gtpu_tx_notifier_), + n3_teid_allocator(n3_teid_allocator_), f1u_teid_allocator(f1u_teid_allocator_), gtpu_rx_demux(gtpu_rx_demux_), ue_dl_exec(ue_dl_exec_), @@ -82,26 +83,31 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ return pdu_session_result; } - pdu_sessions.emplace(session.pdu_session_id, std::make_unique(session, gtpu_rx_demux)); - std::unique_ptr& new_session = pdu_sessions.at(session.pdu_session_id); - const auto& ul_tunnel_info = new_session->ul_tunnel_info; + // Allocate local TEID + expected ret = n3_teid_allocator.request_teid(); + if (ret.is_error()) { + logger.log_warning("Failed to create PDU session. Cause: could not allocate local TEID. {}", + session.pdu_session_id); + return pdu_session_result; + } + + std::unique_ptr new_session = std::make_unique(session, gtpu_rx_demux, n3_teid_allocator); + const auto& ul_tunnel_info = new_session->ul_tunnel_info; + new_session->local_teid = ret.value(); // Get uplink transport address - logger.log_debug("PDU session uplink tunnel info: {} teid={} addr={}", + logger.log_debug("PDU session uplink tunnel info: {} local_teid={} peer_teid={} peer_addr={}", session.pdu_session_id, + new_session->local_teid, ul_tunnel_info.gtp_teid.value(), ul_tunnel_info.tp_address); - // Allocate local TEID - // TODO - // new_session->local_teid = allocate_local_teid(new_session->pdu_session_id); - // Advertise either local or external IP address of N3 interface const std::string& n3_addr = net_config.n3_ext_addr.empty() || net_config.n3_ext_addr == "auto" ? net_config.n3_bind_addr : net_config.n3_ext_addr; pdu_session_result.gtp_tunnel = - up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), new_session->local_teid); + up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), *new_session->local_teid); // Create SDAP entity sdap_entity_creation_message sdap_msg = {ue_index, session.pdu_session_id, &new_session->sdap_to_gtpu_adapter}; @@ -113,7 +119,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ msg.cfg.tx.peer_teid = int_to_gtpu_teid(ul_tunnel_info.gtp_teid.value()); msg.cfg.tx.peer_addr = ul_tunnel_info.tp_address.to_string(); msg.cfg.tx.peer_port = net_config.upf_port; - msg.cfg.rx.local_teid = new_session->local_teid; + msg.cfg.rx.local_teid = *new_session->local_teid; msg.cfg.rx.t_reordering = n3_config.gtpu_reordering_timer; msg.cfg.rx.warn_expired_t_reordering = n3_config.warn_on_drop; msg.rx_lower = &new_session->gtpu_to_sdap_adapter; @@ -128,7 +134,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ // Register tunnel at demux if (!gtpu_rx_demux.add_tunnel( - new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { + *new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { logger.log_error( "PDU Session {} cannot be created. 
TEID {} already exists", session.pdu_session_id, new_session->local_teid); return pdu_session_result; @@ -160,6 +166,9 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ ? confidentiality_protection_result_t::not_performed : confidentiality_protection_result_t::performed; } + + // PDU session creation was successful, store PDU session. + pdu_sessions.emplace(session.pdu_session_id, std::move(new_session)); pdu_session_result.success = true; return pdu_session_result; } diff --git a/lib/cu_up/pdu_session_manager_impl.h b/lib/cu_up/pdu_session_manager_impl.h index 20ab6172ea..95821b9baa 100644 --- a/lib/cu_up/pdu_session_manager_impl.h +++ b/lib/cu_up/pdu_session_manager_impl.h @@ -83,6 +83,7 @@ class pdu_session_manager_impl final : public pdu_session_manager_ctrl timer_factory ue_ul_timer_factory; timer_factory ue_ctrl_timer_factory; gtpu_tunnel_common_tx_upper_layer_notifier& gtpu_tx_notifier; + gtpu_teid_pool& n3_teid_allocator; gtpu_teid_pool& f1u_teid_allocator; gtpu_demux_ctrl& gtpu_rx_demux; task_executor& ue_dl_exec; diff --git a/lib/gtpu/gtpu_teid_pool_impl.h b/lib/gtpu/gtpu_teid_pool_impl.h index 0c2b9111d5..4b7c3e3306 100644 --- a/lib/gtpu/gtpu_teid_pool_impl.h +++ b/lib/gtpu/gtpu_teid_pool_impl.h @@ -10,9 +10,9 @@ #pragma once -#include "srsran/adt/expected.h" -#include "srsran/gtpu/gtpu_teid.h" #include "srsran/gtpu/gtpu_teid_pool.h" +#include "srsran/srslog/logger.h" +#include "srsran/srslog/srslog.h" #include "srsran/support/compiler.h" #include @@ -21,7 +21,10 @@ namespace srsran { class gtpu_teid_pool_impl final : public gtpu_teid_pool { public: - explicit gtpu_teid_pool_impl(uint32_t max_nof_teids_) : max_nof_teids(max_nof_teids_), teid_pool(max_nof_teids_) {} + explicit gtpu_teid_pool_impl(uint32_t max_nof_teids_) : + max_nof_teids(max_nof_teids_), teid_pool(max_nof_teids_), logger(srslog::fetch_basic_logger("GTPU")) + { + } SRSRAN_NODISCARD expected request_teid() override { @@ -53,11 +56,11 @@ class gtpu_teid_pool_impl final : public gtpu_teid_pool return teid; } - SRSRAN_NODISCARD bool release_teid(gtpu_teid_t teid) override + bool release_teid(gtpu_teid_t teid) override { uint32_t teid_idx = teid.value() - GTPU_TEID_MIN.value(); if (not teid_pool[teid_idx]) { - // trying to free non-allocated TEID + logger.error("Trying to free non-allocated TEID. 
teid={}", teid); return false; } teid_pool[teid_idx] = false; @@ -65,7 +68,7 @@ class gtpu_teid_pool_impl final : public gtpu_teid_pool return true; } - bool full() const override { return nof_teids >= max_nof_teids; } + [[nodiscard]] bool full() const override { return nof_teids >= max_nof_teids; } uint32_t get_max_nof_teids() override { return max_nof_teids; } @@ -75,5 +78,7 @@ class gtpu_teid_pool_impl final : public gtpu_teid_pool const uint32_t max_nof_teids; std::vector teid_pool; + + srslog::basic_logger& logger; }; } // namespace srsran diff --git a/tests/unittests/cu_up/cu_up_test.cpp b/tests/unittests/cu_up/cu_up_test.cpp index 493af84a13..77ee5262ee 100644 --- a/tests/unittests/cu_up/cu_up_test.cpp +++ b/tests/unittests/cu_up/cu_up_test.cpp @@ -213,7 +213,7 @@ TEST_F(cu_up_test, dl_data_flow) // teid=2, qfi=1 const uint8_t gtpu_ping_vec[] = { - 0x34, 0xff, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, 0x01, 0x00, 0x45, + 0x34, 0xff, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x85, 0x01, 0x00, 0x01, 0x00, 0x45, 0x00, 0x00, 0x54, 0x9b, 0xfb, 0x00, 0x00, 0x40, 0x01, 0x56, 0x5a, 0xc0, 0xa8, 0x04, 0x01, 0xc0, 0xa8, 0x03, 0x02, 0x00, 0x00, 0xb8, 0xc0, 0x00, 0x02, 0x00, 0x01, 0x5d, 0x26, 0x77, 0x64, 0x00, 0x00, 0x00, 0x00, 0xb1, 0xde, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, diff --git a/tests/unittests/cu_up/cu_up_test_helpers.h b/tests/unittests/cu_up/cu_up_test_helpers.h index 8ddf8ccc9e..e22beb77ea 100644 --- a/tests/unittests/cu_up/cu_up_test_helpers.h +++ b/tests/unittests/cu_up/cu_up_test_helpers.h @@ -106,7 +106,7 @@ class dummy_gtpu_teid_pool final : public gtpu_teid_pool uint32_t get_max_nof_teids() override { return UINT32_MAX; } - uint32_t next_teid = 0; + uint32_t next_teid = 1; }; /// Dummy adapter between GTP-U and Network Gateway diff --git a/tests/unittests/cu_up/pdu_session_manager_test.cpp b/tests/unittests/cu_up/pdu_session_manager_test.cpp index 7f8d94e8f2..c85dca8b51 100644 --- a/tests/unittests/cu_up/pdu_session_manager_test.cpp +++ b/tests/unittests/cu_up/pdu_session_manager_test.cpp @@ -39,7 +39,7 @@ TEST_P(pdu_session_manager_test_set_n3_ext_addr, when_valid_pdu_session_setup_it ? 
net_config.n3_bind_addr : net_config.n3_ext_addr; ASSERT_EQ(setup_result.gtp_tunnel.tp_address.to_string(), tp_address_expect); - ASSERT_EQ(setup_result.drb_setup_results[0].gtp_tunnel.gtp_teid.value(), 0); + ASSERT_EQ(setup_result.drb_setup_results[0].gtp_tunnel.gtp_teid.value(), 1); ASSERT_EQ(pdu_session_mng->get_nof_pdu_sessions(), 1); // attempt to remove non-existing session @@ -378,7 +378,7 @@ TEST_F(pdu_session_manager_test, when_new_ul_info_is_requested_f1u_is_disconnect pdu_session_setup_result set_result = pdu_session_mng->setup_pdu_session(pdu_session_setup_item); ASSERT_EQ(pdu_session_mng->get_nof_pdu_sessions(), 1); drb_setup_result drb_setup_res = set_result.drb_setup_results[0]; - ASSERT_EQ(drb_setup_res.gtp_tunnel.gtp_teid, 0x0); + ASSERT_EQ(drb_setup_res.gtp_tunnel.gtp_teid, 0x1); // prepare modification request (request new UL TNL info) e1ap_pdu_session_res_to_modify_item pdu_session_modify_item = @@ -386,7 +386,7 @@ TEST_F(pdu_session_manager_test, when_new_ul_info_is_requested_f1u_is_disconnect pdu_session_modification_result mod_result = pdu_session_mng->modify_pdu_session(pdu_session_modify_item, true); drb_setup_result drb_mod_res = mod_result.drb_modification_results[0]; - ASSERT_EQ(drb_mod_res.gtp_tunnel.gtp_teid, 0x1); + ASSERT_EQ(drb_mod_res.gtp_tunnel.gtp_teid, 0x2); ASSERT_EQ(pdu_session_mng->get_nof_pdu_sessions(), 1); } diff --git a/tests/unittests/cu_up/pdu_session_manager_test.h b/tests/unittests/cu_up/pdu_session_manager_test.h index c547f6a356..f0ccfc736d 100644 --- a/tests/unittests/cu_up/pdu_session_manager_test.h +++ b/tests/unittests/cu_up/pdu_session_manager_test.h @@ -37,6 +37,7 @@ class pdu_session_manager_test_base gtpu_rx_demux = std::make_unique(); gtpu_tx_notifier = std::make_unique(); f1u_gw = std::make_unique(f1u_bearer); + n3_allocator = std::make_unique(); f1u_allocator = std::make_unique(); // create DUT object From 5e1b86e57165fb93617868be76d257fafa4c7a4f Mon Sep 17 00:00:00 2001 From: Pedro Alvarez Date: Tue, 18 Jun 2024 09:53:40 +0100 Subject: [PATCH 32/49] cu_up: make sure local teid is allocated before pdu session creation --- lib/cu_up/pdu_session.h | 20 ++++++++++---------- lib/cu_up/pdu_session_manager_impl.cpp | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/lib/cu_up/pdu_session.h b/lib/cu_up/pdu_session.h index b844be79cb..312e9b3248 100644 --- a/lib/cu_up/pdu_session.h +++ b/lib/cu_up/pdu_session.h @@ -26,12 +26,14 @@ namespace srs_cu_up { /// \brief Context for PDU session with session-wide parameters and all contained DRBs. 
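+/// The local N3 TEID is allocated by the caller before construction and handed in via the
+/// constructor; the session releases it back to the TEID pool when stop() is called.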
struct pdu_session { pdu_session(const e1ap_pdu_session_res_to_setup_item& session, - gtpu_demux_ctrl& gtpu_rx_demux_, - gtpu_teid_pool& n3_teid_allocator_) : + gtpu_teid_t local_teid_, // the local teid used by the gNB for this PDU session + gtpu_demux_ctrl& gtpu_rx_demux_, + gtpu_teid_pool& n3_teid_allocator_) : pdu_session_id(session.pdu_session_id), session_type(session.pdu_session_type), snssai(session.snssai), security_ind(session.security_ind), + local_teid(local_teid_), ul_tunnel_info(session.ng_ul_up_tnl_info), gtpu_rx_demux(gtpu_rx_demux_), n3_teid_allocator(n3_teid_allocator_) @@ -45,10 +47,8 @@ struct pdu_session { void stop() { if (not stopped) { - if (local_teid.has_value()) { - gtpu_rx_demux.remove_tunnel(local_teid.value()); - n3_teid_allocator.release_teid(local_teid.value()); - } + gtpu_rx_demux.remove_tunnel(local_teid); + n3_teid_allocator.release_teid(local_teid); gtpu->stop(); @@ -77,10 +77,10 @@ struct pdu_session { uint64_t pdu_session_res_ambr = 0; // Tunneling info used by all DRBs/QoS flows in this PDU session - up_transport_layer_info ul_tunnel_info; // the peer GTP-U address and TEID - std::optional local_teid; // the local teid used by the gNB for this PDU session - gtpu_demux_ctrl& gtpu_rx_demux; // The demux entity to register/remove the tunnel. - gtpu_teid_pool& n3_teid_allocator; // Pool to de-allocate TEID on release + gtpu_teid_t local_teid; // the local teid used by the gNB for this PDU session + up_transport_layer_info ul_tunnel_info; // the peer GTP-U address and TEID + gtpu_demux_ctrl& gtpu_rx_demux; // The demux entity to register/remove the tunnel. + gtpu_teid_pool& n3_teid_allocator; // Pool to de-allocate TEID on release drb_context* default_drb = nullptr; // non-owning pointer to default DRB, if any diff --git a/lib/cu_up/pdu_session_manager_impl.cpp b/lib/cu_up/pdu_session_manager_impl.cpp index 19976dec00..a8fa07fdf1 100644 --- a/lib/cu_up/pdu_session_manager_impl.cpp +++ b/lib/cu_up/pdu_session_manager_impl.cpp @@ -84,16 +84,16 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ } // Allocate local TEID - expected ret = n3_teid_allocator.request_teid(); - if (ret.is_error()) { + expected local_teid = n3_teid_allocator.request_teid(); + if (local_teid.is_error()) { logger.log_warning("Failed to create PDU session. Cause: could not allocate local TEID. {}", session.pdu_session_id); return pdu_session_result; } - std::unique_ptr new_session = std::make_unique(session, gtpu_rx_demux, n3_teid_allocator); - const auto& ul_tunnel_info = new_session->ul_tunnel_info; - new_session->local_teid = ret.value(); + std::unique_ptr new_session = + std::make_unique(session, local_teid.value(), gtpu_rx_demux, n3_teid_allocator); + const auto& ul_tunnel_info = new_session->ul_tunnel_info; // Get uplink transport address logger.log_debug("PDU session uplink tunnel info: {} local_teid={} peer_teid={} peer_addr={}", @@ -107,7 +107,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ ? 
net_config.n3_bind_addr : net_config.n3_ext_addr; pdu_session_result.gtp_tunnel = - up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), *new_session->local_teid); + up_transport_layer_info(transport_layer_address::create_from_string(n3_addr), new_session->local_teid); // Create SDAP entity sdap_entity_creation_message sdap_msg = {ue_index, session.pdu_session_id, &new_session->sdap_to_gtpu_adapter}; @@ -119,7 +119,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ msg.cfg.tx.peer_teid = int_to_gtpu_teid(ul_tunnel_info.gtp_teid.value()); msg.cfg.tx.peer_addr = ul_tunnel_info.tp_address.to_string(); msg.cfg.tx.peer_port = net_config.upf_port; - msg.cfg.rx.local_teid = *new_session->local_teid; + msg.cfg.rx.local_teid = new_session->local_teid; msg.cfg.rx.t_reordering = n3_config.gtpu_reordering_timer; msg.cfg.rx.warn_expired_t_reordering = n3_config.warn_on_drop; msg.rx_lower = &new_session->gtpu_to_sdap_adapter; @@ -134,7 +134,7 @@ pdu_session_setup_result pdu_session_manager_impl::setup_pdu_session(const e1ap_ // Register tunnel at demux if (!gtpu_rx_demux.add_tunnel( - *new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { + new_session->local_teid, ue_dl_exec, new_session->gtpu->get_rx_upper_layer_interface())) { logger.log_error( "PDU Session {} cannot be created. TEID {} already exists", session.pdu_session_id, new_session->local_teid); return pdu_session_result; @@ -353,7 +353,7 @@ drb_setup_result pdu_session_manager_impl::handle_drb_to_setup_item(pdu_session& pdu_session_modification_result pdu_session_manager_impl::modify_pdu_session(const e1ap_pdu_session_res_to_modify_item& session, - bool new_tnl_info_required) + bool new_ul_tnl_info_required) { pdu_session_modification_result pdu_session_result; pdu_session_result.success = false; @@ -395,7 +395,7 @@ pdu_session_manager_impl::modify_pdu_session(const e1ap_pdu_session_res_to_modif drb_iter->second->drb_id); auto& drb = drb_iter->second; - if (new_tnl_info_required) { + if (new_ul_tnl_info_required) { // Allocate new UL TEID for DRB expected ret = f1u_teid_allocator.request_teid(); if (not ret.has_value()) { From a86d39471c5080c2271db9ea8672a3d4701d66bd Mon Sep 17 00:00:00 2001 From: Joaquim Broquetas Date: Wed, 12 Jun 2024 19:59:46 +0200 Subject: [PATCH 33/49] srsdu: allow 2.5 ms scheduling request period --- apps/units/flexible_du/du_high/du_high_config.h | 4 ++-- .../du_high/du_high_config_cli11_schema.cpp | 2 +- .../du_high/du_high_config_translators.cpp | 4 ++-- .../du_high/du_high_config_validator.cpp | 14 ++++++++++++-- include/srsran/du/du_update_config_helpers.h | 2 +- lib/du/du_update_config_helpers.cpp | 2 +- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/apps/units/flexible_du/du_high/du_high_config.h b/apps/units/flexible_du/du_high/du_high_config.h index b07b601789..9b0c353cd4 100644 --- a/apps/units/flexible_du/du_high/du_high_config.h +++ b/apps/units/flexible_du/du_high/du_high_config.h @@ -218,8 +218,8 @@ struct du_high_unit_pucch_config { /// \brief \c SR period in milliseconds. /// Among all values given in \c periodicityAndOffset, part of \c \SchedulingRequestResourceConfig, TS 38.331, - /// these are the only ones supported. Values: {1, 2, 4, 8, 10, 16, 20, 40, 80, 160, 320}. - unsigned sr_period_msec = 20; + /// these are the only ones supported. Values: {1, 2, 2.5, 4, 5, 8, 10, 16, 20, 40, 80, 160, 320}. + float sr_period_msec = 20.0F; /// PUCCH F1 resource parameters. 
/// Number of symbols for PUCCH Format 1. Values {4, 14}. diff --git a/apps/units/flexible_du/du_high/du_high_config_cli11_schema.cpp b/apps/units/flexible_du/du_high/du_high_config_cli11_schema.cpp index ad37755b57..778a3d3115 100644 --- a/apps/units/flexible_du/du_high/du_high_config_cli11_schema.cpp +++ b/apps/units/flexible_du/du_high/du_high_config_cli11_schema.cpp @@ -698,7 +698,7 @@ static void configure_cli11_pucch_args(CLI::App& app, du_high_unit_pucch_config& }); add_option(app, "--sr_period_ms", pucch_params.sr_period_msec, "SR period in msec") ->capture_default_str() - ->check(CLI::IsMember({1, 2, 4, 5, 8, 10, 16, 20, 40, 80, 160, 320})); + ->check(CLI::IsMember({1.0F, 2.0F, 2.5F, 4.0F, 5.0F, 8.0F, 10.0F, 16.0F, 20.0F, 40.0F, 80.0F, 160.0F, 320.0F})); add_option(app, "--f1_nof_ue_res_harq", pucch_params.nof_ue_pucch_f1_res_harq, diff --git a/apps/units/flexible_du/du_high/du_high_config_translators.cpp b/apps/units/flexible_du/du_high/du_high_config_translators.cpp index 3a388a8462..4a793df21c 100644 --- a/apps/units/flexible_du/du_high/du_high_config_translators.cpp +++ b/apps/units/flexible_du/du_high/du_high_config_translators.cpp @@ -583,8 +583,8 @@ std::vector srsran::generate_du_cell_config(const du_high_unit_c if (sr_cng.empty()) { sr_cng.emplace_back(scheduling_request_resource_config{}); } - sr_cng.front().period = static_cast(get_nof_slots_per_subframe(base_cell.common_scs) * - base_cell.pucch_cfg.sr_period_msec); + sr_cng.front().period = static_cast( + static_cast(get_nof_slots_per_subframe(base_cell.common_scs) * base_cell.pucch_cfg.sr_period_msec)); // If any dependent parameter needs to be updated, this is the place. config_helpers::compute_nof_sr_csi_pucch_res(du_pucch_cfg, diff --git a/apps/units/flexible_du/du_high/du_high_config_validator.cpp b/apps/units/flexible_du/du_high/du_high_config_validator.cpp index 1d90fcfab0..e7600aead2 100644 --- a/apps/units/flexible_du/du_high/du_high_config_validator.cpp +++ b/apps/units/flexible_du/du_high/du_high_config_validator.cpp @@ -390,8 +390,18 @@ static bool validate_pucch_cell_unit_config(const du_high_unit_base_cell_config& return false; } - static constexpr std::array valid_sr_period_slots{1, 2, 4, 8, 10, 16, 20, 40, 80, 160, 320}; - const unsigned sr_period_slots = get_nof_slots_per_subframe(scs_common) * pucch_cfg.sr_period_msec; + static constexpr std::array valid_sr_period_slots{1, 2, 4, 5, 8, 10, 16, 20, 40, 80, 160, 320}; + const auto sr_period_slots = static_cast(get_nof_slots_per_subframe(scs_common) * pucch_cfg.sr_period_msec); + + // Check that the SR period in milliseconds leads to an integer number of slots. 
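+  // For example, sr_period_msec = 2.5 is only valid from 30 kHz SCS upwards: with 2 slots per
+  // subframe it maps to 5 slots, whereas at 15 kHz SCS (1 slot per subframe) it would give 2.5 slots.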
+ if (get_nof_slots_per_subframe(scs_common) * pucch_cfg.sr_period_msec != static_cast(sr_period_slots)) { + fmt::print("SR period (i.e., {}ms) times the number of slots per subframe (i.e., {}) must be an integer number of " + "slots.\n", + pucch_cfg.sr_period_msec, + get_nof_slots_per_subframe(scs_common)); + return false; + } + if (std::find(valid_sr_period_slots.begin(), valid_sr_period_slots.end(), sr_period_slots) == valid_sr_period_slots.end()) { fmt::print("SR period of {}ms is not valid for {}kHz SCS.\n", pucch_cfg.sr_period_msec, scs_to_khz(scs_common)); diff --git a/include/srsran/du/du_update_config_helpers.h b/include/srsran/du/du_update_config_helpers.h index 5f1b95945a..edf0701251 100644 --- a/include/srsran/du/du_update_config_helpers.h +++ b/include/srsran/du/du_update_config_helpers.h @@ -36,7 +36,7 @@ unsigned compute_prach_frequency_start(const pucch_builder_params& user_params, /// \param csi_period_msec CSI period in milliseconds. void compute_nof_sr_csi_pucch_res(pucch_builder_params& user_params, unsigned max_pucch_grants_per_slot, - unsigned sr_period_msec, + float sr_period_msec, std::optional csi_period_msec); } // namespace config_helpers diff --git a/lib/du/du_update_config_helpers.cpp b/lib/du/du_update_config_helpers.cpp index 877030737a..76e63e5f0e 100644 --- a/lib/du/du_update_config_helpers.cpp +++ b/lib/du/du_update_config_helpers.cpp @@ -87,7 +87,7 @@ unsigned srsran::config_helpers::compute_prach_frequency_start(const pucch_build void srsran::config_helpers::compute_nof_sr_csi_pucch_res(pucch_builder_params& user_params, unsigned max_pucch_grants_per_slot, - unsigned sr_period_msec, + float sr_period_msec, std::optional csi_period_msec) { // [Implementation-defined] In the following, we compute the estimated number of PUCCH resources that are needed for From a0c67b45d2181dbde96528539195688b8bd161b4 Mon Sep 17 00:00:00 2001 From: sauka Date: Wed, 12 Jun 2024 19:03:53 +0300 Subject: [PATCH 34/49] ofh,ethernet: reduce sleeping time in socket-based receiver, adjust pool size --- lib/ofh/ethernet/dpdk/dpdk_ethernet_transmitter.cpp | 2 +- lib/ofh/ethernet/ethernet_receiver_impl.cpp | 9 +++------ lib/ofh/ethernet/ethernet_rx_buffer_pool.h | 4 ++-- lib/ofh/transmitter/ofh_message_transmitter_impl.cpp | 4 ++-- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/lib/ofh/ethernet/dpdk/dpdk_ethernet_transmitter.cpp b/lib/ofh/ethernet/dpdk/dpdk_ethernet_transmitter.cpp index 3fc19cec73..790007acb9 100644 --- a/lib/ofh/ethernet/dpdk/dpdk_ethernet_transmitter.cpp +++ b/lib/ofh/ethernet/dpdk/dpdk_ethernet_transmitter.cpp @@ -21,7 +21,7 @@ void dpdk_transmitter_impl::send(span> frames) // Receiving a frame burst larger than MAX_BURST_SIZE requires making several Tx bursts. for (unsigned offset = 0; offset < frames.size();) { auto frame_burst = frames.subspan(offset, std::min(MAX_BURST_SIZE, frames.size() - offset)); - offset += frames.size(); + offset += frame_burst.size(); static_vector<::rte_mbuf*, MAX_BURST_SIZE> mbufs(frame_burst.size()); if (::rte_pktmbuf_alloc_bulk(port_ctx->get_mempool(), mbufs.data(), frame_burst.size()) < 0) { diff --git a/lib/ofh/ethernet/ethernet_receiver_impl.cpp b/lib/ofh/ethernet/ethernet_receiver_impl.cpp index e70323808f..5b70c2eb97 100644 --- a/lib/ofh/ethernet/ethernet_receiver_impl.cpp +++ b/lib/ofh/ethernet/ethernet_receiver_impl.cpp @@ -134,28 +134,25 @@ void receiver_impl::receive_loop() } /// Blocking function that waits for incoming data over the socket or until the specified timeout expires. 
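+/// The wait is implemented with ::select(); the timeout is converted to a timeval with microsecond
+/// resolution, so very short polling intervals (such as the 5-microsecond timeout used below) are possible.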
-static bool wait_for_data(int socket, std::chrono::seconds timeout) +static bool wait_for_data(int socket, std::chrono::microseconds timeout) { fd_set read_fs; FD_ZERO(&read_fs); FD_SET(socket, &read_fs); - timeval tv = {static_cast(timeout.count()), 0}; + timeval tv = {0, static_cast<__suseconds_t>(timeout.count())}; return (::select(socket + 1, &read_fs, nullptr, nullptr, &tv) > 0); } void receiver_impl::receive() { - ofh_tracer << instant_trace_event("ofh_receiver_wait_data", instant_trace_event::cpu_scope::thread); - - if (!wait_for_data(socket_fd, std::chrono::seconds(1))) { + if (!wait_for_data(socket_fd, std::chrono::microseconds(5))) { return; } trace_point tp = ofh_tracer.now(); auto exp_buffer = buffer_pool.reserve(); - if (exp_buffer.is_error()) { logger.warning("No buffer is available for receiving an Ethernet packet"); return; diff --git a/lib/ofh/ethernet/ethernet_rx_buffer_pool.h b/lib/ofh/ethernet/ethernet_rx_buffer_pool.h index 51dc4a560c..753f49e04d 100644 --- a/lib/ofh/ethernet/ethernet_rx_buffer_pool.h +++ b/lib/ofh/ethernet/ethernet_rx_buffer_pool.h @@ -25,8 +25,8 @@ namespace ether { /// Pool of buffers accessed by a socket-based Ethernet receiver. class ethernet_rx_buffer_pool { - /// Allocate 2MB of storage, evenly divided between Ethernet receive buffers. - static inline constexpr units::bytes ETH_BUFFER_POOL_SIZE{2048000}; + /// Allocate 4MB of storage, evenly divided between Ethernet receive buffers. + static inline constexpr units::bytes ETH_BUFFER_POOL_SIZE{4096000}; using rx_buffer_id_list = concurrent_queue; diff --git a/lib/ofh/transmitter/ofh_message_transmitter_impl.cpp b/lib/ofh/transmitter/ofh_message_transmitter_impl.cpp index b7885b7527..ab41efa215 100644 --- a/lib/ofh/transmitter/ofh_message_transmitter_impl.cpp +++ b/lib/ofh/transmitter/ofh_message_transmitter_impl.cpp @@ -88,9 +88,9 @@ void message_transmitter_impl::on_new_symbol(slot_symbol_point symbol_point) enqueue_messages_into_burst(interval_up, frame_burst); // Transmit the data. - trace_point tp_dpdk = ofh_tracer.now(); + trace_point tp_ether = ofh_tracer.now(); transmit_frame_burst(frame_burst); - ofh_tracer << trace_event("ofh_dpdk_tx", tp_dpdk); + ofh_tracer << trace_event("ofh_ether_tx", tp_ether); // Clear sent buffers. 
pool->clear_sent_frame_buffers(interval_cp_dl); From fe3ef2e6b05a9b8593a8a77c5dfb24ecf3c7d244 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Tue, 28 May 2024 18:08:33 +0200 Subject: [PATCH 35/49] sched: draft design of scheduler slicing --- include/srsran/ran/rrm.h | 8 +- include/srsran/ran/s_nssai.h | 2 + .../scheduler/config/cell_rrm_policy_config.h | 28 +++ .../scheduler/config/logical_channel_config.h | 2 + .../srsran/scheduler/scheduler_configurator.h | 4 + lib/scheduler/CMakeLists.txt | 2 + lib/scheduler/config/cell_configuration.cpp | 1 + lib/scheduler/config/cell_configuration.h | 3 + lib/scheduler/slicing/ran_slice_candidate.h | 82 +++++++++ lib/scheduler/slicing/ran_slice_id.h | 22 +++ lib/scheduler/slicing/ran_slice_instance.cpp | 52 ++++++ lib/scheduler/slicing/ran_slice_instance.h | 91 ++++++++++ lib/scheduler/slicing/slice_scheduler.cpp | 164 ++++++++++++++++++ lib/scheduler/slicing/slice_scheduler.h | 67 +++++++ 14 files changed, 527 insertions(+), 1 deletion(-) create mode 100644 include/srsran/scheduler/config/cell_rrm_policy_config.h create mode 100644 lib/scheduler/slicing/ran_slice_candidate.h create mode 100644 lib/scheduler/slicing/ran_slice_id.h create mode 100644 lib/scheduler/slicing/ran_slice_instance.cpp create mode 100644 lib/scheduler/slicing/ran_slice_instance.h create mode 100644 lib/scheduler/slicing/slice_scheduler.cpp create mode 100644 lib/scheduler/slicing/slice_scheduler.h diff --git a/include/srsran/ran/rrm.h b/include/srsran/ran/rrm.h index d6d7ce1fe1..3df72356a5 100644 --- a/include/srsran/ran/rrm.h +++ b/include/srsran/ran/rrm.h @@ -14,10 +14,16 @@ namespace srsran { -/// O-RAN.WG3.E2SM-RC-R003-v3.00 Section 8.4.3.6 +/// Identifier of a RRM Policy Member. +/// \remark See O-RAN.WG3.E2SM-RC-R003-v3.00 Section 8.4.3.6 struct rrm_policy_member { std::string plmn_id; s_nssai_t s_nssai; + + bool operator==(const rrm_policy_member& other) const + { + return plmn_id == other.plmn_id and s_nssai == other.s_nssai; + } }; struct rrm_policy_ratio_group { diff --git a/include/srsran/ran/s_nssai.h b/include/srsran/ran/s_nssai.h index 4fd6fbb9c8..3f48b5a854 100644 --- a/include/srsran/ran/s_nssai.h +++ b/include/srsran/ran/s_nssai.h @@ -20,6 +20,8 @@ struct s_nssai_t { uint8_t sst = 0; /// Slice Differentiator (max 24bits). std::optional sd; + + bool operator==(const s_nssai_t& other) const { return sst == other.sst && sd == other.sd; } }; } // namespace srsran diff --git a/include/srsran/scheduler/config/cell_rrm_policy_config.h b/include/srsran/scheduler/config/cell_rrm_policy_config.h new file mode 100644 index 0000000000..f0f93b4ab0 --- /dev/null +++ b/include/srsran/scheduler/config/cell_rrm_policy_config.h @@ -0,0 +1,28 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#pragma once + +#include "srsran/ran/rrm.h" +#include "srsran/ran/s_nssai.h" + +namespace srsran { + +/// Cell-specific Default RAN slice configuration. +struct cell_rrm_policy_config { + /// RRM Policy identifier. + rrm_policy_member rrc_member; + /// Sets the minimum percentage of PRBs to be allocated to this group. + unsigned min_prb_ratio = 0; + /// Sets the maximum percentage of PRBs to be allocated to this group. 
+ unsigned max_prb_ratio = MAX_NOF_PRBS; +}; + +} // namespace srsran \ No newline at end of file diff --git a/include/srsran/scheduler/config/logical_channel_config.h b/include/srsran/scheduler/config/logical_channel_config.h index 775fe2c0ad..edc4937c23 100644 --- a/include/srsran/scheduler/config/logical_channel_config.h +++ b/include/srsran/scheduler/config/logical_channel_config.h @@ -12,6 +12,7 @@ #include "srsran/adt/optional.h" #include "srsran/ran/lcid.h" +#include "srsran/ran/rrm.h" #include "srsran/ran/sr_configuration.h" #include "srsran/scheduler/config/logical_channel_group.h" @@ -26,6 +27,7 @@ struct logical_channel_config { std::optional sr_id; bool lc_sr_mask; bool lc_sr_delay_timer_applied; + rrm_policy_member rrm_policy; }; } // namespace srsran diff --git a/include/srsran/scheduler/scheduler_configurator.h b/include/srsran/scheduler/scheduler_configurator.h index ebb6c5723d..082a212421 100644 --- a/include/srsran/scheduler/scheduler_configurator.h +++ b/include/srsran/scheduler/scheduler_configurator.h @@ -31,6 +31,7 @@ #include "srsran/ran/subcarrier_spacing.h" #include "srsran/ran/tdd/tdd_ul_dl_config.h" #include "srsran/scheduler/config/bwp_configuration.h" +#include "srsran/scheduler/config/cell_rrm_policy_config.h" #include "srsran/scheduler/config/dmrs.h" #include "srsran/scheduler/config/logical_channel_config.h" #include "srsran/scheduler/config/serving_cell_config.h" @@ -91,6 +92,9 @@ struct sched_cell_configuration_request_message { /// List of nzp-CSI-RS resources common to all UEs. std::vector nzp_csi_rs_res_list; + /// List of RAN slices to support in the scheduler. + std::vector rrm_policy_members; + unsigned ntn_cs_koffset = 0; }; diff --git a/lib/scheduler/CMakeLists.txt b/lib/scheduler/CMakeLists.txt index 7403266254..e39265f6b6 100644 --- a/lib/scheduler/CMakeLists.txt +++ b/lib/scheduler/CMakeLists.txt @@ -37,6 +37,8 @@ set(SOURCES support/dci_builder.cpp support/sch_pdu_builder.cpp support/csi_report_helpers.cpp + slicing/slice_scheduler.cpp + slicing/ran_slice_instance.cpp cell_scheduler.cpp scheduler_factory.cpp scheduler_impl.cpp diff --git a/lib/scheduler/config/cell_configuration.cpp b/lib/scheduler/config/cell_configuration.cpp index d30709c22a..8a9c92ce6c 100644 --- a/lib/scheduler/config/cell_configuration.cpp +++ b/lib/scheduler/config/cell_configuration.cpp @@ -36,6 +36,7 @@ cell_configuration::cell_configuration(const scheduler_expert_config& pucch_guardbands(msg.pucch_guardbands), zp_csi_rs_list(msg.zp_csi_rs_list), nzp_csi_rs_list(msg.nzp_csi_rs_res_list), + rrm_policy_members(msg.rrm_policy_members), // SSB derived params. ssb_case(band_helper::get_ssb_pattern(msg.dl_carrier.band, msg.ssb_config.scs)), paired_spectrum(band_helper::is_paired_spectrum(msg.dl_carrier.band)), diff --git a/lib/scheduler/config/cell_configuration.h b/lib/scheduler/config/cell_configuration.h index 3cc7fab624..a06bcbec51 100644 --- a/lib/scheduler/config/cell_configuration.h +++ b/lib/scheduler/config/cell_configuration.h @@ -55,6 +55,9 @@ class cell_configuration /// List of nzp-CSI-RS resources. std::vector nzp_csi_rs_list; + /// List of RRM Policy members configured for this cell. + std::vector rrm_policy_members; + // Derived Parameters. 
ssb_pattern_case ssb_case; bool paired_spectrum; diff --git a/lib/scheduler/slicing/ran_slice_candidate.h b/lib/scheduler/slicing/ran_slice_candidate.h new file mode 100644 index 0000000000..a4988e8df1 --- /dev/null +++ b/lib/scheduler/slicing/ran_slice_candidate.h @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#pragma once + +#include "ran_slice_instance.h" + +namespace srsran { +namespace detail { + +template +class common_ran_slice_candidate +{ + struct candidate_deleter { + void operator()(ran_slice_instance* p) + { + if (p != nullptr) { + if constexpr (IsDl) { + p->pdsch_completed(); + } else { + p->pusch_completed(); + } + } + } + }; + +public: + common_ran_slice_candidate(ran_slice_instance* instance_) : inst(instance_, candidate_deleter{}) {} + + ran_slice_id_t id() const { return inst->id; } + [[nodiscard]] const cell_rrm_policy_config& cfg() const { return inst->cfg; } + scheduler_policy& policy() { return *inst->policy; } + + bool is_candidate(du_ue_index_t ue_idx) const { return inst->is_candidate(ue_idx); } + bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const { return inst->is_candidate(ue_idx, lcid); } + + /// Signal that the allocations for this slice are complete. + void clear() { inst.reset(); } + + /// Register that a new grant was allocated for a given UE. + void store_grant(unsigned nof_rbs) + { + if constexpr (IsDl) { + inst->store_pdsch_grant(nof_rbs); + } else { + inst->store_pusch_grant(nof_rbs); + } + } + + /// Remaining bytes to allocate for the given slice. + [[nodiscard]] unsigned remaining_rbs() const + { + if constexpr (IsDl) { + return inst->cfg.max_prb_ratio < inst->pdsch_rb_count ? 0 : inst->cfg.max_prb_ratio - inst->pdsch_rb_count; + } + return inst->cfg.max_prb_ratio < inst->pusch_rb_count ? 0 : inst->cfg.max_prb_ratio - inst->pusch_rb_count; + } + +protected: + std::unique_ptr inst; +}; + +} // namespace detail + +/// \brief Handle to fetch and update the state of a RAN slice in a given DL slot. +/// +/// On destruction, the slice is marked as completed for the current slot and won't be considered as a candidate again. +using dl_ran_slice_candidate = detail::common_ran_slice_candidate; + +/// Interface to fetch and update the state of a RAN slice in a given UL slot. +/// +/// On destruction, the slice is marked as completed for the current slot and won't be considered as a candidate again. +using ul_ran_slice_candidate = detail::common_ran_slice_candidate; + +} // namespace srsran \ No newline at end of file diff --git a/lib/scheduler/slicing/ran_slice_id.h b/lib/scheduler/slicing/ran_slice_id.h new file mode 100644 index 0000000000..0c9b21b797 --- /dev/null +++ b/lib/scheduler/slicing/ran_slice_id.h @@ -0,0 +1,22 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#pragma once + +#include "srsran/adt/strong_type.h" +#include + +namespace srsran { + +/// RAN slice identifier that should be unique for a given cell,PLMN,S-NSSAI. 
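+/// Wrapping the raw value in a strong type prevents slice IDs from being accidentally mixed with other integer identifiers.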
+struct ran_slice_id_tag {}; +using ran_slice_id_t = strong_type; + +} // namespace srsran \ No newline at end of file diff --git a/lib/scheduler/slicing/ran_slice_instance.cpp b/lib/scheduler/slicing/ran_slice_instance.cpp new file mode 100644 index 0000000000..c264602e52 --- /dev/null +++ b/lib/scheduler/slicing/ran_slice_instance.cpp @@ -0,0 +1,52 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#include "ran_slice_instance.h" + +using namespace srsran; + +ran_slice_instance::ran_slice_instance(ran_slice_id_t id_, + const cell_configuration& cell_cfg_, + const cell_rrm_policy_config& cfg_) : + id(id_), cell_cfg(&cell_cfg_), cfg(cfg_) +{ +} + +void ran_slice_instance::slot_indication() +{ + pdsch_rb_count = 0; + pusch_rb_count = 0; + pdsch_stopped = false; + pusch_stopped = false; +} + +void ran_slice_instance::set_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) +{ + if (not bearers.contains(ue_idx)) { + bearers.emplace(ue_idx, MAX_NOF_RB_LCIDS); + } + bearers[ue_idx].set(lcid); +} + +void ran_slice_instance::rem_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) +{ + if (not bearers.contains(ue_idx)) { + return; + } + bearers[ue_idx].reset(lcid); + if (bearers[ue_idx].none()) { + bearers.erase(ue_idx); + } +} + +void ran_slice_instance::rem_ue(du_ue_index_t ue_idx) +{ + bearers.erase(ue_idx); +} diff --git a/lib/scheduler/slicing/ran_slice_instance.h b/lib/scheduler/slicing/ran_slice_instance.h new file mode 100644 index 0000000000..8018aa9f46 --- /dev/null +++ b/lib/scheduler/slicing/ran_slice_instance.h @@ -0,0 +1,91 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#pragma once + +#include "../config/cell_configuration.h" +#include "../policy/scheduler_policy.h" +#include "ran_slice_id.h" +#include "srsran/scheduler/config/cell_rrm_policy_config.h" + +namespace srsran { + +/// This class stores all the internal information relative to a RAN slice instantiation. +class ran_slice_instance +{ +public: + constexpr static int skip_slice_prio = std::numeric_limits::min(); + + ran_slice_instance(ran_slice_id_t id_, const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_); + + void slot_indication(); + + bool active() const { return not bearers.empty(); } + + int get_dl_prio() + { + if (not active() or pdsch_stopped or cfg.max_prb_ratio <= pdsch_rb_count) { + return skip_slice_prio; + } + return cfg.min_prb_ratio > pdsch_rb_count ? cfg.min_prb_ratio - pdsch_rb_count : 0; + } + + int get_ul_prio() + { + if (not active() or pusch_stopped or cfg.max_prb_ratio <= pusch_rb_count) { + return skip_slice_prio; + } + return cfg.min_prb_ratio > pusch_rb_count ? cfg.min_prb_ratio - pusch_rb_count : 0; + } + + /// Save PDSCH grant. + void store_pdsch_grant(unsigned crbs) { pdsch_rb_count += crbs; } + + /// Save PUSCH grant. + void store_pusch_grant(unsigned crbs) { pusch_rb_count += crbs; } + + /// Mark the allocation of PDSCH for this slice and the current slot as complete. + void pdsch_completed() { pdsch_stopped = true; } + + /// Mark the allocation of PUSCH for this slice and the current slot as complete. 
+ void pusch_completed() { pusch_stopped = true; } + + /// Determine if a UE is a candidate for this slice. + bool is_candidate(du_ue_index_t ue_idx) const { return bearers.contains(ue_idx); } + + /// Determine if a (UE, LCID) is a candidate for this slice. + bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const + { + return is_candidate(ue_idx) and bearers[ue_idx].test(lcid); + } + + void set_logical_channel(du_ue_index_t ue_idx, lcid_t lcid); + void rem_logical_channel(du_ue_index_t ue_idx, lcid_t lcid); + void rem_ue(du_ue_index_t ue_idx); + + ran_slice_id_t id; + const cell_configuration* cell_cfg; + cell_rrm_policy_config cfg; + + std::unique_ptr policy; + + slotted_id_table, MAX_NOF_DU_UES> bearers; + + /// Counter of how many RBs have been scheduled for PDSCH in the current slot for this slice. + unsigned pdsch_rb_count = 0; + /// Counter of how many RBs have been scheduled for PUSCH in the current slot for this slice. + unsigned pusch_rb_count = 0; + +private: + bool pdsch_stopped = false; + bool pusch_stopped = false; +}; + +} // namespace srsran \ No newline at end of file diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp new file mode 100644 index 0000000000..c1c5a12932 --- /dev/null +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -0,0 +1,164 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#include "slice_scheduler.h" +#include "../policy/scheduler_policy_factory.h" +#include "srsran/srslog/srslog.h" + +using namespace srsran; + +slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_) : + cell_cfg(cell_cfg_), logger(srslog::fetch_basic_logger("SCHED")) +{ + // Create a number of slices equal to the number of configured RRM Policy members + 1 (default slice). + slices.reserve(cell_cfg.rrm_policy_members.size() + 1); + sorted_dl_prios.reserve(cell_cfg.rrm_policy_members.size() + 1); + sorted_ul_prios.reserve(cell_cfg.rrm_policy_members.size() + 1); + + // Create RAN slice instances. + ran_slice_id_t id_count{0}; + // Default slice. + slices.emplace_back(id_count, cell_cfg, cell_rrm_policy_config{}); + slices.back().policy = + create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); + sorted_dl_prios.emplace_back(id_count); + sorted_ul_prios.emplace_back(id_count); + // Configured RRM policy members + for (const cell_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { + slices.emplace_back(id_count, cell_cfg, rrm); + slices.back().policy = + create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); + sorted_dl_prios.emplace_back(id_count); + sorted_ul_prios.emplace_back(id_count); + ++id_count; + } +} + +void slice_scheduler::slot_indication() +{ + for (auto& slice : slices) { + slice.slot_indication(); + } + + // Compute slice priorities. + for (slice_prio_context& ctx : sorted_dl_prios) { + ctx.prio = slices[ctx.id.value()].get_dl_prio(); + } + for (slice_prio_context& ctx : sorted_ul_prios) { + ctx.prio = slices[ctx.id.value()].get_ul_prio(); + } + + // Sort slices by descending priority. 
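+  // A slice's priority is its unmet minimum-PRB quota (see get_dl_prio/get_ul_prio): slices furthest below min_prb come first, while inactive slices or slices that already reached max_prb are skipped.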
+ std::sort(sorted_dl_prios.begin(), sorted_dl_prios.end(), std::greater<>{}); + std::sort(sorted_ul_prios.begin(), sorted_ul_prios.end(), std::greater<>{}); +} + +void slice_scheduler::add_ue(du_ue_index_t ue_idx, const ue_configuration& ue_cfg) +{ + for (const logical_channel_config& lc_cfg : ue_cfg.logical_channels()) { + ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); + sl_inst.set_logical_channel(ue_idx, lc_cfg.lcid); + } +} + +void slice_scheduler::reconf_ue(du_ue_index_t ue_idx, + const ue_configuration& next_ue_cfg, + const ue_configuration& prev_ue_cfg) +{ + // Remove old bearers. + for (const logical_channel_config& lc_cfg : prev_ue_cfg.logical_channels()) { + ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); + sl_inst.rem_logical_channel(ue_idx, lc_cfg.lcid); + } + + // Add new bearers. + for (const logical_channel_config& lc_cfg : next_ue_cfg.logical_channels()) { + ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); + sl_inst.set_logical_channel(ue_idx, lc_cfg.lcid); + } +} + +void slice_scheduler::rem_ue(du_ue_index_t ue_idx) +{ + for (auto& slice : slices) { + slice.rem_ue(ue_idx); + } +} + +ran_slice_instance& slice_scheduler::get_slice(const rrm_policy_member& rrm) +{ + auto it = std::find_if( + slices.begin(), slices.end(), [&rrm](const ran_slice_instance& slice) { return slice.cfg.rrc_member == rrm; }); + if (it == slices.end()) { + // Slice with the provided RRM policy member was not found. Return default slice. + return slices.front(); + } + return *it; +} + +dl_ran_slice_candidate slice_scheduler::get_next_dl_candidate() +{ + if (slices.size() == 1) { + return create_dl_candidate(); + } + + // Check if the slice priority hasn't changed. If it did, recompute priorities. + slice_prio_context* slice_prio_ctxt = &sorted_dl_prios.front(); + // Recompute priority + int prio = slices[slice_prio_ctxt->id.value()].get_dl_prio(); + if (prio != slice_prio_ctxt->prio) { + // Priority changed + slice_prio_ctxt->prio = prio; + // Check if sort needs to be called again + // Note: This assumes that only the previous front slice was used in scheduling. + if (prio < sorted_dl_prios[1].prio) { + // Slices need to be reordered. + std::sort(sorted_dl_prios.begin(), sorted_dl_prios.end(), std::greater<>{}); + } + } + + return create_dl_candidate(); +} + +ul_ran_slice_candidate slice_scheduler::get_next_ul_candidate() +{ + if (slices.size() == 1) { + return create_ul_candidate(); + } + + // Check if the slice priority hasn't changed. If it did, recompute priorities. + slice_prio_context* slice_prio_ctxt = &sorted_ul_prios.front(); + // Recompute priority + int prio = slices[slice_prio_ctxt->id.value()].get_ul_prio(); + if (prio != slice_prio_ctxt->prio) { + // Priority changed + slice_prio_ctxt->prio = prio; + // Check if sort needs to be called again + // Note: This assumes that only the previous front slice was used in scheduling. + if (prio < sorted_ul_prios[1].prio) { + // Slices need to be reordered. + std::sort(sorted_ul_prios.begin(), sorted_ul_prios.end(), std::greater<>{}); + } + } + + return create_ul_candidate(); +} + +dl_ran_slice_candidate slice_scheduler::create_dl_candidate() +{ + bool has_candidates = sorted_dl_prios[0].prio != ran_slice_instance::skip_slice_prio; + return dl_ran_slice_candidate{has_candidates ? 
&slices[sorted_dl_prios[0].id.value()] : nullptr}; +} + +ul_ran_slice_candidate slice_scheduler::create_ul_candidate() +{ + bool has_candidates = sorted_ul_prios[0].prio != ran_slice_instance::skip_slice_prio; + return ul_ran_slice_candidate{has_candidates ? &slices[sorted_ul_prios[0].id.value()] : nullptr}; +} diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h new file mode 100644 index 0000000000..6d9a76c14b --- /dev/null +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -0,0 +1,67 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#pragma once + +#include "../policy/scheduler_policy.h" +#include "ran_slice_candidate.h" +#include "ran_slice_instance.h" + +namespace srsran { + +/// Inter-slice Scheduler. +class slice_scheduler +{ +public: + slice_scheduler(const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_); + + /// Reset the state of the slices. + void slot_indication(); + + /// Update the state of the slice with the provided UE configs. + void add_ue(du_ue_index_t ue_idx, const ue_configuration& ue_cfg); + void reconf_ue(du_ue_index_t ue_idx, const ue_configuration& next_ue_cfg, const ue_configuration& prev_ue_cfg); + void rem_ue(du_ue_index_t ue_idx); + + /// Get next RAN slice for PDSCH scheduling. + dl_ran_slice_candidate get_next_dl_candidate(); + + /// Get next RAN slice for PUSCH scheduling. + ul_ran_slice_candidate get_next_ul_candidate(); + +private: + struct slice_prio_context { + ran_slice_id_t id; + // Cached values. + int prio = 0; + + slice_prio_context(ran_slice_id_t id_) : id(id_) {} + + /// Compares priorities between two slice contexts. + bool operator<(const slice_prio_context& rhs) const { return prio < rhs.prio; } + bool operator>(const slice_prio_context& rhs) const { return prio > rhs.prio; } + }; + + ran_slice_instance& get_slice(const rrm_policy_member& rrm); + + dl_ran_slice_candidate create_dl_candidate(); + ul_ran_slice_candidate create_ul_candidate(); + + const cell_configuration& cell_cfg; + srslog::basic_logger& logger; + + std::vector slices; + + /// List of slice IDs sorted by priority. 
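+  /// Expected to be called once per slot: clears the per-slot PDSCH/PUSCH RB counters and recomputes the slice priority order.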
+ std::vector sorted_dl_prios; + std::vector sorted_ul_prios; +}; + +} // namespace srsran \ No newline at end of file From 862d963d52501f9d00605c0c2a1cba6881a6db2c Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Fri, 14 Jun 2024 14:04:16 +0200 Subject: [PATCH 36/49] sched: rename prb ratios parameters for slicing --- ...m_policy_config.h => slice_rrm_policy_config.h} | 10 +++++----- include/srsran/scheduler/scheduler_configurator.h | 4 ++-- lib/scheduler/config/cell_configuration.h | 2 +- lib/scheduler/slicing/ran_slice_candidate.h | 10 +++++----- lib/scheduler/slicing/ran_slice_instance.cpp | 6 +++--- lib/scheduler/slicing/ran_slice_instance.h | 14 +++++++------- lib/scheduler/slicing/slice_scheduler.cpp | 6 +++--- lib/scheduler/slicing/slice_scheduler.h | 2 +- 8 files changed, 27 insertions(+), 27 deletions(-) rename include/srsran/scheduler/config/{cell_rrm_policy_config.h => slice_rrm_policy_config.h} (64%) diff --git a/include/srsran/scheduler/config/cell_rrm_policy_config.h b/include/srsran/scheduler/config/slice_rrm_policy_config.h similarity index 64% rename from include/srsran/scheduler/config/cell_rrm_policy_config.h rename to include/srsran/scheduler/config/slice_rrm_policy_config.h index f0f93b4ab0..9cce2490cf 100644 --- a/include/srsran/scheduler/config/cell_rrm_policy_config.h +++ b/include/srsran/scheduler/config/slice_rrm_policy_config.h @@ -16,13 +16,13 @@ namespace srsran { /// Cell-specific Default RAN slice configuration. -struct cell_rrm_policy_config { +struct slice_rrm_policy_config { /// RRM Policy identifier. rrm_policy_member rrc_member; - /// Sets the minimum percentage of PRBs to be allocated to this group. - unsigned min_prb_ratio = 0; - /// Sets the maximum percentage of PRBs to be allocated to this group. - unsigned max_prb_ratio = MAX_NOF_PRBS; + /// Sets the minimum number of PRBs to be allocated to this group. + unsigned min_prb = 0; + /// Sets the maximum number of PRBs to be allocated to this group. + unsigned max_prb = MAX_NOF_PRBS; }; } // namespace srsran \ No newline at end of file diff --git a/include/srsran/scheduler/scheduler_configurator.h b/include/srsran/scheduler/scheduler_configurator.h index 082a212421..b4b3be3814 100644 --- a/include/srsran/scheduler/scheduler_configurator.h +++ b/include/srsran/scheduler/scheduler_configurator.h @@ -31,11 +31,11 @@ #include "srsran/ran/subcarrier_spacing.h" #include "srsran/ran/tdd/tdd_ul_dl_config.h" #include "srsran/scheduler/config/bwp_configuration.h" -#include "srsran/scheduler/config/cell_rrm_policy_config.h" #include "srsran/scheduler/config/dmrs.h" #include "srsran/scheduler/config/logical_channel_config.h" #include "srsran/scheduler/config/serving_cell_config.h" #include "srsran/scheduler/config/si_scheduling_config.h" +#include "srsran/scheduler/config/slice_rrm_policy_config.h" #include "srsran/scheduler/scheduler_dci.h" namespace srsran { @@ -93,7 +93,7 @@ struct sched_cell_configuration_request_message { std::vector nzp_csi_rs_res_list; /// List of RAN slices to support in the scheduler. - std::vector rrm_policy_members; + std::vector rrm_policy_members; unsigned ntn_cs_koffset = 0; }; diff --git a/lib/scheduler/config/cell_configuration.h b/lib/scheduler/config/cell_configuration.h index a06bcbec51..c96b1544cf 100644 --- a/lib/scheduler/config/cell_configuration.h +++ b/lib/scheduler/config/cell_configuration.h @@ -56,7 +56,7 @@ class cell_configuration std::vector nzp_csi_rs_list; /// List of RRM Policy members configured for this cell. 
- std::vector rrm_policy_members; + std::vector rrm_policy_members; // Derived Parameters. ssb_pattern_case ssb_case; diff --git a/lib/scheduler/slicing/ran_slice_candidate.h b/lib/scheduler/slicing/ran_slice_candidate.h index a4988e8df1..edb0b8f4be 100644 --- a/lib/scheduler/slicing/ran_slice_candidate.h +++ b/lib/scheduler/slicing/ran_slice_candidate.h @@ -34,9 +34,9 @@ class common_ran_slice_candidate public: common_ran_slice_candidate(ran_slice_instance* instance_) : inst(instance_, candidate_deleter{}) {} - ran_slice_id_t id() const { return inst->id; } - [[nodiscard]] const cell_rrm_policy_config& cfg() const { return inst->cfg; } - scheduler_policy& policy() { return *inst->policy; } + ran_slice_id_t id() const { return inst->id; } + [[nodiscard]] const slice_rrm_policy_config& cfg() const { return inst->cfg; } + scheduler_policy& policy() { return *inst->policy; } bool is_candidate(du_ue_index_t ue_idx) const { return inst->is_candidate(ue_idx); } bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const { return inst->is_candidate(ue_idx, lcid); } @@ -58,9 +58,9 @@ class common_ran_slice_candidate [[nodiscard]] unsigned remaining_rbs() const { if constexpr (IsDl) { - return inst->cfg.max_prb_ratio < inst->pdsch_rb_count ? 0 : inst->cfg.max_prb_ratio - inst->pdsch_rb_count; + return inst->cfg.max_prb < inst->pdsch_rb_count ? 0 : inst->cfg.max_prb - inst->pdsch_rb_count; } - return inst->cfg.max_prb_ratio < inst->pusch_rb_count ? 0 : inst->cfg.max_prb_ratio - inst->pusch_rb_count; + return inst->cfg.max_prb < inst->pusch_rb_count ? 0 : inst->cfg.max_prb - inst->pusch_rb_count; } protected: diff --git a/lib/scheduler/slicing/ran_slice_instance.cpp b/lib/scheduler/slicing/ran_slice_instance.cpp index c264602e52..f5d80c4eac 100644 --- a/lib/scheduler/slicing/ran_slice_instance.cpp +++ b/lib/scheduler/slicing/ran_slice_instance.cpp @@ -12,9 +12,9 @@ using namespace srsran; -ran_slice_instance::ran_slice_instance(ran_slice_id_t id_, - const cell_configuration& cell_cfg_, - const cell_rrm_policy_config& cfg_) : +ran_slice_instance::ran_slice_instance(ran_slice_id_t id_, + const cell_configuration& cell_cfg_, + const slice_rrm_policy_config& cfg_) : id(id_), cell_cfg(&cell_cfg_), cfg(cfg_) { } diff --git a/lib/scheduler/slicing/ran_slice_instance.h b/lib/scheduler/slicing/ran_slice_instance.h index 8018aa9f46..9df00b65ab 100644 --- a/lib/scheduler/slicing/ran_slice_instance.h +++ b/lib/scheduler/slicing/ran_slice_instance.h @@ -13,7 +13,7 @@ #include "../config/cell_configuration.h" #include "../policy/scheduler_policy.h" #include "ran_slice_id.h" -#include "srsran/scheduler/config/cell_rrm_policy_config.h" +#include "srsran/scheduler/config/slice_rrm_policy_config.h" namespace srsran { @@ -23,7 +23,7 @@ class ran_slice_instance public: constexpr static int skip_slice_prio = std::numeric_limits::min(); - ran_slice_instance(ran_slice_id_t id_, const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_); + ran_slice_instance(ran_slice_id_t id_, const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_); void slot_indication(); @@ -31,18 +31,18 @@ class ran_slice_instance int get_dl_prio() { - if (not active() or pdsch_stopped or cfg.max_prb_ratio <= pdsch_rb_count) { + if (not active() or pdsch_stopped or cfg.max_prb <= pdsch_rb_count) { return skip_slice_prio; } - return cfg.min_prb_ratio > pdsch_rb_count ? cfg.min_prb_ratio - pdsch_rb_count : 0; + return cfg.min_prb > pdsch_rb_count ? 
cfg.min_prb - pdsch_rb_count : 0; } int get_ul_prio() { - if (not active() or pusch_stopped or cfg.max_prb_ratio <= pusch_rb_count) { + if (not active() or pusch_stopped or cfg.max_prb <= pusch_rb_count) { return skip_slice_prio; } - return cfg.min_prb_ratio > pusch_rb_count ? cfg.min_prb_ratio - pusch_rb_count : 0; + return cfg.min_prb > pusch_rb_count ? cfg.min_prb - pusch_rb_count : 0; } /// Save PDSCH grant. @@ -72,7 +72,7 @@ class ran_slice_instance ran_slice_id_t id; const cell_configuration* cell_cfg; - cell_rrm_policy_config cfg; + slice_rrm_policy_config cfg; std::unique_ptr policy; diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index c1c5a12932..f554b17ef3 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -14,7 +14,7 @@ using namespace srsran; -slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_) : +slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_) : cell_cfg(cell_cfg_), logger(srslog::fetch_basic_logger("SCHED")) { // Create a number of slices equal to the number of configured RRM Policy members + 1 (default slice). @@ -25,13 +25,13 @@ slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const cell // Create RAN slice instances. ran_slice_id_t id_count{0}; // Default slice. - slices.emplace_back(id_count, cell_cfg, cell_rrm_policy_config{}); + slices.emplace_back(id_count, cell_cfg, slice_rrm_policy_config{}); slices.back().policy = create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); sorted_dl_prios.emplace_back(id_count); sorted_ul_prios.emplace_back(id_count); // Configured RRM policy members - for (const cell_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { + for (const slice_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { slices.emplace_back(id_count, cell_cfg, rrm); slices.back().policy = create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h index 6d9a76c14b..b4c9646e1f 100644 --- a/lib/scheduler/slicing/slice_scheduler.h +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -20,7 +20,7 @@ namespace srsran { class slice_scheduler { public: - slice_scheduler(const cell_configuration& cell_cfg_, const cell_rrm_policy_config& cfg_); + slice_scheduler(const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_); /// Reset the state of the slices. 
void slot_indication(); From 9bc62a30e4fbdea81066c422002f9fb592ded27c Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Fri, 14 Jun 2024 15:25:47 +0200 Subject: [PATCH 37/49] sched: improve of documentation of slice scheduler and fix minor bugs --- .../config/slice_rrm_policy_config.h | 2 +- lib/scheduler/slicing/ran_slice_candidate.h | 5 ++-- lib/scheduler/slicing/ran_slice_id.h | 2 +- lib/scheduler/slicing/ran_slice_instance.cpp | 2 +- lib/scheduler/slicing/ran_slice_instance.h | 27 ++++++++++--------- lib/scheduler/slicing/slice_scheduler.cpp | 5 ++-- lib/scheduler/slicing/slice_scheduler.h | 2 +- 7 files changed, 25 insertions(+), 20 deletions(-) diff --git a/include/srsran/scheduler/config/slice_rrm_policy_config.h b/include/srsran/scheduler/config/slice_rrm_policy_config.h index 9cce2490cf..e326a157c7 100644 --- a/include/srsran/scheduler/config/slice_rrm_policy_config.h +++ b/include/srsran/scheduler/config/slice_rrm_policy_config.h @@ -25,4 +25,4 @@ struct slice_rrm_policy_config { unsigned max_prb = MAX_NOF_PRBS; }; -} // namespace srsran \ No newline at end of file +} // namespace srsran diff --git a/lib/scheduler/slicing/ran_slice_candidate.h b/lib/scheduler/slicing/ran_slice_candidate.h index edb0b8f4be..0746764f71 100644 --- a/lib/scheduler/slicing/ran_slice_candidate.h +++ b/lib/scheduler/slicing/ran_slice_candidate.h @@ -15,6 +15,7 @@ namespace srsran { namespace detail { +/// \brief RAN slice that is the next candidate for allocation in a given slot and cell. template class common_ran_slice_candidate { @@ -38,8 +39,8 @@ class common_ran_slice_candidate [[nodiscard]] const slice_rrm_policy_config& cfg() const { return inst->cfg; } scheduler_policy& policy() { return *inst->policy; } - bool is_candidate(du_ue_index_t ue_idx) const { return inst->is_candidate(ue_idx); } - bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const { return inst->is_candidate(ue_idx, lcid); } + bool is_candidate(du_ue_index_t ue_idx) const { return inst->contains(ue_idx); } + bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const { return inst->contains(ue_idx, lcid); } /// Signal that the allocations for this slice are complete. 
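  /// Equivalent to letting the candidate go out of scope: the slice is marked complete and will not be offered again in this slot.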
void clear() { inst.reset(); } diff --git a/lib/scheduler/slicing/ran_slice_id.h b/lib/scheduler/slicing/ran_slice_id.h index 0c9b21b797..9ded24a622 100644 --- a/lib/scheduler/slicing/ran_slice_id.h +++ b/lib/scheduler/slicing/ran_slice_id.h @@ -19,4 +19,4 @@ namespace srsran { struct ran_slice_id_tag {}; using ran_slice_id_t = strong_type; -} // namespace srsran \ No newline at end of file +} // namespace srsran diff --git a/lib/scheduler/slicing/ran_slice_instance.cpp b/lib/scheduler/slicing/ran_slice_instance.cpp index f5d80c4eac..bc38b3b274 100644 --- a/lib/scheduler/slicing/ran_slice_instance.cpp +++ b/lib/scheduler/slicing/ran_slice_instance.cpp @@ -27,7 +27,7 @@ void ran_slice_instance::slot_indication() pusch_stopped = false; } -void ran_slice_instance::set_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) +void ran_slice_instance::add_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) { if (not bearers.contains(ue_idx)) { bearers.emplace(ue_idx, MAX_NOF_RB_LCIDS); diff --git a/lib/scheduler/slicing/ran_slice_instance.h b/lib/scheduler/slicing/ran_slice_instance.h index 9df00b65ab..ab9b7ac8d5 100644 --- a/lib/scheduler/slicing/ran_slice_instance.h +++ b/lib/scheduler/slicing/ran_slice_instance.h @@ -21,7 +21,8 @@ namespace srsran { class ran_slice_instance { public: - constexpr static int skip_slice_prio = std::numeric_limits::min(); + constexpr static int skip_slice_prio = std::numeric_limits::min(); + constexpr static int default_slice_prio = 0; ran_slice_instance(ran_slice_id_t id_, const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_); @@ -34,7 +35,7 @@ class ran_slice_instance if (not active() or pdsch_stopped or cfg.max_prb <= pdsch_rb_count) { return skip_slice_prio; } - return cfg.min_prb > pdsch_rb_count ? cfg.min_prb - pdsch_rb_count : 0; + return cfg.min_prb > pdsch_rb_count ? cfg.min_prb - pdsch_rb_count : default_slice_prio; } int get_ul_prio() @@ -42,7 +43,7 @@ class ran_slice_instance if (not active() or pusch_stopped or cfg.max_prb <= pusch_rb_count) { return skip_slice_prio; } - return cfg.min_prb > pusch_rb_count ? cfg.min_prb - pusch_rb_count : 0; + return cfg.min_prb > pusch_rb_count ? cfg.min_prb - pusch_rb_count : default_slice_prio; } /// Save PDSCH grant. @@ -57,17 +58,19 @@ class ran_slice_instance /// Mark the allocation of PUSCH for this slice and the current slot as complete. void pusch_completed() { pusch_stopped = true; } - /// Determine if a UE is a candidate for this slice. - bool is_candidate(du_ue_index_t ue_idx) const { return bearers.contains(ue_idx); } + /// Determine if at least one bearer of the given UE is currently managed by this slice. + bool contains(du_ue_index_t ue_idx) const { return bearers.contains(ue_idx); } - /// Determine if a (UE, LCID) is a candidate for this slice. - bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const - { - return is_candidate(ue_idx) and bearers[ue_idx].test(lcid); - } + /// Determine if a (UE, LCID) tuple are managed by this slice. + bool contains(du_ue_index_t ue_idx, lcid_t lcid) const { return contains(ue_idx) and bearers[ue_idx].test(lcid); } - void set_logical_channel(du_ue_index_t ue_idx, lcid_t lcid); + /// Add a new (UE, LCID) to the list of bearers managed by this slice. + void add_logical_channel(du_ue_index_t ue_idx, lcid_t lcid); + + /// Remove a (UE, LCID) from the list of bearers managed by this slice. void rem_logical_channel(du_ue_index_t ue_idx, lcid_t lcid); + + /// Remove a UE and all associated LCIDs from the list of bearers managed by this slice. 
void rem_ue(du_ue_index_t ue_idx); ran_slice_id_t id; @@ -88,4 +91,4 @@ class ran_slice_instance bool pusch_stopped = false; }; -} // namespace srsran \ No newline at end of file +} // namespace srsran diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index f554b17ef3..a91ec871f4 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -30,6 +30,7 @@ slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const slic create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); sorted_dl_prios.emplace_back(id_count); sorted_ul_prios.emplace_back(id_count); + ++id_count; // Configured RRM policy members for (const slice_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { slices.emplace_back(id_count, cell_cfg, rrm); @@ -64,7 +65,7 @@ void slice_scheduler::add_ue(du_ue_index_t ue_idx, const ue_configuration& ue_cf { for (const logical_channel_config& lc_cfg : ue_cfg.logical_channels()) { ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); - sl_inst.set_logical_channel(ue_idx, lc_cfg.lcid); + sl_inst.add_logical_channel(ue_idx, lc_cfg.lcid); } } @@ -81,7 +82,7 @@ void slice_scheduler::reconf_ue(du_ue_index_t ue_idx, // Add new bearers. for (const logical_channel_config& lc_cfg : next_ue_cfg.logical_channels()) { ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); - sl_inst.set_logical_channel(ue_idx, lc_cfg.lcid); + sl_inst.add_logical_channel(ue_idx, lc_cfg.lcid); } } diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h index b4c9646e1f..1b8ad7aa86 100644 --- a/lib/scheduler/slicing/slice_scheduler.h +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -64,4 +64,4 @@ class slice_scheduler std::vector sorted_ul_prios; }; -} // namespace srsran \ No newline at end of file +} // namespace srsran From 9c104d5106a871401d1e319d6c247f3d16dfecc4 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Fri, 14 Jun 2024 18:32:45 +0200 Subject: [PATCH 38/49] sched: unit test for default slice scheduler --- lib/scheduler/slicing/ran_slice_candidate.h | 20 +-- lib/scheduler/slicing/ran_slice_id.h | 2 +- lib/scheduler/slicing/ran_slice_instance.cpp | 4 +- lib/scheduler/slicing/ran_slice_instance.h | 12 +- lib/scheduler/slicing/slice_scheduler.cpp | 38 +++--- lib/scheduler/slicing/slice_scheduler.h | 17 ++- tests/unittests/scheduler/CMakeLists.txt | 1 + .../scheduler/slicing/CMakeLists.txt | 18 +++ .../slicing/slice_scheduler_test.cpp | 129 ++++++++++++++++++ .../scheduler/test_utils/CMakeLists.txt | 7 +- .../test_utils/config_generators.cpp | 71 ++++++++++ .../scheduler/test_utils/config_generators.h | 32 +++++ 12 files changed, 300 insertions(+), 51 deletions(-) create mode 100644 tests/unittests/scheduler/slicing/CMakeLists.txt create mode 100644 tests/unittests/scheduler/slicing/slice_scheduler_test.cpp create mode 100644 tests/unittests/scheduler/test_utils/config_generators.cpp diff --git a/lib/scheduler/slicing/ran_slice_candidate.h b/lib/scheduler/slicing/ran_slice_candidate.h index 0746764f71..97aa71f71c 100644 --- a/lib/scheduler/slicing/ran_slice_candidate.h +++ b/lib/scheduler/slicing/ran_slice_candidate.h @@ -19,21 +19,8 @@ namespace detail { template class common_ran_slice_candidate { - struct candidate_deleter { - void operator()(ran_slice_instance* p) - { - if (p != nullptr) { - if constexpr (IsDl) { - p->pdsch_completed(); - } else { - p->pusch_completed(); - } - } - } - }; - public: - 
common_ran_slice_candidate(ran_slice_instance* instance_) : inst(instance_, candidate_deleter{}) {} + common_ran_slice_candidate(ran_slice_instance& instance_) : inst(&instance_) {} ran_slice_id_t id() const { return inst->id; } [[nodiscard]] const slice_rrm_policy_config& cfg() const { return inst->cfg; } @@ -42,9 +29,6 @@ class common_ran_slice_candidate bool is_candidate(du_ue_index_t ue_idx) const { return inst->contains(ue_idx); } bool is_candidate(du_ue_index_t ue_idx, lcid_t lcid) const { return inst->contains(ue_idx, lcid); } - /// Signal that the allocations for this slice are complete. - void clear() { inst.reset(); } - /// Register that a new grant was allocated for a given UE. void store_grant(unsigned nof_rbs) { @@ -65,7 +49,7 @@ class common_ran_slice_candidate } protected: - std::unique_ptr inst; + ran_slice_instance* inst = nullptr; }; } // namespace detail diff --git a/lib/scheduler/slicing/ran_slice_id.h b/lib/scheduler/slicing/ran_slice_id.h index 9ded24a622..0485b10294 100644 --- a/lib/scheduler/slicing/ran_slice_id.h +++ b/lib/scheduler/slicing/ran_slice_id.h @@ -17,6 +17,6 @@ namespace srsran { /// RAN slice identifier that should be unique for a given cell,PLMN,S-NSSAI. struct ran_slice_id_tag {}; -using ran_slice_id_t = strong_type; +using ran_slice_id_t = strong_type; } // namespace srsran diff --git a/lib/scheduler/slicing/ran_slice_instance.cpp b/lib/scheduler/slicing/ran_slice_instance.cpp index bc38b3b274..e5d9516d6b 100644 --- a/lib/scheduler/slicing/ran_slice_instance.cpp +++ b/lib/scheduler/slicing/ran_slice_instance.cpp @@ -23,8 +23,8 @@ void ran_slice_instance::slot_indication() { pdsch_rb_count = 0; pusch_rb_count = 0; - pdsch_stopped = false; - pusch_stopped = false; + pdsch_complete = false; + pusch_complete = false; } void ran_slice_instance::add_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) diff --git a/lib/scheduler/slicing/ran_slice_instance.h b/lib/scheduler/slicing/ran_slice_instance.h index ab9b7ac8d5..27fb44c688 100644 --- a/lib/scheduler/slicing/ran_slice_instance.h +++ b/lib/scheduler/slicing/ran_slice_instance.h @@ -32,7 +32,7 @@ class ran_slice_instance int get_dl_prio() { - if (not active() or pdsch_stopped or cfg.max_prb <= pdsch_rb_count) { + if (not active() or pdsch_complete or cfg.max_prb <= pdsch_rb_count) { return skip_slice_prio; } return cfg.min_prb > pdsch_rb_count ? cfg.min_prb - pdsch_rb_count : default_slice_prio; @@ -40,7 +40,7 @@ class ran_slice_instance int get_ul_prio() { - if (not active() or pusch_stopped or cfg.max_prb <= pusch_rb_count) { + if (not active() or pusch_complete or cfg.max_prb <= pusch_rb_count) { return skip_slice_prio; } return cfg.min_prb > pusch_rb_count ? cfg.min_prb - pusch_rb_count : default_slice_prio; @@ -53,10 +53,10 @@ class ran_slice_instance void store_pusch_grant(unsigned crbs) { pusch_rb_count += crbs; } /// Mark the allocation of PDSCH for this slice and the current slot as complete. - void pdsch_completed() { pdsch_stopped = true; } + void set_pdsch_scheduled() { pdsch_complete = true; } /// Mark the allocation of PUSCH for this slice and the current slot as complete. - void pusch_completed() { pusch_stopped = true; } + void set_pusch_scheduled() { pusch_complete = true; } /// Determine if at least one bearer of the given UE is currently managed by this slice. 
bool contains(du_ue_index_t ue_idx) const { return bearers.contains(ue_idx); } @@ -87,8 +87,8 @@ class ran_slice_instance unsigned pusch_rb_count = 0; private: - bool pdsch_stopped = false; - bool pusch_stopped = false; + bool pdsch_complete = false; + bool pusch_complete = false; }; } // namespace srsran diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index a91ec871f4..b363682413 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -14,7 +14,7 @@ using namespace srsran; -slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_) : +slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_) : cell_cfg(cell_cfg_), logger(srslog::fetch_basic_logger("SCHED")) { // Create a number of slices equal to the number of configured RRM Policy members + 1 (default slice). @@ -61,28 +61,26 @@ void slice_scheduler::slot_indication() std::sort(sorted_ul_prios.begin(), sorted_ul_prios.end(), std::greater<>{}); } -void slice_scheduler::add_ue(du_ue_index_t ue_idx, const ue_configuration& ue_cfg) +void slice_scheduler::add_ue(const ue_configuration& ue_cfg) { for (const logical_channel_config& lc_cfg : ue_cfg.logical_channels()) { ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); - sl_inst.add_logical_channel(ue_idx, lc_cfg.lcid); + sl_inst.add_logical_channel(ue_cfg.ue_index, lc_cfg.lcid); } } -void slice_scheduler::reconf_ue(du_ue_index_t ue_idx, - const ue_configuration& next_ue_cfg, - const ue_configuration& prev_ue_cfg) +void slice_scheduler::reconf_ue(const ue_configuration& next_ue_cfg, const ue_configuration& prev_ue_cfg) { // Remove old bearers. for (const logical_channel_config& lc_cfg : prev_ue_cfg.logical_channels()) { ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); - sl_inst.rem_logical_channel(ue_idx, lc_cfg.lcid); + sl_inst.rem_logical_channel(prev_ue_cfg.ue_index, lc_cfg.lcid); } // Add new bearers. for (const logical_channel_config& lc_cfg : next_ue_cfg.logical_channels()) { ran_slice_instance& sl_inst = get_slice(lc_cfg.rrm_policy); - sl_inst.add_logical_channel(ue_idx, lc_cfg.lcid); + sl_inst.add_logical_channel(prev_ue_cfg.ue_index, lc_cfg.lcid); } } @@ -104,7 +102,7 @@ ran_slice_instance& slice_scheduler::get_slice(const rrm_policy_member& rrm) return *it; } -dl_ran_slice_candidate slice_scheduler::get_next_dl_candidate() +std::optional slice_scheduler::get_next_dl_candidate() { if (slices.size() == 1) { return create_dl_candidate(); @@ -128,7 +126,7 @@ dl_ran_slice_candidate slice_scheduler::get_next_dl_candidate() return create_dl_candidate(); } -ul_ran_slice_candidate slice_scheduler::get_next_ul_candidate() +std::optional slice_scheduler::get_next_ul_candidate() { if (slices.size() == 1) { return create_ul_candidate(); @@ -152,14 +150,22 @@ ul_ran_slice_candidate slice_scheduler::get_next_ul_candidate() return create_ul_candidate(); } -dl_ran_slice_candidate slice_scheduler::create_dl_candidate() +std::optional slice_scheduler::create_dl_candidate() { - bool has_candidates = sorted_dl_prios[0].prio != ran_slice_instance::skip_slice_prio; - return dl_ran_slice_candidate{has_candidates ? 
&slices[sorted_dl_prios[0].id.value()] : nullptr}; + if (sorted_dl_prios[0].prio != ran_slice_instance::skip_slice_prio) { + slices[sorted_dl_prios[0].id.value()].set_pdsch_scheduled(); + sorted_dl_prios[0].prio = ran_slice_instance::skip_slice_prio; + return dl_ran_slice_candidate{slices[sorted_dl_prios[0].id.value()]}; + } + return std::nullopt; } -ul_ran_slice_candidate slice_scheduler::create_ul_candidate() +std::optional slice_scheduler::create_ul_candidate() { - bool has_candidates = sorted_ul_prios[0].prio != ran_slice_instance::skip_slice_prio; - return ul_ran_slice_candidate{has_candidates ? &slices[sorted_ul_prios[0].id.value()] : nullptr}; + if (sorted_ul_prios[0].prio != ran_slice_instance::skip_slice_prio) { + slices[sorted_ul_prios[0].id.value()].set_pusch_scheduled(); + sorted_ul_prios[0].prio = ran_slice_instance::skip_slice_prio; + return ul_ran_slice_candidate{slices[sorted_ul_prios[0].id.value()]}; + } + return std::nullopt; } diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h index 1b8ad7aa86..3e0cd1ba3f 100644 --- a/lib/scheduler/slicing/slice_scheduler.h +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -20,21 +20,24 @@ namespace srsran { class slice_scheduler { public: - slice_scheduler(const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_); + slice_scheduler(const cell_configuration& cell_cfg_); /// Reset the state of the slices. void slot_indication(); /// Update the state of the slice with the provided UE configs. - void add_ue(du_ue_index_t ue_idx, const ue_configuration& ue_cfg); - void reconf_ue(du_ue_index_t ue_idx, const ue_configuration& next_ue_cfg, const ue_configuration& prev_ue_cfg); + void add_ue(const ue_configuration& ue_cfg); + void reconf_ue(const ue_configuration& next_ue_cfg, const ue_configuration& prev_ue_cfg); void rem_ue(du_ue_index_t ue_idx); /// Get next RAN slice for PDSCH scheduling. - dl_ran_slice_candidate get_next_dl_candidate(); + std::optional get_next_dl_candidate(); /// Get next RAN slice for PUSCH scheduling. 
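  /// When no slice is left with available capacity in this slot, no candidate is returned.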
- ul_ran_slice_candidate get_next_ul_candidate(); + std::optional get_next_ul_candidate(); + + size_t nof_slices() const { return slices.size(); } + const slice_rrm_policy_config& slice_config(ran_slice_id_t id) const { return slices[id.value()].cfg; } private: struct slice_prio_context { @@ -51,8 +54,8 @@ class slice_scheduler ran_slice_instance& get_slice(const rrm_policy_member& rrm); - dl_ran_slice_candidate create_dl_candidate(); - ul_ran_slice_candidate create_ul_candidate(); + std::optional create_dl_candidate(); + std::optional create_ul_candidate(); const cell_configuration& cell_cfg; srslog::basic_logger& logger; diff --git a/tests/unittests/scheduler/CMakeLists.txt b/tests/unittests/scheduler/CMakeLists.txt index de9f7ce25e..2e729ad5c9 100644 --- a/tests/unittests/scheduler/CMakeLists.txt +++ b/tests/unittests/scheduler/CMakeLists.txt @@ -18,6 +18,7 @@ add_subdirectory(ue_scheduling) add_subdirectory(uci_and_pucch) add_subdirectory(policy) add_subdirectory(config) +add_subdirectory(slicing) add_executable(sched_no_ue_test scheduler_no_ue_test.cpp) target_link_libraries(sched_no_ue_test srsran_sched diff --git a/tests/unittests/scheduler/slicing/CMakeLists.txt b/tests/unittests/scheduler/slicing/CMakeLists.txt new file mode 100644 index 0000000000..a51d2b6e05 --- /dev/null +++ b/tests/unittests/scheduler/slicing/CMakeLists.txt @@ -0,0 +1,18 @@ +# +# Copyright 2021-2024 Software Radio Systems Limited +# +# By using this file, you agree to the terms and conditions set +# forth in the LICENSE file which can be found at the top level of +# the distribution. +# + +add_executable(slice_scheduler_test slice_scheduler_test.cpp) +target_link_libraries(slice_scheduler_test + srsran_sched + scheduler_test_utils + scheduler_test_suite + mac_configuration_helpers + gtest + gtest_main +) +add_test(slice_scheduler_test slice_scheduler_test) diff --git a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp new file mode 100644 index 0000000000..9aa6102186 --- /dev/null +++ b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp @@ -0,0 +1,129 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. 
+ * + */ + +#include "lib/scheduler/slicing/slice_scheduler.h" +#include "tests/unittests/scheduler/test_utils/config_generators.h" +#include "srsran/srslog/srslog.h" +#include + +using namespace srsran; + +class slice_scheduler_test : public ::testing::Test +{ +protected: + slice_scheduler_test(const std::vector& rrm_policy_members = {}) : + test_cfg( + []() { + cell_config_builder_params params{}; + params.scs_common = subcarrier_spacing::kHz30; + params.channel_bw_mhz = bs_channel_bandwidth_fr1::MHz100; + params.dl_arfcn = 520000; + params.band = nr_band::n41; + return params; + }(), + scheduler_expert_config{}), + cell_cfg([this, rrm_policy_members]() -> const cell_configuration& { + auto req = test_cfg.get_default_cell_config_request(); + req.rrm_policy_members = rrm_policy_members; + return *test_cfg.add_cell(req); + }()) + { + logger.set_level(srslog::basic_levels::debug); + srslog::init(); + } + + ~slice_scheduler_test() { srslog::flush(); } + + const ue_configuration* add_ue(du_ue_index_t ue_idx) + { + auto req = test_cfg.get_default_ue_config_request(); + req.ue_index = ue_idx; + req.crnti = to_rnti(0x4601 + ue_idx); + req.starts_in_fallback = false; + const ue_configuration* ue_cfg = test_cfg.add_ue(req); + + slice_sched.add_ue(*ue_cfg); + + return ue_cfg; + } + + srslog::basic_logger& logger = srslog::fetch_basic_logger("TEST"); + test_helpers::test_sched_config_manager test_cfg; + const cell_configuration& cell_cfg; + + slice_scheduler slice_sched{cell_cfg}; +}; + +class default_slice_scheduler_test : public slice_scheduler_test +{}; + +TEST_F(default_slice_scheduler_test, if_no_rrm_policy_cfg_exists_then_only_default_slice_is_created) +{ + ASSERT_EQ(slice_sched.nof_slices(), 1); + ASSERT_EQ(slice_sched.slice_config(ran_slice_id_t{0}).min_prb, 0); + ASSERT_EQ(slice_sched.slice_config(ran_slice_id_t{0}).max_prb, MAX_NOF_PRBS); +} + +TEST_F(default_slice_scheduler_test, when_no_lcid_exists_then_default_slice_is_not_a_candidate) +{ + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); + + auto next_ul_slice = slice_sched.get_next_ul_candidate(); + ASSERT_FALSE(next_ul_slice.has_value()); +} + +TEST_F(default_slice_scheduler_test, when_lcid_is_part_of_default_slice_then_default_slice_is_valid_candidate) +{ + this->add_ue(to_du_ue_index(0)); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_TRUE(next_dl_slice.has_value()); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); + ASSERT_TRUE(next_dl_slice->is_candidate(to_du_ue_index(0))); + ASSERT_TRUE(next_dl_slice->is_candidate(to_du_ue_index(0), lcid_t::LCID_SRB1)); + + auto next_ul_slice = slice_sched.get_next_ul_candidate(); + ASSERT_TRUE(next_ul_slice.has_value()); + ASSERT_EQ(next_ul_slice->id(), ran_slice_id_t{0}); + ASSERT_TRUE(next_ul_slice->is_candidate(to_du_ue_index(0))); + ASSERT_TRUE(next_ul_slice->is_candidate(to_du_ue_index(0), lcid_t::LCID_SRB1)); +} + +TEST_F(default_slice_scheduler_test, + when_candidate_instance_goes_out_of_scope_then_it_stops_being_a_candidate_for_the_same_slot) +{ + this->add_ue(to_du_ue_index(0)); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_TRUE(next_dl_slice.has_value()); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); + + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); +} + +TEST_F(default_slice_scheduler_test, 
when_candidate_instance_goes_out_of_scope_then_it_can_be_a_candidate_for_next_slot) +{ + this->add_ue(to_du_ue_index(0)); + + slice_sched.slot_indication(); + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_TRUE(next_dl_slice.has_value()); + + slice_sched.slot_indication(); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_TRUE(next_dl_slice.has_value()); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); +} diff --git a/tests/unittests/scheduler/test_utils/CMakeLists.txt b/tests/unittests/scheduler/test_utils/CMakeLists.txt index 51ad2fae35..95ad43665f 100644 --- a/tests/unittests/scheduler/test_utils/CMakeLists.txt +++ b/tests/unittests/scheduler/test_utils/CMakeLists.txt @@ -6,7 +6,12 @@ # the distribution. # -add_library(scheduler_test_suite scheduler_test_suite.cpp scheduler_output_test_helpers.cpp) +add_library(scheduler_test_utils config_generators.cpp) +target_link_libraries(scheduler_test_utils srslog mac_configuration_helpers sched_config) + +add_library(scheduler_test_suite + scheduler_test_suite.cpp + scheduler_output_test_helpers.cpp) target_link_libraries(scheduler_test_suite srsran_sched gtest) add_library(uci_test_utils ../uci_and_pucch/uci_test_utils.cpp) diff --git a/tests/unittests/scheduler/test_utils/config_generators.cpp b/tests/unittests/scheduler/test_utils/config_generators.cpp new file mode 100644 index 0000000000..8a2daababb --- /dev/null +++ b/tests/unittests/scheduler/test_utils/config_generators.cpp @@ -0,0 +1,71 @@ +/* + * + * Copyright 2021-2024 Software Radio Systems Limited + * + * By using this file, you agree to the terms and conditions set + * forth in the LICENSE file which can be found at the top level of + * the distribution. + * + */ + +#include "config_generators.h" +#include "lib/scheduler/logging/scheduler_metrics_ue_configurator.h" + +using namespace srsran; +using namespace test_helpers; + +namespace { + +class dummy_sched_configuration_notifier : public sched_configuration_notifier +{ +public: + void on_ue_config_complete(du_ue_index_t ue_index, bool ue_creation_result) override {} + void on_ue_delete_response(du_ue_index_t ue_index) override {} +}; + +class dummy_scheduler_ue_metrics_notifier : public scheduler_ue_metrics_notifier +{ +public: + void report_metrics(span ue_metrics) override {} +}; + +class dummy_sched_metrics_ue_configurator : public sched_metrics_ue_configurator +{ +public: + void handle_ue_creation(du_ue_index_t ue_index, rnti_t rnti, pci_t pcell_pci, unsigned num_prbs) override {} + void handle_ue_deletion(du_ue_index_t ue_index) override {} +}; + +} // namespace + +test_sched_config_manager::test_sched_config_manager(const cell_config_builder_params& builder_params_, + const scheduler_expert_config& expert_cfg_) : + builder_params(builder_params_), + expert_cfg(expert_cfg_), + cfg_notifier(std::make_unique()), + metric_notifier(std::make_unique()), + ue_metrics_configurator(std::make_unique()), + cfg_mng(scheduler_config{expert_cfg, *cfg_notifier, *metric_notifier}, *ue_metrics_configurator) +{ + default_cell_req = test_helpers::make_default_sched_cell_configuration_request(builder_params); + default_ue_req = test_helpers::create_default_sched_ue_creation_request(builder_params, {lcid_t::LCID_MIN_DRB}); +} + +test_sched_config_manager::~test_sched_config_manager() {} + +const cell_configuration* test_sched_config_manager::add_cell(const sched_cell_configuration_request_message& msg) +{ + return cfg_mng.add_cell(msg); +} + +const ue_configuration* test_sched_config_manager::add_ue(const 
sched_ue_creation_request_message& cfg_req) +{ + auto ue_ev = cfg_mng.add_ue(cfg_req); + return ue_ev.valid() ? &ue_ev.next_config() : nullptr; +} + +bool test_sched_config_manager::rem_ue(du_ue_index_t ue_index) +{ + auto ev = cfg_mng.remove_ue(ue_index); + return ev.valid(); +} diff --git a/tests/unittests/scheduler/test_utils/config_generators.h b/tests/unittests/scheduler/test_utils/config_generators.h index 8da557fd14..1f8419fe27 100644 --- a/tests/unittests/scheduler/test_utils/config_generators.h +++ b/tests/unittests/scheduler/test_utils/config_generators.h @@ -11,6 +11,7 @@ #pragma once #include "lib/du_manager/converters/scheduler_configuration_helpers.h" +#include "lib/scheduler/config/sched_config_manager.h" #include "srsran/du/du_cell_config_helpers.h" #include "srsran/ran/duplex_mode.h" #include "srsran/scheduler/config/csi_helper.h" @@ -275,5 +276,36 @@ inline slot_point generate_random_slot_point(subcarrier_spacing scs) return slot_point{scs, count}; } +/// Helper class to manage cell and UE configurations of a scheduler test. +class test_sched_config_manager +{ +public: + test_sched_config_manager(const cell_config_builder_params& builder_params, + const scheduler_expert_config& expert_cfg_ = {}); + ~test_sched_config_manager(); + + const sched_cell_configuration_request_message& get_default_cell_config_request() const { return default_cell_req; } + const sched_ue_creation_request_message& get_default_ue_config_request() const { return default_ue_req; } + + const cell_configuration* add_cell(const sched_cell_configuration_request_message& msg); + + const cell_configuration& get_cell(du_cell_index_t cell_idx) const { return *cfg_mng.common_cell_list()[cell_idx]; } + + const ue_configuration* add_ue(const sched_ue_creation_request_message& cfg_req); + bool rem_ue(du_ue_index_t ue_index); + +private: + const cell_config_builder_params builder_params; + scheduler_expert_config expert_cfg; + std::unique_ptr cfg_notifier; + std::unique_ptr metric_notifier; + std::unique_ptr ue_metrics_configurator; + + sched_cell_configuration_request_message default_cell_req; + sched_ue_creation_request_message default_ue_req; + + sched_config_manager cfg_mng; +}; + } // namespace test_helpers } // namespace srsran From c6b218e29b91cceca6a0713b100fb17d12594a1b Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Mon, 17 Jun 2024 10:44:48 +0200 Subject: [PATCH 39/49] sched: write unit test for multi slice scheduling --- include/srsran/ran/rrm.h | 3 +- lib/scheduler/slicing/slice_scheduler.cpp | 2 - .../slicing/slice_scheduler_test.cpp | 87 ++++++++++++++++--- 3 files changed, 77 insertions(+), 15 deletions(-) diff --git a/include/srsran/ran/rrm.h b/include/srsran/ran/rrm.h index 3df72356a5..bda6b22eee 100644 --- a/include/srsran/ran/rrm.h +++ b/include/srsran/ran/rrm.h @@ -18,7 +18,8 @@ namespace srsran { /// \remark See O-RAN.WG3.E2SM-RC-R003-v3.00 Section 8.4.3.6 struct rrm_policy_member { std::string plmn_id; - s_nssai_t s_nssai; + /// Single Network Slice Selection Assistance Information (S-NSSAI). 
+ s_nssai_t s_nssai; bool operator==(const rrm_policy_member& other) const { diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index b363682413..2b2ce4a2e9 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -154,7 +154,6 @@ std::optional slice_scheduler::create_dl_candidate() { if (sorted_dl_prios[0].prio != ran_slice_instance::skip_slice_prio) { slices[sorted_dl_prios[0].id.value()].set_pdsch_scheduled(); - sorted_dl_prios[0].prio = ran_slice_instance::skip_slice_prio; return dl_ran_slice_candidate{slices[sorted_dl_prios[0].id.value()]}; } return std::nullopt; @@ -164,7 +163,6 @@ std::optional slice_scheduler::create_ul_candidate() { if (sorted_ul_prios[0].prio != ran_slice_instance::skip_slice_prio) { slices[sorted_ul_prios[0].id.value()].set_pusch_scheduled(); - sorted_ul_prios[0].prio = ran_slice_instance::skip_slice_prio; return ul_ran_slice_candidate{slices[sorted_ul_prios[0].id.value()]}; } return std::nullopt; diff --git a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp index 9aa6102186..779fc9bea8 100644 --- a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp +++ b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp @@ -39,18 +39,12 @@ class slice_scheduler_test : public ::testing::Test srslog::init(); } - ~slice_scheduler_test() { srslog::flush(); } + ~slice_scheduler_test() override { srslog::flush(); } - const ue_configuration* add_ue(du_ue_index_t ue_idx) + const ue_configuration* add_ue(const sched_ue_creation_request_message& req) { - auto req = test_cfg.get_default_ue_config_request(); - req.ue_index = ue_idx; - req.crnti = to_rnti(0x4601 + ue_idx); - req.starts_in_fallback = false; const ue_configuration* ue_cfg = test_cfg.add_ue(req); - slice_sched.add_ue(*ue_cfg); - return ue_cfg; } @@ -62,7 +56,37 @@ class slice_scheduler_test : public ::testing::Test }; class default_slice_scheduler_test : public slice_scheduler_test -{}; +{ +protected: + const ue_configuration* add_ue(du_ue_index_t ue_idx) + { + auto req = test_cfg.get_default_ue_config_request(); + req.ue_index = ue_idx; + req.crnti = to_rnti(0x4601 + ue_idx); + req.starts_in_fallback = false; + return slice_scheduler_test::add_ue(req); + } +}; + +class rb_ratio_slice_scheduler_test : public slice_scheduler_test +{ +protected: + constexpr static unsigned MIN_PRB = 10; + constexpr static unsigned MAX_PRB = 20; + + rb_ratio_slice_scheduler_test() : slice_scheduler_test({{{"00101", s_nssai_t{1}}, MIN_PRB, MAX_PRB}}) {} + + const ue_configuration* add_ue(du_ue_index_t ue_idx) + { + auto req = test_cfg.get_default_ue_config_request(); + req.ue_index = ue_idx; + req.crnti = to_rnti(0x4601 + ue_idx); + req.starts_in_fallback = false; + (*req.cfg.lc_config_list)[2].rrm_policy.plmn_id = "00101"; + (*req.cfg.lc_config_list)[2].rrm_policy.s_nssai = s_nssai_t{1}; + return slice_scheduler_test::add_ue(req); + } +}; TEST_F(default_slice_scheduler_test, if_no_rrm_policy_cfg_exists_then_only_default_slice_is_created) { @@ -84,7 +108,7 @@ TEST_F(default_slice_scheduler_test, when_no_lcid_exists_then_default_slice_is_n TEST_F(default_slice_scheduler_test, when_lcid_is_part_of_default_slice_then_default_slice_is_valid_candidate) { - this->add_ue(to_du_ue_index(0)); + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); slice_sched.slot_indication(); auto next_dl_slice = slice_sched.get_next_dl_candidate(); @@ -103,7 +127,7 @@ 
TEST_F(default_slice_scheduler_test, when_lcid_is_part_of_default_slice_then_def TEST_F(default_slice_scheduler_test, when_candidate_instance_goes_out_of_scope_then_it_stops_being_a_candidate_for_the_same_slot) { - this->add_ue(to_du_ue_index(0)); + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); slice_sched.slot_indication(); auto next_dl_slice = slice_sched.get_next_dl_candidate(); @@ -116,7 +140,7 @@ TEST_F(default_slice_scheduler_test, TEST_F(default_slice_scheduler_test, when_candidate_instance_goes_out_of_scope_then_it_can_be_a_candidate_for_next_slot) { - this->add_ue(to_du_ue_index(0)); + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); slice_sched.slot_indication(); auto next_dl_slice = slice_sched.get_next_dl_candidate(); @@ -127,3 +151,42 @@ TEST_F(default_slice_scheduler_test, when_candidate_instance_goes_out_of_scope_t ASSERT_TRUE(next_dl_slice.has_value()); ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); } + +TEST_F(default_slice_scheduler_test, when_grant_gets_allocated_then_number_of_available_rbs_decreases) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + + unsigned alloc_rbs = 10; + unsigned rem_rbs = next_dl_slice->remaining_rbs(); + next_dl_slice->store_grant(alloc_rbs); + ASSERT_EQ(next_dl_slice->remaining_rbs(), rem_rbs - alloc_rbs); +} + +// rb_ratio_slice_scheduler_test + +TEST_F(rb_ratio_slice_scheduler_test, when_slice_with_min_rb_has_ues_then_it_is_the_first_candidate) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + ASSERT_TRUE(next_dl_slice->is_candidate(to_du_ue_index(0), lcid_t::LCID_MIN_DRB)); + + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); +} + +TEST_F(rb_ratio_slice_scheduler_test, when_slice_rb_ratios_are_bounded_then_remaining_rbs_is_bounded) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + + ASSERT_LE(next_dl_slice->remaining_rbs(), MAX_PRB); +} From 82487267f93f8c34f6e4abc125b84afaecec2f5b Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Mon, 17 Jun 2024 19:41:19 +0200 Subject: [PATCH 40/49] sched: extend unit tests for slice scheduler and rewrote the prioritization mechanism --- lib/scheduler/slicing/ran_slice_candidate.h | 12 +- lib/scheduler/slicing/ran_slice_instance.cpp | 2 - lib/scheduler/slicing/ran_slice_instance.h | 29 ---- lib/scheduler/slicing/slice_scheduler.cpp | 163 ++++++++++-------- lib/scheduler/slicing/slice_scheduler.h | 79 +++++++-- .../slicing/slice_scheduler_test.cpp | 156 ++++++++++++++--- 6 files changed, 293 insertions(+), 148 deletions(-) diff --git a/lib/scheduler/slicing/ran_slice_candidate.h b/lib/scheduler/slicing/ran_slice_candidate.h index 97aa71f71c..8d50af243d 100644 --- a/lib/scheduler/slicing/ran_slice_candidate.h +++ b/lib/scheduler/slicing/ran_slice_candidate.h @@ -20,7 +20,10 @@ template class common_ran_slice_candidate { public: - common_ran_slice_candidate(ran_slice_instance& instance_) : inst(&instance_) {} + common_ran_slice_candidate(ran_slice_instance& instance_, unsigned max_rbs_ = 0) : + inst(&instance_), max_rbs(max_rbs_ == 0 ? 
inst->cfg.max_prb : max_rbs_) + { + } ran_slice_id_t id() const { return inst->id; } [[nodiscard]] const slice_rrm_policy_config& cfg() const { return inst->cfg; } @@ -43,13 +46,14 @@ class common_ran_slice_candidate [[nodiscard]] unsigned remaining_rbs() const { if constexpr (IsDl) { - return inst->cfg.max_prb < inst->pdsch_rb_count ? 0 : inst->cfg.max_prb - inst->pdsch_rb_count; + return max_rbs < inst->pdsch_rb_count ? 0 : max_rbs - inst->pdsch_rb_count; } - return inst->cfg.max_prb < inst->pusch_rb_count ? 0 : inst->cfg.max_prb - inst->pusch_rb_count; + return max_rbs < inst->pusch_rb_count ? 0 : max_rbs - inst->pusch_rb_count; } protected: - ran_slice_instance* inst = nullptr; + ran_slice_instance* inst = nullptr; + unsigned max_rbs = 0; }; } // namespace detail diff --git a/lib/scheduler/slicing/ran_slice_instance.cpp b/lib/scheduler/slicing/ran_slice_instance.cpp index e5d9516d6b..9be101722b 100644 --- a/lib/scheduler/slicing/ran_slice_instance.cpp +++ b/lib/scheduler/slicing/ran_slice_instance.cpp @@ -23,8 +23,6 @@ void ran_slice_instance::slot_indication() { pdsch_rb_count = 0; pusch_rb_count = 0; - pdsch_complete = false; - pusch_complete = false; } void ran_slice_instance::add_logical_channel(du_ue_index_t ue_idx, lcid_t lcid) diff --git a/lib/scheduler/slicing/ran_slice_instance.h b/lib/scheduler/slicing/ran_slice_instance.h index 27fb44c688..0bec3bd0ff 100644 --- a/lib/scheduler/slicing/ran_slice_instance.h +++ b/lib/scheduler/slicing/ran_slice_instance.h @@ -21,43 +21,18 @@ namespace srsran { class ran_slice_instance { public: - constexpr static int skip_slice_prio = std::numeric_limits::min(); - constexpr static int default_slice_prio = 0; - ran_slice_instance(ran_slice_id_t id_, const cell_configuration& cell_cfg_, const slice_rrm_policy_config& cfg_); void slot_indication(); bool active() const { return not bearers.empty(); } - int get_dl_prio() - { - if (not active() or pdsch_complete or cfg.max_prb <= pdsch_rb_count) { - return skip_slice_prio; - } - return cfg.min_prb > pdsch_rb_count ? cfg.min_prb - pdsch_rb_count : default_slice_prio; - } - - int get_ul_prio() - { - if (not active() or pusch_complete or cfg.max_prb <= pusch_rb_count) { - return skip_slice_prio; - } - return cfg.min_prb > pusch_rb_count ? cfg.min_prb - pusch_rb_count : default_slice_prio; - } - /// Save PDSCH grant. void store_pdsch_grant(unsigned crbs) { pdsch_rb_count += crbs; } /// Save PUSCH grant. void store_pusch_grant(unsigned crbs) { pusch_rb_count += crbs; } - /// Mark the allocation of PDSCH for this slice and the current slot as complete. - void set_pdsch_scheduled() { pdsch_complete = true; } - - /// Mark the allocation of PUSCH for this slice and the current slot as complete. - void set_pusch_scheduled() { pusch_complete = true; } - /// Determine if at least one bearer of the given UE is currently managed by this slice. bool contains(du_ue_index_t ue_idx) const { return bearers.contains(ue_idx); } @@ -85,10 +60,6 @@ class ran_slice_instance unsigned pdsch_rb_count = 0; /// Counter of how many RBs have been scheduled for PUSCH in the current slot for this slice. 
unsigned pusch_rb_count = 0; - -private: - bool pdsch_complete = false; - bool pusch_complete = false; }; } // namespace srsran diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index 2b2ce4a2e9..411ce17cf9 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -19,46 +19,40 @@ slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_) : { // Create a number of slices equal to the number of configured RRM Policy members + 1 (default slice). slices.reserve(cell_cfg.rrm_policy_members.size() + 1); - sorted_dl_prios.reserve(cell_cfg.rrm_policy_members.size() + 1); - sorted_ul_prios.reserve(cell_cfg.rrm_policy_members.size() + 1); // Create RAN slice instances. ran_slice_id_t id_count{0}; // Default slice. slices.emplace_back(id_count, cell_cfg, slice_rrm_policy_config{}); - slices.back().policy = + slices.back().inst.policy = create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); - sorted_dl_prios.emplace_back(id_count); - sorted_ul_prios.emplace_back(id_count); ++id_count; // Configured RRM policy members for (const slice_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { slices.emplace_back(id_count, cell_cfg, rrm); - slices.back().policy = + slices.back().inst.policy = create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); - sorted_dl_prios.emplace_back(id_count); - sorted_ul_prios.emplace_back(id_count); ++id_count; } } void slice_scheduler::slot_indication() { + slot_count++; + + // Update the context of each slice. for (auto& slice : slices) { - slice.slot_indication(); + slice.inst.slot_indication(); } - // Compute slice priorities. - for (slice_prio_context& ctx : sorted_dl_prios) { - ctx.prio = slices[ctx.id.value()].get_dl_prio(); - } - for (slice_prio_context& ctx : sorted_ul_prios) { - ctx.prio = slices[ctx.id.value()].get_ul_prio(); + // Recompute the priority queues. + dl_prio_queue.clear(); + ul_prio_queue.clear(); + for (const auto& slice : slices) { + unsigned max_rbs = slice.inst.cfg.min_prb > 0 ? slice.inst.cfg.min_prb : slice.inst.cfg.max_prb; + dl_prio_queue.push(slice_candidate_context{slice.inst.id, slice.get_prio(true, slot_count, false), {0, max_rbs}}); + ul_prio_queue.push(slice_candidate_context{slice.inst.id, slice.get_prio(false, slot_count, false), {0, max_rbs}}); } - - // Sort slices by descending priority. - std::sort(sorted_dl_prios.begin(), sorted_dl_prios.end(), std::greater<>{}); - std::sort(sorted_ul_prios.begin(), sorted_ul_prios.end(), std::greater<>{}); } void slice_scheduler::add_ue(const ue_configuration& ue_cfg) @@ -87,83 +81,104 @@ void slice_scheduler::reconf_ue(const ue_configuration& next_ue_cfg, const ue_co void slice_scheduler::rem_ue(du_ue_index_t ue_idx) { for (auto& slice : slices) { - slice.rem_ue(ue_idx); + slice.inst.rem_ue(ue_idx); } } ran_slice_instance& slice_scheduler::get_slice(const rrm_policy_member& rrm) { - auto it = std::find_if( - slices.begin(), slices.end(), [&rrm](const ran_slice_instance& slice) { return slice.cfg.rrc_member == rrm; }); + auto it = std::find_if(slices.begin() + 1, slices.end(), [&rrm](const ran_slice_sched_context& slice) { + return slice.inst.cfg.rrc_member == rrm; + }); if (it == slices.end()) { // Slice with the provided RRM policy member was not found. Return default slice. 
- return slices.front(); + return slices.front().inst; } - return *it; + return it->inst; } -std::optional slice_scheduler::get_next_dl_candidate() +template +std::optional> +slice_scheduler::get_next_candidate() { - if (slices.size() == 1) { - return create_dl_candidate(); - } - - // Check if the slice priority hasn't changed. If it did, recompute priorities. - slice_prio_context* slice_prio_ctxt = &sorted_dl_prios.front(); - // Recompute priority - int prio = slices[slice_prio_ctxt->id.value()].get_dl_prio(); - if (prio != slice_prio_ctxt->prio) { - // Priority changed - slice_prio_ctxt->prio = prio; - // Check if sort needs to be called again - // Note: This assumes that only the previous front slice was used in scheduling. - if (prio < sorted_dl_prios[1].prio) { - // Slices need to be reordered. - std::sort(sorted_dl_prios.begin(), sorted_dl_prios.end(), std::greater<>{}); + slice_prio_queue& prio_queue = IsDownlink ? dl_prio_queue : ul_prio_queue; + while (not prio_queue.empty()) { + ran_slice_sched_context& chosen_slice = slices[prio_queue.top().id.value()]; + interval rb_lims = prio_queue.top().rb_lims; + prio_queue.pop(); + + unsigned rb_count = IsDownlink ? chosen_slice.inst.pdsch_rb_count : chosen_slice.inst.pusch_rb_count; + if (not rb_lims.contains(rb_count)) { + // The slice has been scheduled in this slot with a number of RBs that is not within the limits for this + // candidate. This could happen, for instance, if the scheduler could not schedule all RBs of a candidate + // bounded between {0, minRB}. In this case, the second candidate for the same slice with bounds {minRB, maxRB} + // is skipped. + continue; } - } - return create_dl_candidate(); -} + const slice_rrm_policy_config& cfg = chosen_slice.inst.cfg; + if (cfg.min_prb != cfg.max_prb and rb_lims.stop() == cfg.min_prb) { + // For the special case when minRB ratio>0, the first candidate for this slice was bounded between {0, minRB}. + // We re-add the slice as a candidate, this time, with RB bounds {minRB, maxRB}. + priority_type prio = chosen_slice.get_prio(true, slot_count, true); + prio_queue.push(slice_candidate_context{chosen_slice.inst.id, prio, {cfg.min_prb, cfg.max_prb}}); + } -std::optional slice_scheduler::get_next_ul_candidate() -{ - if (slices.size() == 1) { - return create_ul_candidate(); - } + // Save current slot count. + unsigned& count_to_set = IsDownlink ? chosen_slice.last_dl_slot : chosen_slice.last_ul_slot; + count_to_set = slot_count; - // Check if the slice priority hasn't changed. If it did, recompute priorities. - slice_prio_context* slice_prio_ctxt = &sorted_ul_prios.front(); - // Recompute priority - int prio = slices[slice_prio_ctxt->id.value()].get_ul_prio(); - if (prio != slice_prio_ctxt->prio) { - // Priority changed - slice_prio_ctxt->prio = prio; - // Check if sort needs to be called again - // Note: This assumes that only the previous front slice was used in scheduling. - if (prio < sorted_ul_prios[1].prio) { - // Slices need to be reordered. - std::sort(sorted_ul_prios.begin(), sorted_ul_prios.end(), std::greater<>{}); - } + // Return the candidate. 
+ return std::conditional_t{chosen_slice.inst, + rb_lims.stop()}; } + return std::nullopt; +} - return create_ul_candidate(); +std::optional slice_scheduler::get_next_dl_candidate() +{ + return get_next_candidate(); } -std::optional slice_scheduler::create_dl_candidate() +std::optional slice_scheduler::get_next_ul_candidate() { - if (sorted_dl_prios[0].prio != ran_slice_instance::skip_slice_prio) { - slices[sorted_dl_prios[0].id.value()].set_pdsch_scheduled(); - return dl_ran_slice_candidate{slices[sorted_dl_prios[0].id.value()]}; - } - return std::nullopt; + return get_next_candidate(); } -std::optional slice_scheduler::create_ul_candidate() +slice_scheduler::priority_type slice_scheduler::ran_slice_sched_context::get_prio(bool is_dl, + slot_count_type current_slot_count, + bool slice_resched) const { - if (sorted_ul_prios[0].prio != ran_slice_instance::skip_slice_prio) { - slices[sorted_ul_prios[0].id.value()].set_pusch_scheduled(); - return ul_ran_slice_candidate{slices[sorted_ul_prios[0].id.value()]}; + // Note: The positive integer representing the priority of a slice consists of a concatenation of three priority + // values: + // 1. slice traffic priority (16 bits). Differentiation of slices based on whether they have minimum required + // traffic agreements (e.g. minRB ratio). + // 2. delay priority (8 bits), which attributes the highest priority to slices that have not been scheduled for a + // long time + // 3. round-robin based on slot indication count (8 bits). + + // Priority when slice has already reached its minimum RB ratio agreement. + constexpr static priority_type default_prio = 0x1U; + // Priority when slice still needs to reach its minimum RB ratio agreement. + constexpr static priority_type high_prio = 0x2U; + constexpr static priority_type delay_bitsize = 8U; + constexpr static priority_type rr_bitsize = 8U; + + unsigned rb_count = is_dl ? inst.pdsch_rb_count : inst.pusch_rb_count; + if (not inst.active() or rb_count >= inst.cfg.max_prb) { + // If the slice is not in a state to be scheduled in this slot, return skip priority level. + return skip_prio; } - return std::nullopt; + + // In case minRB > 0 and this is the first time the slice is proposed as a candidate, we give it a higher priority. + priority_type slice_prio = inst.cfg.min_prb > 0 and not slice_resched ? high_prio : default_prio; + + // Increase priorities of slices that have not been scheduled for a long time. + unsigned last_count = is_dl ? last_dl_slot : last_ul_slot; + priority_type delay_prio = (current_slot_count - last_count) & ((1U << delay_bitsize) - 1U); + + // Round-robin across slices with the same slice and delay priorities. + priority_type rr_prio = (inst.id.value() % current_slot_count) & ((1U << rr_bitsize) - 1U); + + return (slice_prio << (delay_bitsize + rr_bitsize)) + (delay_prio << rr_bitsize) + rr_prio; } diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h index 3e0cd1ba3f..be63f9ba53 100644 --- a/lib/scheduler/slicing/slice_scheduler.h +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -13,12 +13,17 @@ #include "../policy/scheduler_policy.h" #include "ran_slice_candidate.h" #include "ran_slice_instance.h" +#include namespace srsran { /// Inter-slice Scheduler. 
class slice_scheduler { + using priority_type = uint32_t; + using slot_count_type = uint32_t; + constexpr static priority_type skip_prio = 0; + public: slice_scheduler(const cell_configuration& cell_cfg_); @@ -37,34 +42,80 @@ class slice_scheduler std::optional get_next_ul_candidate(); size_t nof_slices() const { return slices.size(); } - const slice_rrm_policy_config& slice_config(ran_slice_id_t id) const { return slices[id.value()].cfg; } + const slice_rrm_policy_config& slice_config(ran_slice_id_t id) const { return slices[id.value()].inst.cfg; } private: - struct slice_prio_context { - ran_slice_id_t id; - // Cached values. - int prio = 0; + /// Class responsible for tracking the scheduling context of each RAN slice instance. + struct ran_slice_sched_context { + ran_slice_instance inst; + // Counter tracking the last time this slice was scheduled as a candidate. + slot_count_type last_dl_slot = 0; + slot_count_type last_ul_slot = 0; + + ran_slice_sched_context(ran_slice_id_t id, const cell_configuration& cell_cfg, const slice_rrm_policy_config& cfg) : + inst(id, cell_cfg, cfg) + { + } + + /// Get the slice priority as a candidate, considering also past decisions by the slice scheduler for this slot. + priority_type get_prio(bool is_dl, slot_count_type current_slot_count, bool slice_resched) const; + }; + + struct slice_candidate_context { + ran_slice_id_t id; + priority_type prio; + interval rb_lims; - slice_prio_context(ran_slice_id_t id_) : id(id_) {} + slice_candidate_context(ran_slice_id_t id_, priority_type prio_, interval rb_lims_) : + id(id_), prio(prio_), rb_lims(rb_lims_) + { + } /// Compares priorities between two slice contexts. - bool operator<(const slice_prio_context& rhs) const { return prio < rhs.prio; } - bool operator>(const slice_prio_context& rhs) const { return prio > rhs.prio; } + bool operator<(const slice_candidate_context& rhs) const { return prio < rhs.prio; } + bool operator>(const slice_candidate_context& rhs) const { return prio > rhs.prio; } + }; + + // Note: the std::priority_queue makes its underlying container protected, so it seems that they are ok with + // inheritance. + class slice_prio_queue : public std::priority_queue + { + public: + // Note: faster than while(!empty()) pop() because it avoids the O(NlogN). Faster than = {}, because it preserves + // memory. + void clear() + { + // Access to underlying vector. + this->c.clear(); + } + + // Adapter of the priority_queue push method to avoid adding candidates with skip priority level. + void push(const slice_candidate_context& elem) + { + if (elem.prio == skip_prio) { + return; + } + std::priority_queue::push(elem); + } }; ran_slice_instance& get_slice(const rrm_policy_member& rrm); - std::optional create_dl_candidate(); - std::optional create_ul_candidate(); + template + std::optional> get_next_candidate(); const cell_configuration& cell_cfg; srslog::basic_logger& logger; - std::vector slices; + std::vector slices; + + // Queue of slice candidates sorted by priority. + slice_prio_queue dl_prio_queue; + slice_prio_queue ul_prio_queue; - /// List of slice IDs sorted by priority. - std::vector sorted_dl_prios; - std::vector sorted_ul_prios; + // Count that gets incremented with every new slot. Useful for time round-robin of slices with the same priority. + // Note: This unsigned value will wrap-around. 
+ slot_count_type slot_count = 0; }; } // namespace srsran diff --git a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp index 779fc9bea8..3a7f35bba2 100644 --- a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp +++ b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp @@ -15,7 +15,7 @@ using namespace srsran; -class slice_scheduler_test : public ::testing::Test +class slice_scheduler_test { protected: slice_scheduler_test(const std::vector& rrm_policy_members = {}) : @@ -39,7 +39,7 @@ class slice_scheduler_test : public ::testing::Test srslog::init(); } - ~slice_scheduler_test() override { srslog::flush(); } + ~slice_scheduler_test() { srslog::flush(); } const ue_configuration* add_ue(const sched_ue_creation_request_message& req) { @@ -55,7 +55,7 @@ class slice_scheduler_test : public ::testing::Test slice_scheduler slice_sched{cell_cfg}; }; -class default_slice_scheduler_test : public slice_scheduler_test +class default_slice_scheduler_test : public slice_scheduler_test, public ::testing::Test { protected: const ue_configuration* add_ue(du_ue_index_t ue_idx) @@ -68,26 +68,6 @@ class default_slice_scheduler_test : public slice_scheduler_test } }; -class rb_ratio_slice_scheduler_test : public slice_scheduler_test -{ -protected: - constexpr static unsigned MIN_PRB = 10; - constexpr static unsigned MAX_PRB = 20; - - rb_ratio_slice_scheduler_test() : slice_scheduler_test({{{"00101", s_nssai_t{1}}, MIN_PRB, MAX_PRB}}) {} - - const ue_configuration* add_ue(du_ue_index_t ue_idx) - { - auto req = test_cfg.get_default_ue_config_request(); - req.ue_index = ue_idx; - req.crnti = to_rnti(0x4601 + ue_idx); - req.starts_in_fallback = false; - (*req.cfg.lc_config_list)[2].rrm_policy.plmn_id = "00101"; - (*req.cfg.lc_config_list)[2].rrm_policy.s_nssai = s_nssai_t{1}; - return slice_scheduler_test::add_ue(req); - } -}; - TEST_F(default_slice_scheduler_test, if_no_rrm_policy_cfg_exists_then_only_default_slice_is_created) { ASSERT_EQ(slice_sched.nof_slices(), 1); @@ -167,6 +147,26 @@ TEST_F(default_slice_scheduler_test, when_grant_gets_allocated_then_number_of_av // rb_ratio_slice_scheduler_test +class rb_ratio_slice_scheduler_test : public slice_scheduler_test, public ::testing::Test +{ +protected: + constexpr static unsigned MIN_SLICE_RB = 10; + constexpr static unsigned MAX_SLICE_RB = 20; + + rb_ratio_slice_scheduler_test() : slice_scheduler_test({{{"00101", s_nssai_t{1}}, MIN_SLICE_RB, MAX_SLICE_RB}}) {} + + const ue_configuration* add_ue(du_ue_index_t ue_idx) + { + auto req = test_cfg.get_default_ue_config_request(); + req.ue_index = ue_idx; + req.crnti = to_rnti(0x4601 + ue_idx); + req.starts_in_fallback = false; + (*req.cfg.lc_config_list)[2].rrm_policy.plmn_id = "00101"; + (*req.cfg.lc_config_list)[2].rrm_policy.s_nssai = s_nssai_t{1}; + return slice_scheduler_test::add_ue(req); + } +}; + TEST_F(rb_ratio_slice_scheduler_test, when_slice_with_min_rb_has_ues_then_it_is_the_first_candidate) { ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); @@ -175,18 +175,124 @@ TEST_F(rb_ratio_slice_scheduler_test, when_slice_with_min_rb_has_ues_then_it_is_ auto next_dl_slice = slice_sched.get_next_dl_candidate(); ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); ASSERT_TRUE(next_dl_slice->is_candidate(to_du_ue_index(0), lcid_t::LCID_MIN_DRB)); +} + +TEST_F(rb_ratio_slice_scheduler_test, when_slice_rb_ratios_are_min_bounded_then_remaining_rbs_is_min_bounded) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + 
slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + ASSERT_EQ(next_dl_slice->remaining_rbs(), MIN_SLICE_RB); +} + +TEST_F(rb_ratio_slice_scheduler_test, + when_slice_with_min_rb_is_partially_scheduled_then_it_is_never_a_candidate_again_for_the_same_slot) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + next_dl_slice->store_grant(MIN_SLICE_RB - 1); // we leave one RB empty (MIN_SLICE_RB - 1). + ASSERT_EQ(next_dl_slice->remaining_rbs(), 1); + + // Another slice should be selected as second candidate. next_dl_slice = slice_sched.get_next_dl_candidate(); ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{0}); + + // No more slices to schedule. + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); } -TEST_F(rb_ratio_slice_scheduler_test, when_slice_rb_ratios_are_bounded_then_remaining_rbs_is_bounded) +TEST_F(rb_ratio_slice_scheduler_test, + when_slice_with_min_rb_is_allocated_until_min_rb_then_it_can_still_a_candidate_until_max_rb_is_reached) { ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); slice_sched.slot_indication(); auto next_dl_slice = slice_sched.get_next_dl_candidate(); ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + next_dl_slice->store_grant(MIN_SLICE_RB); + ASSERT_EQ(next_dl_slice->remaining_rbs(), 0); + + // Another slice should be selected as second candidate (assuming RR between the slices 0 and 1). + ran_slice_id_t last_slice_id{}; + for (unsigned i = 0; i != 2; ++i) { + next_dl_slice = slice_sched.get_next_dl_candidate(); + if (i == 1) { + ASSERT_NE(next_dl_slice->id(), last_slice_id) << "The same slice was selected twice."; + } + if (next_dl_slice->id() == ran_slice_id_t{0}) { + ASSERT_EQ(next_dl_slice->remaining_rbs(), MAX_NOF_PRBS); + } else { + // Original slice is selected again, now using maxRB ratio as the remaining RBs. + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + ASSERT_EQ(next_dl_slice->remaining_rbs(), MAX_SLICE_RB - MIN_SLICE_RB); + } + last_slice_id = next_dl_slice->id(); + } + + // No more slices to schedule. + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); +} + +TEST_F(rb_ratio_slice_scheduler_test, + when_candidates_are_scheduled_in_a_slot_then_priorities_are_recomputed_in_a_new_slot) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + slice_sched.slot_indication(); + + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + next_dl_slice->store_grant(MIN_SLICE_RB); + next_dl_slice = slice_sched.get_next_dl_candidate(); + next_dl_slice = slice_sched.get_next_dl_candidate(); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); + + // New slot and priorities are reestablished. 
+ slice_sched.slot_indication(); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + ASSERT_EQ(next_dl_slice->remaining_rbs(), MIN_SLICE_RB); + next_dl_slice->store_grant(MIN_SLICE_RB); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ran_slice_id_t last_slice_id = next_dl_slice->id(); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_NE(next_dl_slice->id(), last_slice_id); + next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_FALSE(next_dl_slice.has_value()); +} + +TEST_F(rb_ratio_slice_scheduler_test, + when_slices_are_saturated_then_slices_should_have_equal_opportunity_to_reach_max_rbs) +{ + ASSERT_NE(this->add_ue(to_du_ue_index(0)), nullptr); + + unsigned max_nof_slots = 100; + unsigned slice_0_count = 0; + for (unsigned count = 0; count != max_nof_slots; ++count) { + slice_sched.slot_indication(); + + // Slice 1 with minRBs to fill is first selected. + auto next_dl_slice = slice_sched.get_next_dl_candidate(); + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + next_dl_slice->store_grant(MIN_SLICE_RB); + + // Either Slice 0 or 1 are then selected. + next_dl_slice = slice_sched.get_next_dl_candidate(); + if (next_dl_slice->id() == ran_slice_id_t{0}) { + ASSERT_EQ(next_dl_slice->remaining_rbs(), MAX_NOF_PRBS); + slice_0_count++; + } else { + ASSERT_EQ(next_dl_slice->id(), ran_slice_id_t{1}); + ASSERT_EQ(next_dl_slice->remaining_rbs(), MAX_SLICE_RB - MIN_SLICE_RB); + } + } - ASSERT_LE(next_dl_slice->remaining_rbs(), MAX_PRB); + ASSERT_EQ(slice_0_count, max_nof_slots / 2) << "Round-robin of slices of same priority failed"; } From 62251a586389dcf1edc1e43b72915200fcd14d74 Mon Sep 17 00:00:00 2001 From: Francisco Paisana Date: Tue, 18 Jun 2024 15:03:46 +0200 Subject: [PATCH 41/49] sched: improve comments of slice scheduler --- lib/scheduler/slicing/slice_scheduler.cpp | 12 ++++++------ lib/scheduler/slicing/slice_scheduler.h | 2 +- .../scheduler/slicing/slice_scheduler_test.cpp | 13 +++++++------ 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/lib/scheduler/slicing/slice_scheduler.cpp b/lib/scheduler/slicing/slice_scheduler.cpp index 411ce17cf9..2eab4776af 100644 --- a/lib/scheduler/slicing/slice_scheduler.cpp +++ b/lib/scheduler/slicing/slice_scheduler.cpp @@ -27,7 +27,7 @@ slice_scheduler::slice_scheduler(const cell_configuration& cell_cfg_) : slices.back().inst.policy = create_scheduler_strategy(scheduler_strategy_params{"time_rr", &logger}, cell_cfg.expert_cfg.ue); ++id_count; - // Configured RRM policy members + // Configured RRM policy members. for (const slice_rrm_policy_config& rrm : cell_cfg.rrm_policy_members) { slices.emplace_back(id_count, cell_cfg, rrm); slices.back().inst.policy = @@ -87,7 +87,7 @@ void slice_scheduler::rem_ue(du_ue_index_t ue_idx) ran_slice_instance& slice_scheduler::get_slice(const rrm_policy_member& rrm) { - auto it = std::find_if(slices.begin() + 1, slices.end(), [&rrm](const ran_slice_sched_context& slice) { + auto it = std::find_if(slices.begin(), slices.end(), [&rrm](const ran_slice_sched_context& slice) { return slice.inst.cfg.rrc_member == rrm; }); if (it == slices.end()) { @@ -151,11 +151,11 @@ slice_scheduler::priority_type slice_scheduler::ran_slice_sched_context::get_pri { // Note: The positive integer representing the priority of a slice consists of a concatenation of three priority // values: - // 1. slice traffic priority (16 bits). Differentiation of slices based on whether they have minimum required - // traffic agreements (e.g. 
minRB ratio). + // 1. slice traffic priority (16 most significant bits). It differentiates slices based on whether they have + // minimum required traffic agreements (e.g. minRB ratio). // 2. delay priority (8 bits), which attributes the highest priority to slices that have not been scheduled for a - // long time - // 3. round-robin based on slot indication count (8 bits). + // long time. + // 3. round-robin based on slot indication count (8 least significant bits). // Priority when slice has already reached its minimum RB ratio agreement. constexpr static priority_type default_prio = 0x1U; diff --git a/lib/scheduler/slicing/slice_scheduler.h b/lib/scheduler/slicing/slice_scheduler.h index be63f9ba53..d18e101835 100644 --- a/lib/scheduler/slicing/slice_scheduler.h +++ b/lib/scheduler/slicing/slice_scheduler.h @@ -57,7 +57,7 @@ class slice_scheduler { } - /// Get the slice priority as a candidate, considering also past decisions by the slice scheduler for this slot. + /// Determines the slice candidate priority. priority_type get_prio(bool is_dl, slot_count_type current_slot_count, bool slice_resched) const; }; diff --git a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp index 3a7f35bba2..a80e66a645 100644 --- a/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp +++ b/tests/unittests/scheduler/slicing/slice_scheduler_test.cpp @@ -157,12 +157,13 @@ class rb_ratio_slice_scheduler_test : public slice_scheduler_test, public ::test const ue_configuration* add_ue(du_ue_index_t ue_idx) { - auto req = test_cfg.get_default_ue_config_request(); - req.ue_index = ue_idx; - req.crnti = to_rnti(0x4601 + ue_idx); - req.starts_in_fallback = false; - (*req.cfg.lc_config_list)[2].rrm_policy.plmn_id = "00101"; - (*req.cfg.lc_config_list)[2].rrm_policy.s_nssai = s_nssai_t{1}; + const unsigned drb1_idx = 2; + auto req = test_cfg.get_default_ue_config_request(); + req.ue_index = ue_idx; + req.crnti = to_rnti(0x4601 + ue_idx); + req.starts_in_fallback = false; + (*req.cfg.lc_config_list)[drb1_idx].rrm_policy.plmn_id = "00101"; + (*req.cfg.lc_config_list)[drb1_idx].rrm_policy.s_nssai = s_nssai_t{1}; return slice_scheduler_test::add_ue(req); } }; From d9339331f3cee122601f4b14439d63c3b8b149b8 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:52:27 +0200 Subject: [PATCH 42/49] ci,builder: make builder script more robust when creating symlinks --- docker/scripts/builder.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docker/scripts/builder.sh b/docker/scripts/builder.sh index c95210e3a3..84325b361f 100755 --- a/docker/scripts/builder.sh +++ b/docker/scripts/builder.sh @@ -169,11 +169,14 @@ if [[ -n "$DPDK_VERSION" ]]; then # Create alias for _avx2 / _avx512 versions if [[ $DPDK_VERSION == *_* ]]; then DPDK_VERSION_BASE=${DPDK_VERSION%_*} + # If the folder without _ exists, try to remove it if [ -e "/opt/dpdk/$DPDK_VERSION_BASE" ]; then - rm -Rf "/opt/dpdk/$DPDK_VERSION_BASE" + rm -Rf "/opt/dpdk/$DPDK_VERSION_BASE" || echo "/opt/dpdk/$DPDK_VERSION_BASE already exists!!" 
+ fi + # If we can create the hard line, we change DPDK_VERSION to the new version without _ + if ln -s "/opt/dpdk/$DPDK_VERSION" "/opt/dpdk/$DPDK_VERSION_BASE"; then + DPDK_VERSION=$DPDK_VERSION_BASE fi - ln -s "/opt/dpdk/$DPDK_VERSION" "/opt/dpdk/$DPDK_VERSION_BASE" - DPDK_VERSION=$DPDK_VERSION_BASE fi export DPDK_DIR="/opt/dpdk/$DPDK_VERSION" echo "DPDK_DIR set to $DPDK_DIR" From b9b565eb305c407c1af374a9469f37e28430e2c5 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:52:43 +0200 Subject: [PATCH 43/49] docker: fix dpdk arch build --- docker/scripts/build_dpdk.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/scripts/build_dpdk.sh b/docker/scripts/build_dpdk.sh index ed3bb8611e..5d000f55cd 100755 --- a/docker/scripts/build_dpdk.sh +++ b/docker/scripts/build_dpdk.sh @@ -24,7 +24,7 @@ main() { cd /tmp curl -L "https://fast.dpdk.org/rel/dpdk-${dpdk_version}.tar.xz" | tar xJf - cd dpdk*"${dpdk_version}" - meson setup build --prefix "/opt/dpdk/${dpdk_version}" -Dc_args="-march=${arch}" + meson setup build --prefix "/opt/dpdk/${dpdk_version}" -Dcpu_instruction_set="${arch}" ninja -j"${ncores}" -C build install } From 70d5c029a8dbbe67074b894334624d0b36bff79f Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:53:47 +0200 Subject: [PATCH 44/49] ci: rename max test execution time variable in build jobs --- .gitlab/ci/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index bf10a527d3..b5ae2b340a 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -114,7 +114,7 @@ variables: FORCE_DEBUG_INFO: "" # Empty for cmake default AUTO_DETECT_ISA: "" # TEST - CTEST_TIMEOUT: 0 + TEST_EXECUTION_TIMEOUT: 0 # CI SAVE_ARTIFACTS: "" # Empty by default KUBERNETES_CPU_REQUEST: 6 @@ -273,7 +273,7 @@ variables: echo "=============================================================================================" status_file=$(mktemp) - timeout ${CTEST_TIMEOUT} \ + timeout ${TEST_EXECUTION_TIMEOUT} \ bash -c "${CTEST_CMD} && echo 0 > ${status_file} || echo 1 > ${status_file}" \ && ret=$(cat ${status_file}) || ret=124 @@ -709,7 +709,7 @@ valgrind changed tests: variables: CLEAN_BUILD: "False" FINGERPRINT: "fingerprints.csv" - CTEST_TIMEOUT: 20m + TEST_EXECUTION_TIMEOUT: 20m SAVE_ARTIFACTS: "True" script: - git config advice.detachedHead false @@ -729,7 +729,7 @@ valgrind changed tests: echo "##################################################" git checkout origin/$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME build_srsgnb - - echo "This test execution has a timeout of ${CTEST_TIMEOUT}. If the execution excess that timer, the job will be marked as allowed_to_fail. This will avoid the job to have a huge duration in a MR pipeline." + - echo "This test execution has a timeout of ${TEST_EXECUTION_TIMEOUT}. If the execution excess that timer, the job will be marked as allowed_to_fail. This will avoid the job to have a huge duration in a MR pipeline." 
- launch_tests cache: - !reference [.fetch_src_cache, cache] From 571c355aff92a7d78568e538a543f25d36005831 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:54:07 +0200 Subject: [PATCH 45/49] ci: fix test timeout in valgrind jobs --- .gitlab/ci/build.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index b5ae2b340a..a21737cf1a 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -252,7 +252,10 @@ variables: ;; valgrind) G_DEBUG=gc-friendly G_SLICE=always-malloc - ctest_extra="-T memcheck -LE NO_MEMCHECK --test-timeout 10800" + # Default timeout per test is 1500 (25 min) + export CTEST_TIMEOUT=2700 + export CTEST_TEST_TIMEOUT=${CTEST_TIMEOUT} + ctest_extra="-T memcheck -LE NO_MEMCHECK --timeout ${CTEST_TIMEOUT}" ;; esac if [ -n "${FINGERPRINT}" ]; then From 9314c69b1a656b168b968672f24558113585622b Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:57:01 +0200 Subject: [PATCH 46/49] ci: fix march flags in ubuntu 20 package --- .gitlab/ci/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index a21737cf1a..1ef93d6d06 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -926,6 +926,7 @@ package: parallel: matrix: - OS_VERSION: "20.04" + extraopts: -DAUTO_DETECT_ISA=False -DCMAKE_CXX_FLAGS="-march=x86-64" - OS_VERSION: "22.04" - OS_VERSION: "23.10" - OS_VERSION: "24.04" From 1985ad465dd08e903c5a6bf92335341e53aef713 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 12:57:13 +0200 Subject: [PATCH 47/49] ci: fix basic dpdk jobs --- .gitlab/ci/build.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 1ef93d6d06..63745b6fa7 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -1983,16 +1983,12 @@ basic avx512 dpdk: ENABLE_UHD: "False" ENABLE_ZEROMQ: "False" ENABLE_DPDK: "True" - DPDK_VERSION: "23.11" + DPDK_VERSION: "23.11_avx512" AUTO_DETECT_ISA: "False" BUILD_ARGS: -DCMAKE_CXX_FLAGS="-march=x86-64-v4" FORCE_DEBUG_INFO: "True" ASSERT_LEVEL: MINIMAL SAVE_ARTIFACTS: "True" - KUBERNETES_CPU_REQUEST: 14 - KUBERNETES_CPU_LIMIT: 14 - KUBERNETES_MEMORY_REQUEST: 20Gi - KUBERNETES_MEMORY_LIMIT: 20Gi tags: ["${AMD64_AVX512_TAG}"] artifacts: <<: *build_artifacts From 9d7719bd0b18a64acd3d08807c929259c3111429 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 13:01:08 +0200 Subject: [PATCH 48/49] ci,docker: change release with debug images name --- .gitlab/ci/docker.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab/ci/docker.yml b/.gitlab/ci/docker.yml index 725bd54407..b8cd7c9881 100644 --- a/.gitlab/ci/docker.yml +++ b/.gitlab/ci/docker.yml @@ -309,7 +309,7 @@ srsran image split72: ARCH: x86-64-v3 TAG: amd64-avx2 PLATFORM: amd64 - - SUFFIX: release_debug_avx2 + - SUFFIX: release_with_debug_avx2 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v3 TAG: amd64-avx2 @@ -320,7 +320,7 @@ srsran image split72: ARCH: x86-64-v4 TAG: amd64-avx2-avx512 PLATFORM: amd64 - - SUFFIX: release_debug_avx512 + - SUFFIX: release_with_debug_avx512 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v4 TAG: amd64-avx2-avx512 @@ -331,7 +331,7 @@ srsran image split72: ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 PLATFORM: arm64 - - SUFFIX: release_debug_arm + - SUFFIX: release_with_debug_arm EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: 
armv8.2-a+crypto+fp16+dotprod TAG: arm64 @@ -350,7 +350,7 @@ srsran image split8: ARCH: x86-64-v3 TAG: amd64-avx2 PLATFORM: amd64 - - SUFFIX: release_debug_avx2 + - SUFFIX: release_with_debug_avx2 EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: x86-64-v3 TAG: amd64-avx2 @@ -361,7 +361,7 @@ srsran image split8: ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 PLATFORM: arm64 - - SUFFIX: release_debug_arm + - SUFFIX: release_with_debug_arm EXTRA_CMAKE_ARGS: -DAUTO_DETECT_ISA=Off -DFORCE_DEBUG_INFO=On ARCH: armv8.2-a+crypto+fp16+dotprod TAG: arm64 From 6d1b220da8efff82e1d4391f68b1c0f62d1f4a94 Mon Sep 17 00:00:00 2001 From: asaezper Date: Tue, 18 Jun 2024 18:17:38 +0200 Subject: [PATCH 49/49] ci: update retina --- .gitlab/ci/e2e/.env | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab/ci/e2e/.env b/.gitlab/ci/e2e/.env index 41ff6d6aee..81109095af 100644 --- a/.gitlab/ci/e2e/.env +++ b/.gitlab/ci/e2e/.env @@ -1,12 +1,12 @@ SRSGNB_REGISTRY_URI=registry.gitlab.com/softwareradiosystems/srsgnb RETINA_REGISTRY_PREFIX=registry.gitlab.com/softwareradiosystems/ci/retina -RETINA_VERSION=0.49.12 +RETINA_VERSION=0.49.16 UBUNTU_VERSION=24.04 AMARISOFT_VERSION=2023-09-08 SRSUE_VERSION=23.11 OPEN5GS_VERSION=2.7.0 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin METRICS_SERVER_VERSION=1.7.2 -DPDK_VERSION=23.11_avx512 +DPDK_VERSION=23.11 ZMQ_HOSTLABEL_0=kubernetes.io/hostname=k8s-worker-vm2 ZMQ_HOSTLABEL_1=kubernetes.io/hostname=k8s-worker-vm2
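
Note on the slice prioritization reworked in "sched: extend unit tests for slice scheduler and rewrote the prioritization mechanism": the comment block in slice_scheduler.cpp describes a composite priority made of a slice-traffic field, a delay field and a round-robin field concatenated into a single integer, so that a plain operator< (and hence std::priority_queue) orders the candidates. The sketch below only illustrates that bit-packing idea under the 16/8/8-bit split stated in those comments; the names (pack_prio, min_rb_pending, slots_since_sched, rr_token) are hypothetical stand-ins and none of the srsRAN types are used, so this is not the actual implementation.

#include <cstdint>
#include <iostream>
#include <queue>
#include <utility>

using priority_type = std::uint32_t;

constexpr priority_type skip_prio  = 0; // inactive slices are never queued as candidates
constexpr unsigned      delay_bits = 8;
constexpr unsigned      rr_bits    = 8;

// Packs [ traffic prio | delay prio | round-robin ] into one integer so that a larger
// value always means a more urgent slice candidate.
priority_type pack_prio(bool active, bool min_rb_pending, unsigned slots_since_sched, unsigned rr_token)
{
  if (!active) {
    return skip_prio;
  }
  // 1. Traffic priority: slices still owed their minimum RB ratio beat slices without guarantees.
  priority_type slice_prio = min_rb_pending ? 2U : 1U;
  // 2. Delay priority: slices not scheduled for longer get a boost (wraps at 8 bits).
  priority_type delay_prio = slots_since_sched & ((1U << delay_bits) - 1U);
  // 3. Round-robin tie-breaker among slices with equal traffic and delay priority.
  priority_type rr_prio = rr_token & ((1U << rr_bits) - 1U);
  return (slice_prio << (delay_bits + rr_bits)) | (delay_prio << rr_bits) | rr_prio;
}

int main()
{
  // Two hypothetical slices: slice 1 is still below its minimum RB ratio, slice 0 has no guarantees.
  std::priority_queue<std::pair<priority_type, unsigned>> candidates;
  candidates.emplace(pack_prio(true, /*min_rb_pending=*/true, 1, 1), 1U);
  candidates.emplace(pack_prio(true, /*min_rb_pending=*/false, 1, 0), 0U);
  std::cout << "first candidate: slice " << candidates.top().second << '\n'; // prints slice 1
  return 0;
}

With this packing, the minRB-pending slice is always popped first, starvation raises a slice's rank over time, and equal-priority slices alternate via the round-robin token, which matches the ordering the new unit tests exercise.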