diff --git a/README b/README index 6513bb320..59bb93111 100644 --- a/README +++ b/README @@ -1036,7 +1036,14 @@ XLIO_PRINT_REPORT Print a human readable report of resources usage at exit. The report is printed during termination phase. Therefore, It can be missed if the process is killed with the SIGKILL signal. -Default: 0 (Disabled) +Default value: auto + +auto + Print report if anomaly is detected +on + Print report +off + Disabled XLIO Monitoring & Performance Counters ===================================== diff --git a/src/core/dev/allocator.cpp b/src/core/dev/allocator.cpp index fd8b7d1c3..b168493e2 100644 --- a/src/core/dev/allocator.cpp +++ b/src/core/dev/allocator.cpp @@ -208,7 +208,7 @@ void xlio_allocator::print_hugepages_warning(size_t requested_size) vlog_printf(VLOG_WARNING, "or switch to a different memory allocation type:\n"); vlog_printf(VLOG_WARNING, " %s=ANON\n", SYS_VAR_MEM_ALLOC_TYPE); - g_hugepage_mgr.print_report(true); + g_hugepage_mgr.print_report(VLOG_INFO, false, true); vlog_printf(VLOG_WARNING, "************************************************************\n"); } else { diff --git a/src/core/dev/buffer_pool.cpp b/src/core/dev/buffer_pool.cpp index e070022d2..966524af7 100644 --- a/src/core/dev/buffer_pool.cpp +++ b/src/core/dev/buffer_pool.cpp @@ -200,7 +200,7 @@ void buffer_pool::print_report(vlog_levels_t log_level /*=VLOG_DEBUG*/) } /* static */ -void buffer_pool::print_full_report(vlog_levels_t log_level) +void buffer_pool::print_full_report(vlog_levels_t log_level, bool print_only_critical /*=false*/) { std::vector pools = {g_buffer_pool_rx_rwqe, g_buffer_pool_rx_stride, g_buffer_pool_tx, g_buffer_pool_zc}; @@ -209,7 +209,9 @@ void buffer_pool::print_full_report(vlog_levels_t log_level) for (auto &pool : pools) { if (pool != nullptr) { is_error = is_error || pool->m_p_bpool_stat->n_buffer_pool_no_bufs; - pool->print_report(log_level); + if (!print_only_critical) { + pool->print_report(log_level); + } } } diff --git 
a/src/core/dev/buffer_pool.h b/src/core/dev/buffer_pool.h index b697d3ee2..df56c3fe0 100644 --- a/src/core/dev/buffer_pool.h +++ b/src/core/dev/buffer_pool.h @@ -53,7 +53,7 @@ class buffer_pool { void register_memory(ib_ctx_handler *p_ib_ctx_h); void print_val_tbl(); void print_report(vlog_levels_t log_level = VLOG_DEBUG); - static void print_full_report(vlog_levels_t log_level); + static void print_full_report(vlog_levels_t log_level, bool print_only_critical = false); uint32_t find_lkey_by_ib_ctx_thread_safe(ib_ctx_handler *p_ib_ctx_h); diff --git a/src/core/dev/cq_mgr_rx.cpp b/src/core/dev/cq_mgr_rx.cpp index bd77427da..4ff3f51a0 100644 --- a/src/core/dev/cq_mgr_rx.cpp +++ b/src/core/dev/cq_mgr_rx.cpp @@ -23,6 +23,8 @@ #include "hw_queue_rx.h" #include "ring_simple.h" +#include "core/dev/net_device_table_mgr.h" + #define MODULE_NAME "cq_mgr_rx" #define cq_logpanic __log_info_panic @@ -148,6 +150,8 @@ cq_mgr_rx::~cq_mgr_rx() ENDIF_VERBS_FAILURE; VALGRIND_MAKE_MEM_UNDEFINED(m_p_ibv_cq, sizeof(ibv_cq)); + g_p_net_device_table_mgr->increase_closed_rings_rx_cq_drop_counter( + m_p_cq_stat->n_rx_hw_pkt_drops); statistics_print(); xlio_stats_instance_remove_cq_block(m_p_cq_stat); @@ -156,10 +160,13 @@ cq_mgr_rx::~cq_mgr_rx() void cq_mgr_rx::statistics_print() { - if (m_p_cq_stat->n_rx_pkt_drop || m_p_cq_stat->n_rx_sw_queue_len || - m_p_cq_stat->n_rx_drained_at_once_max || m_p_cq_stat->n_buffer_pool_len) { + if (m_p_cq_stat->n_rx_sw_pkt_drops || m_p_cq_stat->n_rx_hw_pkt_drops || + m_p_cq_stat->n_rx_sw_queue_len || m_p_cq_stat->n_rx_drained_at_once_max || + m_p_cq_stat->n_buffer_pool_len) { cq_logdbg_no_funcname("Packets dropped: %12llu", - (unsigned long long int)m_p_cq_stat->n_rx_pkt_drop); + (unsigned long long int)m_p_cq_stat->n_rx_sw_pkt_drops); + cq_logdbg_no_funcname("HW RX Packets dropped: %12llu", + (unsigned long long int)m_p_cq_stat->n_rx_hw_pkt_drops); cq_logdbg_no_funcname("Drained max: %17u", m_p_cq_stat->n_rx_drained_at_once_max); cq_logdbg_no_funcname("CQE 
errors: %18llu", (unsigned long long int)m_p_cq_stat->n_rx_cqe_error); @@ -360,7 +367,7 @@ bool cq_mgr_rx::compensate_qp_poll_success(mem_buf_desc_t *buff_cur) m_debt -= buffers; m_p_cq_stat->n_buffer_pool_len = m_rx_pool.size(); } else if (m_b_sysvar_cq_keep_qp_full || m_debt >= (int)m_hqrx_ptr->m_rx_num_wr) { - m_p_cq_stat->n_rx_pkt_drop++; + m_p_cq_stat->n_rx_sw_pkt_drops++; m_hqrx_ptr->post_recv_buffer(buff_cur); --m_debt; return true; diff --git a/src/core/dev/cq_mgr_rx.h b/src/core/dev/cq_mgr_rx.h index 8bc45da2f..9b6eba304 100644 --- a/src/core/dev/cq_mgr_rx.h +++ b/src/core/dev/cq_mgr_rx.h @@ -115,6 +115,7 @@ class cq_mgr_rx { int reclaim_recv_single_buffer(mem_buf_desc_t *rx_reuse); void get_cq_event(int count = 1) { xlio_ib_mlx5_get_cq_event(&m_mlx5_cq, count); }; + uint64_t get_n_rx_hw_pkt_drops() { return m_p_cq_stat->n_rx_hw_pkt_drops; } protected: /** diff --git a/src/core/dev/cq_mgr_rx_regrq.cpp b/src/core/dev/cq_mgr_rx_regrq.cpp index 56e60393e..a5254d7f3 100644 --- a/src/core/dev/cq_mgr_rx_regrq.cpp +++ b/src/core/dev/cq_mgr_rx_regrq.cpp @@ -117,7 +117,9 @@ void cq_mgr_rx_regrq::cqe_to_mem_buff_desc(struct xlio_mlx5_cqe *cqe, p_rx_wc_buf_desc->rx.tls_decrypted = (cqe->pkt_info >> 3) & 0x3; #endif /* DEFINED_UTLS */ p_rx_wc_buf_desc->rx.timestamps.hw_raw = ntohll(cqe->timestamp); - p_rx_wc_buf_desc->rx.flow_tag_id = ntohl((uint32_t)(cqe->sop_drop_qpn)); + uint32_t sop_rxdrop_qpn_flowtag_h_byte = ntohl(cqe->sop_rxdrop_qpn_flowtag); + p_rx_wc_buf_desc->rx.flow_tag_id = sop_rxdrop_qpn_flowtag_h_byte & 0x00FFFFFF; + m_p_cq_stat->n_rx_hw_pkt_drops += sop_rxdrop_qpn_flowtag_h_byte >> 24; p_rx_wc_buf_desc->rx.is_sw_csum_need = !(m_b_is_rx_hw_csum_on && (cqe->hds_ip_ext & MLX5_CQE_L4_OK) && (cqe->hds_ip_ext & MLX5_CQE_L3_OK)); @@ -204,7 +206,7 @@ int cq_mgr_rx_regrq::drain_and_proccess(uintptr_t *p_recycle_buffers_last_wr_id if (cqe_process_rx(buff, status)) { if (p_recycle_buffers_last_wr_id) { - m_p_cq_stat->n_rx_pkt_drop++; + 
m_p_cq_stat->n_rx_sw_pkt_drops++; reclaim_recv_buffer_helper(buff); } else { bool procces_now = is_eth_tcp_frame(buff); @@ -294,7 +296,7 @@ bool cq_mgr_rx_regrq::poll_and_process_element_rx(uint64_t *p_cq_poll_sn, void * process_recv_buffer(buff, pv_fd_ready_array); } } else { - m_p_cq_stat->n_rx_pkt_drop++; + m_p_cq_stat->n_rx_sw_pkt_drops++; if (++m_debt >= (int)m_n_sysvar_rx_num_wr_to_post_recv) { compensate_qp_poll_failed(); } diff --git a/src/core/dev/cq_mgr_rx_strq.cpp b/src/core/dev/cq_mgr_rx_strq.cpp index 5a889cdd1..9c3e9f5a0 100644 --- a/src/core/dev/cq_mgr_rx_strq.cpp +++ b/src/core/dev/cq_mgr_rx_strq.cpp @@ -224,7 +224,9 @@ inline bool cq_mgr_rx_strq::strq_cqe_to_mem_buff_desc(struct xlio_mlx5_cqe *cqe, _current_wqe_consumed_bytes += _hot_buffer_stride->sz_buffer; _hot_buffer_stride->rx.timestamps.hw_raw = ntohll(cqe->timestamp); - _hot_buffer_stride->rx.flow_tag_id = ntohl((uint32_t)(cqe->sop_drop_qpn)); + uint32_t sop_rxdrop_qpn_flowtag_h_byte = ntohl(cqe->sop_rxdrop_qpn_flowtag); + _hot_buffer_stride->rx.flow_tag_id = sop_rxdrop_qpn_flowtag_h_byte & 0x00FFFFFF; + m_p_cq_stat->n_rx_hw_pkt_drops += sop_rxdrop_qpn_flowtag_h_byte >> 24; _hot_buffer_stride->rx.is_sw_csum_need = !(m_b_is_rx_hw_csum_on && (cqe->hds_ip_ext & MLX5_CQE_L4_OK) && (cqe->hds_ip_ext & MLX5_CQE_L3_OK)); @@ -320,7 +322,7 @@ int cq_mgr_rx_strq::drain_and_proccess_helper(mem_buf_desc_t *buff, mem_buf_desc ++ret_total; if (process_strq_cq_element_rx(buff, status)) { if (p_recycle_buffers_last_wr_id) { - m_p_cq_stat->n_rx_pkt_drop++; + m_p_cq_stat->n_rx_sw_pkt_drops++; reclaim_recv_buffer_helper(buff); } else { bool procces_now = is_eth_tcp_frame(buff); diff --git a/src/core/dev/net_device_table_mgr.cpp b/src/core/dev/net_device_table_mgr.cpp index d9da7f676..e7f4bf037 100644 --- a/src/core/dev/net_device_table_mgr.cpp +++ b/src/core/dev/net_device_table_mgr.cpp @@ -47,7 +47,7 @@ net_device_table_mgr::net_device_table_mgr() m_num_devices = 0; m_global_ring_epfd = 0; m_max_mtu = 0; - + 
m_closed_rings_rx_cq_drop_counter = 0; ndtm_logdbg(""); m_global_ring_epfd = SYSCALL(epoll_create, 48); @@ -457,6 +457,30 @@ int net_device_table_mgr::global_ring_epfd_get() return m_global_ring_epfd; } +uint64_t net_device_table_mgr::global_get_rx_drop_counter() +{ + // coverity[missing_lock:FALSE] /*Turn off coverity missing_lock check*/ + uint64_t accumulator = this->m_closed_rings_rx_cq_drop_counter; + std::for_each(g_p_net_device_table_mgr->m_net_device_map_index.begin(), + g_p_net_device_table_mgr->m_net_device_map_index.end(), + [&accumulator](const auto &net_dev_map_iter) { + accumulator += net_dev_map_iter.second->get_accumulative_rx_cq_drop_counter(); + }); + return accumulator; +} + +void net_device_table_mgr::print_report(vlog_levels_t log_level, + bool print_only_critical /*=false*/) +{ + uint64_t accumulator = global_get_rx_drop_counter(); + if (print_only_critical && !accumulator) { + return; + } + vlog_printf(log_level, "*********************************\n"); + vlog_printf(log_level, "Total HW RX drop counter: %lu\n", accumulator); + vlog_printf(log_level, "*********************************\n"); +} + void net_device_table_mgr::global_ring_wait_for_notification_and_process_element( uint64_t *p_poll_sn, void *pv_fd_ready_array /*=NULL*/) { diff --git a/src/core/dev/net_device_table_mgr.h b/src/core/dev/net_device_table_mgr.h index 6aec15eed..fb4214018 100644 --- a/src/core/dev/net_device_table_mgr.h +++ b/src/core/dev/net_device_table_mgr.h @@ -72,6 +72,13 @@ class net_device_table_mgr : public cache_table_mgr, publ int global_ring_epfd_get(); + /* + * This will get accumulated RX out of buffer drops + * for all net devices. 
+ */ + uint64_t global_get_rx_drop_counter(void); + void print_report(vlog_levels_t log_level, bool print_only_critical = false); + void handle_timer_expired(void *user_data); uint32_t get_max_mtu() const { return m_max_mtu; } @@ -80,6 +87,13 @@ void get_net_devices(local_dev_vector &vec); + void increase_closed_rings_rx_cq_drop_counter(uint64_t count) + { + m_closed_rings_rx_cq_drop_counter_lock.lock(); + m_closed_rings_rx_cq_drop_counter += count; + m_closed_rings_rx_cq_drop_counter_lock.unlock(); + } + private: void del_link_event(const netlink_link_info *info); void new_link_event(const netlink_link_info *info); @@ -98,6 +112,8 @@ int m_global_ring_pipe_fds[2]; uint32_t m_max_mtu; + lock_mutex m_closed_rings_rx_cq_drop_counter_lock; + uint64_t m_closed_rings_rx_cq_drop_counter; }; extern net_device_table_mgr *g_p_net_device_table_mgr; diff --git a/src/core/dev/net_device_val.cpp b/src/core/dev/net_device_val.cpp index 0577cab28..399308c08 100644 --- a/src/core/dev/net_device_val.cpp +++ b/src/core/dev/net_device_val.cpp @@ -905,7 +905,15 @@ bool net_device_val::update_active_slaves() } return 0; } - +uint64_t net_device_val::get_accumulative_rx_cq_drop_counter() +{ + uint64_t accumulator = 0; + std::for_each(m_h_ring_map.begin(), m_h_ring_map.end(), + [&accumulator](const auto &m_h_ring_map_iter) { + accumulator += m_h_ring_map_iter.second.first->get_rx_cq_out_of_buffer_drop(); + }); + return accumulator; +}; void net_device_val::update_netvsc_slaves(int if_index, int if_flags) { slave_data_t *s = nullptr; diff --git a/src/core/dev/net_device_val.h b/src/core/dev/net_device_val.h index 44e8e96ba..e081cc5fb 100644 --- a/src/core/dev/net_device_val.h +++ b/src/core/dev/net_device_val.h @@ -228,6 +228,7 @@ class net_device_val { void register_to_ibverbs_events(event_handler_ibverbs *handler); void unregister_to_ibverbs_events(event_handler_ibverbs 
*handler); uint32_t get_priority_by_tc_class(uint32_t tc_class); + uint64_t get_accumulative_rx_cq_drop_counter(); protected: void set_slave_array(); diff --git a/src/core/dev/ring.h b/src/core/dev/ring.h index 755a6acc5..6f08e426c 100644 --- a/src/core/dev/ring.h +++ b/src/core/dev/ring.h @@ -55,6 +55,7 @@ class ring { virtual ~ring(); virtual void print_val(); + virtual uint64_t get_rx_cq_out_of_buffer_drop() = 0; virtual bool attach_flow(flow_tuple &flow_spec_5t, sockinfo *sink, bool force_5t = false) = 0; virtual bool detach_flow(flow_tuple &flow_spec_5t, sockinfo *sink) = 0; diff --git a/src/core/dev/ring_bond.cpp b/src/core/dev/ring_bond.cpp index e16cb2926..811035252 100644 --- a/src/core/dev/ring_bond.cpp +++ b/src/core/dev/ring_bond.cpp @@ -900,6 +900,14 @@ int ring_bond::socketxtreme_poll(struct xlio_socketxtreme_completion_t *, unsign { return 0; } +uint64_t ring_bond::get_rx_cq_out_of_buffer_drop() +{ + uint64_t accumulator = 0; + for (uint32_t i = 0; i < m_recv_rings.size(); i++) { + accumulator += m_recv_rings[i]->get_rx_cq_out_of_buffer_drop(); + } + return accumulator; +} void ring_bond::slave_destroy(int if_index) { diff --git a/src/core/dev/ring_bond.h b/src/core/dev/ring_bond.h index 0162773a2..1a29a0275 100644 --- a/src/core/dev/ring_bond.h +++ b/src/core/dev/ring_bond.h @@ -89,6 +89,7 @@ class ring_bond : public ring { { m_xmit_rings[id]->reset_inflight_zc_buffers_ctx(id, ctx); } + virtual uint64_t get_rx_cq_out_of_buffer_drop(); protected: void update_cap(ring_slave *slave = nullptr); diff --git a/src/core/dev/ring_simple.cpp b/src/core/dev/ring_simple.cpp index 7624de52c..fbb93d97c 100644 --- a/src/core/dev/ring_simple.cpp +++ b/src/core/dev/ring_simple.cpp @@ -1009,6 +1009,11 @@ void ring_simple::modify_cq_moderation(uint32_t period, uint32_t count) priv_ibv_modify_cq_moderation(m_p_cq_mgr_rx->get_ibv_cq_hndl(), period, count); } +uint64_t ring_simple::get_rx_cq_out_of_buffer_drop() +{ + return m_p_cq_mgr_rx->get_n_rx_hw_pkt_drops(); +} + 
void ring_simple::adapt_cq_moderation() { if (m_lock_ring_rx.trylock()) { diff --git a/src/core/dev/ring_simple.h b/src/core/dev/ring_simple.h index e2b2c9a24..daa2c0162 100644 --- a/src/core/dev/ring_simple.h +++ b/src/core/dev/ring_simple.h @@ -111,6 +111,8 @@ class ring_simple : public ring_slave { m_p_ring_stat->simple.n_tx_tso_byte_count += bytes; } + virtual uint64_t get_rx_cq_out_of_buffer_drop() override; + #ifdef DEFINED_UTLS bool tls_tx_supported(void) override { return m_tls.tls_tx; } bool tls_rx_supported(void) override { return m_tls.tls_rx; } diff --git a/src/core/dev/ring_tap.h b/src/core/dev/ring_tap.h index f760b35f0..ee74c2868 100644 --- a/src/core/dev/ring_tap.h +++ b/src/core/dev/ring_tap.h @@ -94,6 +94,7 @@ class ring_tap : public ring_slave { NOT_IN_USE(id); return 0; } + virtual uint64_t get_rx_cq_out_of_buffer_drop() { return 0; } virtual bool is_tso(void) { return false; } inline void set_tap_data_available() { m_tap_data_available = true; } diff --git a/src/core/ib/mlx5/ib_mlx5.h b/src/core/ib/mlx5/ib_mlx5.h index 45c898886..b54e9999d 100644 --- a/src/core/ib/mlx5/ib_mlx5.h +++ b/src/core/ib/mlx5/ib_mlx5.h @@ -178,7 +178,7 @@ typedef struct xlio_mlx5_cqe { uint8_t rsvd4[4]; __be32 byte_cnt; __be64 timestamp; - __be32 sop_drop_qpn; + __be32 sop_rxdrop_qpn_flowtag; __be16 wqe_counter; uint8_t rsvd5; uint8_t op_own; diff --git a/src/core/main.cpp b/src/core/main.cpp index 4fb108d5f..fedac761f 100644 --- a/src/core/main.cpp +++ b/src/core/main.cpp @@ -94,9 +94,11 @@ static int free_libxlio_resources() g_b_exit = true; - if (safe_mce_sys().print_report) { - buffer_pool::print_full_report(VLOG_INFO); - g_hugepage_mgr.print_report(); + if (safe_mce_sys().print_report != option_3::OFF) { + bool print_only_critical = (safe_mce_sys().print_report == option_3::AUTO); + buffer_pool::print_full_report(VLOG_INFO, print_only_critical); + g_hugepage_mgr.print_report(VLOG_INFO, print_only_critical); + g_p_net_device_table_mgr->print_report(VLOG_INFO, 
print_only_critical); } // Destroy polling groups before fd_collection to clear XLIO sockets from the fd_collection @@ -471,8 +473,9 @@ void print_xlio_global_settings() VLOG_PARAM_STRING("SegFault Backtrace", safe_mce_sys().handle_segfault, MCE_DEFAULT_HANDLE_SIGFAULT, SYS_VAR_HANDLE_SIGSEGV, safe_mce_sys().handle_segfault ? "Enabled " : "Disabled"); - VLOG_PARAM_STRING("Print a report", safe_mce_sys().print_report, MCE_DEFAULT_PRINT_REPORT, - SYS_VAR_PRINT_REPORT, safe_mce_sys().print_report ? "Enabled " : "Disabled"); + VLOG_PARAM_STRING("Print a report", option_3::to_str(safe_mce_sys().print_report), + option_3::to_str(MCE_DEFAULT_PRINT_REPORT), SYS_VAR_PRINT_REPORT, + option_3::to_str(safe_mce_sys().print_report)); VLOG_PARAM_STRING("Quick start", safe_mce_sys().quick_start, MCE_DEFAULT_QUICK_START, SYS_VAR_QUICK_START, safe_mce_sys().quick_start ? "Enabled " : "Disabled"); diff --git a/src/core/util/hugepage_mgr.cpp b/src/core/util/hugepage_mgr.cpp index c7cddf81c..583eca4be 100644 --- a/src/core/util/hugepage_mgr.cpp +++ b/src/core/util/hugepage_mgr.cpp @@ -192,8 +192,13 @@ void hugepage_mgr::dealloc_hugepages(void *ptr, size_t size) } } -void hugepage_mgr::print_report(bool short_report /*=false*/) +void hugepage_mgr::print_report(vlog_levels_t log_level, bool print_only_critical /*=false*/, + bool short_report /*=false*/) { + if (print_only_critical) { + return; + } + std::lock_guard lock(m_lock); const size_t ONE_MB = 1024U * 1024U; @@ -203,13 +208,13 @@ void hugepage_mgr::print_report(bool short_report /*=false*/) read_sysfs(); get_supported_hugepages(hugepages); - vlog_printf(VLOG_INFO, "Hugepages info:\n"); + vlog_printf(log_level, "Hugepages info:\n"); if (safe_mce_sys().hugepage_size) { - vlog_printf(VLOG_INFO, " User forced to use %lu kB hugepages.\n", + vlog_printf(log_level, " User forced to use %lu kB hugepages.\n", (safe_mce_sys().hugepage_size) / 1024U); } for (size_t hugepage : hugepages) { - vlog_printf(VLOG_INFO, " %zu kB : total=%u 
free=%u\n", hugepage / 1024U, + vlog_printf(log_level, " %zu kB : total=%u free=%u\n", hugepage / 1024U, get_total_hugepages(hugepage), get_free_hugepages(hugepage)); } @@ -217,15 +222,15 @@ void hugepage_mgr::print_report(bool short_report /*=false*/) return; } - vlog_printf(VLOG_INFO, "Hugepages statistics:\n"); + vlog_printf(log_level, "Hugepages statistics:\n"); for (size_t hugepage : hugepages) { - vlog_printf(VLOG_INFO, " %zu kB : allocated_pages=%u allocations=%u\n", hugepage / 1024U, + vlog_printf(log_level, " %zu kB : allocated_pages=%u allocations=%u\n", hugepage / 1024U, m_hugepages[hugepage].nr_hugepages_allocated, m_hugepages[hugepage].nr_allocations); } - vlog_printf(VLOG_INFO, " Total: allocations=%u unsatisfied=%u\n", m_stats.allocations, + vlog_printf(log_level, " Total: allocations=%u unsatisfied=%u\n", m_stats.allocations, m_stats.fails); - vlog_printf(VLOG_INFO, " Total: allocated=%zuMB requested=%zuMB unused_space=%zuMB\n", + vlog_printf(log_level, " Total: allocated=%zuMB requested=%zuMB unused_space=%zuMB\n", m_stats.total_allocated / ONE_MB, m_stats.total_requested / ONE_MB, m_stats.total_unused / ONE_MB); } diff --git a/src/core/util/hugepage_mgr.h b/src/core/util/hugepage_mgr.h index 368c2384b..88057f5f8 100644 --- a/src/core/util/hugepage_mgr.h +++ b/src/core/util/hugepage_mgr.h @@ -43,7 +43,8 @@ class hugepage_mgr { void *alloc_hugepages(size_t &size, size_t &hugepage_size); void dealloc_hugepages(void *ptr, size_t size); - void print_report(bool short_report = false); + void print_report(vlog_levels_t log_level, bool print_only_critical = false, + bool short_report = false); private: enum { diff --git a/src/core/util/sys_vars.cpp b/src/core/util/sys_vars.cpp index f29988454..386ae2f3e 100644 --- a/src/core/util/sys_vars.cpp +++ b/src/core/util/sys_vars.cpp @@ -1054,7 +1054,7 @@ void mce_sys_var::get_env_params() } if ((env_ptr = getenv(SYS_VAR_PRINT_REPORT))) { - print_report = atoi(env_ptr) ? 
true : false; + print_report = option_3::from_str(env_ptr, MCE_DEFAULT_PRINT_REPORT); } if ((env_ptr = getenv(SYS_VAR_QUICK_START))) { diff --git a/src/core/util/sys_vars.h b/src/core/util/sys_vars.h index 84d3c2964..0b44bcc02 100644 --- a/src/core/util/sys_vars.h +++ b/src/core/util/sys_vars.h @@ -311,7 +311,7 @@ struct mce_sys_var { uint32_t mce_spec; - bool print_report; + option_3::mode_t print_report; bool quick_start; vlog_levels_t log_level; uint32_t log_details; @@ -669,7 +669,7 @@ extern mce_sys_var &safe_mce_sys(); * This block consists of default values for library specific * configuration variables */ -#define MCE_DEFAULT_PRINT_REPORT (false) +#define MCE_DEFAULT_PRINT_REPORT (option_3::AUTO) #define MCE_DEFAULT_TCP_SEND_BUFFER_SIZE (1024 * 1024) #define MCE_DEFAULT_LOG_FILE ("") #define MCE_DEFAULT_CONF_FILE ("/etc/libxlio.conf") diff --git a/src/core/util/xlio_stats.h b/src/core/util/xlio_stats.h index ea077e1fd..abf73e14c 100644 --- a/src/core/util/xlio_stats.h +++ b/src/core/util/xlio_stats.h @@ -308,9 +308,10 @@ typedef struct { // CQ stat info typedef struct { uint64_t n_rx_stride_count; + uint64_t n_rx_hw_pkt_drops; uint64_t n_rx_packet_count; uint64_t n_rx_consumed_rwqe_count; - uint64_t n_rx_pkt_drop; + uint64_t n_rx_sw_pkt_drops; uint64_t n_rx_lro_packets; uint64_t n_rx_lro_bytes; uint64_t n_rx_gro_packets; diff --git a/src/stats/stats_reader.cpp b/src/stats/stats_reader.cpp index abb971af3..69f8a44b8 100644 --- a/src/stats/stats_reader.cpp +++ b/src/stats/stats_reader.cpp @@ -396,8 +396,11 @@ void update_delta_cq_stat(cq_stats_t *p_curr_cq_stats, cq_stats_t *p_prev_cq_sta int delay = user_params.interval; if (p_curr_cq_stats && p_prev_cq_stats) { p_prev_cq_stats->n_rx_drained_at_once_max = p_curr_cq_stats->n_rx_drained_at_once_max; - p_prev_cq_stats->n_rx_pkt_drop = - (p_curr_cq_stats->n_rx_pkt_drop - p_prev_cq_stats->n_rx_pkt_drop) / delay; + p_prev_cq_stats->n_rx_sw_pkt_drops = + (p_curr_cq_stats->n_rx_sw_pkt_drops - 
p_prev_cq_stats->n_rx_sw_pkt_drops) / delay; + p_prev_cq_stats->n_rx_hw_pkt_drops = + (p_curr_cq_stats->n_rx_hw_pkt_drops - p_prev_cq_stats->n_rx_hw_pkt_drops) / delay; + p_prev_cq_stats->n_rx_sw_queue_len = p_curr_cq_stats->n_rx_sw_queue_len; p_prev_cq_stats->n_buffer_pool_len = p_curr_cq_stats->n_buffer_pool_len; p_prev_cq_stats->n_rx_lro_packets = @@ -569,7 +572,10 @@ void print_cq_stats(cq_instance_block_t *p_cq_inst_arr) p_cq_stats = &p_cq_inst_arr[i].cq_stats; printf("======================================================\n"); printf("\tCQ=[%u]\n", i); - printf(FORMAT_STATS_64bit, "Packets dropped:", p_cq_stats->n_rx_pkt_drop, post_fix); + printf(FORMAT_STATS_64bit, "SW RX Packets dropped:", p_cq_stats->n_rx_sw_pkt_drops, + post_fix); + printf(FORMAT_STATS_64bit, "HW RX Packets dropped:", p_cq_stats->n_rx_hw_pkt_drops, + post_fix); printf(FORMAT_STATS_32bit, "Packets queue len:", p_cq_stats->n_rx_sw_queue_len); printf(FORMAT_STATS_32bit, "Drained max:", p_cq_stats->n_rx_drained_at_once_max); printf(FORMAT_STATS_32bit, "Buffer pool size:", p_cq_stats->n_buffer_pool_len);