2 changes: 0 additions & 2 deletions config/m4/verbs.m4
@@ -128,7 +128,6 @@ CHECK_VERBS_ATTRIBUTE([IBV_QPT_RAW_PACKET], [infiniband/verbs.h])
CHECK_VERBS_ATTRIBUTE([IBV_WC_WITH_VLAN], [infiniband/verbs.h])
CHECK_VERBS_ATTRIBUTE([IBV_ACCESS_ALLOCATE_MR], [infiniband/verbs.h])
CHECK_VERBS_ATTRIBUTE([IBV_QP_CREATE_SOURCE_QPN], [infiniband/verbs.h], [IBV_QP_INIT_SOURCE_QPN])
CHECK_VERBS_ATTRIBUTE([IBV_FLOW_SPEC_IB], [infiniband/verbs.h], [IBV_FLOW_SPEC_IB])
CHECK_VERBS_ATTRIBUTE([IBV_DEVICE_RAW_IP_CSUM], [infiniband/verbs.h])
CHECK_VERBS_ATTRIBUTE([IBV_SEND_IP_CSUM], [infiniband/verbs.h])
CHECK_VERBS_ATTRIBUTE([IBV_FLOW_SPEC_ACTION_TAG], [infiniband/verbs.h], [IBV_FLOW_TAG])
@@ -145,7 +144,6 @@ if test "x$vma_cv_verbs" == x2; then
CHECK_VERBS_ATTRIBUTE([IBV_EXP_WR_NOP], [infiniband/verbs_exp.h], [IBV_WR_NOP])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_ACCESS_ALLOCATE_MR], [infiniband/verbs_exp.h])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_QP_INIT_ATTR_ASSOCIATED_QPN], [infiniband/verbs_exp.h], [IBV_QP_INIT_SOURCE_QPN])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_FLOW_SPEC_IB], [infiniband/verbs_exp.h], [IBV_FLOW_SPEC_IB])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_SEND_IP_CSUM], [infiniband/verbs_exp.h])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_DEVICE_ATTR_MAX_DM_SIZE], [infiniband/verbs_exp.h], [IBV_DM])
CHECK_VERBS_ATTRIBUTE([IBV_EXP_QP_RATE_LIMIT], [infiniband/verbs_exp.h], [IBV_PACKET_PACING_CAPS])
7 changes: 0 additions & 7 deletions src/vma/Makefile.am
@@ -83,7 +83,6 @@ libvma_la_SOURCES := \
dev/net_device_entry.cpp \
dev/net_device_table_mgr.cpp \
dev/wqe_send_handler.cpp \
dev/wqe_send_ib_handler.cpp \
dev/ring.cpp \
dev/ring_bond.cpp \
dev/ring_slave.cpp \
@@ -137,8 +136,6 @@ libvma_la_SOURCES := \
proto/dst_entry_tcp.cpp \
proto/header.cpp \
proto/arp.cpp \
proto/igmp_mgr.cpp \
proto/igmp_handler.cpp \
\
sock/sockinfo.cpp \
sock/sockinfo_udp.cpp \
@@ -190,7 +187,6 @@ libvma_la_SOURCES := \
dev/ring_profile.h \
dev/ring_allocation_logic.h \
dev/wqe_send_handler.h \
dev/wqe_send_ib_handler.h \
\
event/command.h \
event/delta_timer.h \
@@ -245,14 +241,11 @@ libvma_la_SOURCES := \
proto/dst_entry_udp_mc.h \
proto/flow_tuple.h \
proto/header.h \
proto/igmp_handler.h \
proto/igmp_mgr.h \
proto/ip_address.h \
proto/ip_frag.h \
proto/L2_address.h \
proto/mem_buf_desc.h \
proto/neighbour.h \
proto/neighbour_observer.h \
proto/neighbour_table_mgr.h \
proto/netlink_socket_mgr.h \
proto/peer_key.h \
55 changes: 13 additions & 42 deletions src/vma/dev/cq_mgr.cpp
@@ -60,11 +60,9 @@ cq_mgr::cq_mgr(ring_simple* p_ring, ib_ctx_handler* p_ib_ctx_handler, int cq_siz
,m_n_sysvar_cq_poll_batch_max(safe_mce_sys().cq_poll_batch_max)
,m_n_sysvar_progress_engine_wce_max(safe_mce_sys().progress_engine_wce_max)
,m_p_cq_stat(&m_cq_stat_static) // use local copy of stats by default (on rx cq get shared memory stats)
,m_transport_type(m_p_ring->get_transport_type())
,m_p_next_rx_desc_poll(NULL)
,m_n_sysvar_rx_prefetch_bytes_before_poll(safe_mce_sys().rx_prefetch_bytes_before_poll)
,m_n_sysvar_rx_prefetch_bytes(safe_mce_sys().rx_prefetch_bytes)
,m_sz_transport_header(0)
,m_p_ib_ctx_handler(p_ib_ctx_handler)
,m_n_sysvar_rx_num_wr_to_post_recv(safe_mce_sys().rx_num_wr_to_post_recv)
,m_comp_event_channel(p_comp_event_channel)
@@ -106,25 +104,9 @@ void cq_mgr::configure(int cq_size)
}
BULLSEYE_EXCLUDE_BLOCK_END
VALGRIND_MAKE_MEM_DEFINED(m_p_ibv_cq, sizeof(ibv_cq));
switch (m_transport_type) {
case VMA_TRANSPORT_IB:
m_sz_transport_header = GRH_HDR_LEN;
break;
case VMA_TRANSPORT_ETH:
m_sz_transport_header = ETH_HDR_LEN;
break;
BULLSEYE_EXCLUDE_BLOCK_START
default:
cq_logpanic("Unknown transport type: %d", m_transport_type);
break;
BULLSEYE_EXCLUDE_BLOCK_END
}

if (m_b_is_rx) {
vma_stats_instance_create_cq_block(m_p_cq_stat);
}

if (m_b_is_rx) {
m_b_is_rx_hw_csum_on = vma_is_rx_hw_csum_supported(m_p_ib_ctx_handler->get_ibv_device_attr());
cq_logdbg("RX CSUM support = %d", m_b_is_rx_hw_csum_on);
}
@@ -499,9 +481,8 @@ mem_buf_desc_t* cq_mgr::process_cq_element_rx(vma_ibv_wc* p_wce)

VALGRIND_MAKE_MEM_DEFINED(p_mem_buf_desc->p_buffer, p_mem_buf_desc->sz_data);

prefetch_range((uint8_t*)p_mem_buf_desc->p_buffer + m_sz_transport_header,
std::min(p_mem_buf_desc->sz_data - m_sz_transport_header, (size_t)m_n_sysvar_rx_prefetch_bytes));
//prefetch((uint8_t*)p_mem_buf_desc->p_buffer + m_sz_transport_header);
prefetch_range((uint8_t*)p_mem_buf_desc->p_buffer + ETH_HDR_LEN,
std::min(p_mem_buf_desc->sz_data - ETH_HDR_LEN, (size_t)m_n_sysvar_rx_prefetch_bytes));
}

return p_mem_buf_desc;
@@ -790,29 +771,19 @@ int cq_mgr::drain_and_proccess(uintptr_t* p_recycle_buffers_last_wr_id /*=NULL*/
if (p_recycle_buffers_last_wr_id) {
m_p_cq_stat->n_rx_pkt_drop++;
reclaim_recv_buffer_helper(buff);
} else {
bool procces_now = false;
if (m_transport_type == VMA_TRANSPORT_ETH) {
procces_now = is_eth_tcp_frame(buff);
}
if (m_transport_type == VMA_TRANSPORT_IB) {
procces_now = is_ib_tcp_frame(buff);
}
} else if (is_eth_tcp_frame(buff)) {
// We process immediately all non udp/ip traffic..
if (procces_now) {
buff->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff)) {
process_recv_buffer(buff, NULL);
}
buff->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff)) {
process_recv_buffer(buff, NULL);
}
else { //udp/ip traffic we just put in the cq's rx queue
m_rx_queue.push_back(buff);
mem_buf_desc_t* buff_cur = m_rx_queue.get_and_pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
} else { //udp/ip traffic we just put in the cq's rx queue
m_rx_queue.push_back(buff);
mem_buf_desc_t* buff_cur = m_rx_queue.get_and_pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
}
}
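Note on the prefetch change above: with m_transport_type and m_sz_transport_header removed, process_cq_element_rx() prefetches at a fixed Ethernet-header offset instead of a per-transport one. Below is a minimal, self-contained sketch of that prefetch pattern; the 14-byte header length, the 64-byte cache-line stride, and the helper name are assumptions for illustration, not VMA's actual prefetch_range().

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Assumption: ETH_HDR_LEN in VMA is the standard 14-byte Ethernet header.
static const size_t kEthHdrLen = 14;

// Prefetch up to rx_prefetch_bytes of RX payload past the Ethernet header,
// mirroring the prefetch_range(...) call that replaced the
// m_sz_transport_header-based one in this diff.
static inline void prefetch_rx_payload_sketch(const uint8_t* p_buffer,
                                              size_t sz_data,
                                              size_t rx_prefetch_bytes)
{
    const uint8_t* start = p_buffer + kEthHdrLen;
    const size_t len = std::min(sz_data - kEthHdrLen, rx_prefetch_bytes);
    for (size_t off = 0; off < len; off += 64) {   // 64-byte cache lines assumed
        __builtin_prefetch(start + off, 0 /*read*/, 3 /*high temporal locality*/);
    }
}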
2 changes: 0 additions & 2 deletions src/vma/dev/cq_mgr.h
@@ -184,11 +184,9 @@ class cq_mgr
const uint32_t m_n_sysvar_cq_poll_batch_max;
const uint32_t m_n_sysvar_progress_engine_wce_max;
cq_stats_t* m_p_cq_stat;
transport_type_t m_transport_type;
mem_buf_desc_t* m_p_next_rx_desc_poll;
const uint32_t m_n_sysvar_rx_prefetch_bytes_before_poll;
const uint32_t m_n_sysvar_rx_prefetch_bytes;
size_t m_sz_transport_header;
ib_ctx_handler* m_p_ib_ctx_handler;
const uint32_t m_n_sysvar_rx_num_wr_to_post_recv;
private:
18 changes: 0 additions & 18 deletions src/vma/dev/cq_mgr.inl
@@ -62,22 +62,4 @@ inline bool is_eth_tcp_frame(mem_buf_desc_t* buff)
return false;
}

inline bool is_ib_tcp_frame(mem_buf_desc_t* buff)
{
struct ipoibhdr* p_ipoib_h = (struct ipoibhdr*)(buff->p_buffer + GRH_HDR_LEN);

// Validate IPoIB header
if (unlikely(p_ipoib_h->ipoib_header != htonl(IPOIB_HEADER))) {
return false;
}

size_t transport_header_len = GRH_HDR_LEN + IPOIB_HDR_LEN;

struct iphdr * p_ip_h = (struct iphdr*)(buff->p_buffer + transport_header_len);
if (likely(p_ip_h->protocol == IPPROTO_TCP)) {
return true;
}
return false;
}

#endif//CQ_MGR_INL_H
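With is_ib_tcp_frame() gone, is_eth_tcp_frame() (declared above; its body is not part of this diff) is the only frame classifier left in the drain path. For orientation, here is a minimal, hypothetical sketch of that kind of check for an untagged IPv4-over-Ethernet frame; the real VMA helper differs (it likely also handles VLAN-tagged frames), so this is not its implementation, only the Ethernet analogue of the removed GRH/IPoIB walk.

#include <stdint.h>
#include <linux/if_ether.h>   // struct ethhdr, ETH_P_IP, ETH_HLEN
#include <netinet/ip.h>       // struct iphdr (IPPROTO_TCP via netinet/in.h)
#include <arpa/inet.h>        // htons

// Sketch only: true if the buffer starts with an untagged IPv4 Ethernet
// frame whose payload is TCP.
static inline bool is_eth_tcp_frame_sketch(const uint8_t* p_buffer)
{
    const struct ethhdr* p_eth_h = (const struct ethhdr*)p_buffer;
    if (p_eth_h->h_proto != htons(ETH_P_IP)) {
        return false;                           // not plain IPv4 over Ethernet
    }
    const struct iphdr* p_ip_h = (const struct iphdr*)(p_buffer + ETH_HLEN);
    return p_ip_h->protocol == IPPROTO_TCP;
}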
70 changes: 25 additions & 45 deletions src/vma/dev/cq_mgr_mlx5.cpp
@@ -294,29 +294,19 @@ int cq_mgr_mlx5::drain_and_proccess(uintptr_t* p_recycle_buffers_last_wr_id /*=N
if (p_recycle_buffers_last_wr_id) {
m_p_cq_stat->n_rx_pkt_drop++;
reclaim_recv_buffer_helper(m_rx_hot_buffer);
} else {
bool procces_now = false;
if (m_transport_type == VMA_TRANSPORT_ETH) {
procces_now = is_eth_tcp_frame(m_rx_hot_buffer);
}
if (m_transport_type == VMA_TRANSPORT_IB) {
procces_now = is_ib_tcp_frame(m_rx_hot_buffer);
}
} else if (is_eth_tcp_frame(m_rx_hot_buffer)) {
// We process immediately all non udp/ip traffic..
if (procces_now) {
m_rx_hot_buffer->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(m_rx_hot_buffer)) {
process_recv_buffer(m_rx_hot_buffer, NULL);
}
m_rx_hot_buffer->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(m_rx_hot_buffer)) {
process_recv_buffer(m_rx_hot_buffer, NULL);
}
else { //udp/ip traffic we just put in the cq's rx queue
m_rx_queue.push_back(m_rx_hot_buffer);
mem_buf_desc_t* buff_cur = m_rx_queue.get_and_pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
} else { //udp/ip traffic we just put in the cq's rx queue
m_rx_queue.push_back(m_rx_hot_buffer);
mem_buf_desc_t* buff_cur = m_rx_queue.get_and_pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
}
}
@@ -344,30 +334,20 @@ int cq_mgr_mlx5::drain_and_proccess(uintptr_t* p_recycle_buffers_last_wr_id /*=N
if (p_recycle_buffers_last_wr_id) {
m_p_cq_stat->n_rx_pkt_drop++;
reclaim_recv_buffer_helper(buff);
} else {
bool procces_now = false;
if (m_transport_type == VMA_TRANSPORT_ETH) {
procces_now = is_eth_tcp_frame(buff);
}
if (m_transport_type == VMA_TRANSPORT_IB) {
procces_now = is_ib_tcp_frame(buff);
}
} else if (is_eth_tcp_frame(buff)) {
/* We process immediately all non udp/ip traffic.. */
if (procces_now) {
buff->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff)) {
process_recv_buffer(buff, NULL);
}
buff->rx.is_vma_thr = true;
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff)) {
process_recv_buffer(buff, NULL);
}
else { /* udp/ip traffic we just put in the cq's rx queue */
m_rx_queue.push_back(buff);
mem_buf_desc_t* buff_cur = m_rx_queue.front();
m_rx_queue.pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
} else { /* udp/ip traffic we just put in the cq's rx queue */
m_rx_queue.push_back(buff);
mem_buf_desc_t* buff_cur = m_rx_queue.front();
m_rx_queue.pop_front();
if ((++m_qp_rec.debt < (int)m_n_sysvar_rx_num_wr_to_post_recv) ||
!compensate_qp_poll_success(buff_cur)) {
m_rx_queue.push_front(buff_cur);
}
}
}
@@ -444,8 +424,8 @@ mem_buf_desc_t* cq_mgr_mlx5::process_cq_element_rx(mem_buf_desc_t* p_mem_buf_des

VALGRIND_MAKE_MEM_DEFINED(p_mem_buf_desc->p_buffer, p_mem_buf_desc->sz_data);

prefetch_range((uint8_t*)p_mem_buf_desc->p_buffer + m_sz_transport_header,
std::min(p_mem_buf_desc->sz_data - m_sz_transport_header, (size_t)m_n_sysvar_rx_prefetch_bytes));
prefetch_range((uint8_t*)p_mem_buf_desc->p_buffer + ETH_HDR_LEN,
std::min(p_mem_buf_desc->sz_data - ETH_HDR_LEN, (size_t)m_n_sysvar_rx_prefetch_bytes));


return p_mem_buf_desc;
3 changes: 0 additions & 3 deletions src/vma/dev/net_device_table_mgr.cpp
@@ -214,9 +214,6 @@ void net_device_table_mgr::update_tbl()
case ARPHRD_ETHER:
p_net_device_val = new net_device_val_eth(&desc);
break;
case ARPHRD_INFINIBAND:
p_net_device_val = new net_device_val_ib(&desc);
break;
default:
goto next;
}
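With the ARPHRD_INFINIBAND case dropped from update_tbl(), only Ethernet link types produce an offloaded net_device_val. A small self-contained sketch of that filter is below; the helper name is hypothetical, and the real code constructs net_device_val_eth directly inside the switch shown above.

#include <net/if_arp.h>   // ARPHRD_ETHER, ARPHRD_INFINIBAND

// Sketch only: mirrors the remaining link-type dispatch -- Ethernet devices
// are offloaded, everything else (now including IPoIB/ARPHRD_INFINIBAND)
// falls through to the default case and is skipped.
static inline bool is_offloadable_link_type_sketch(unsigned short arphrd_type)
{
    switch (arphrd_type) {
    case ARPHRD_ETHER:
        return true;      // would become a net_device_val_eth
    default:
        return false;     // unsupported link layer: device is ignored
    }
}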