NetKVM: Fix unclassified packets out of order on RX
When working with single-queue MQ, or without MQ, the
RX path may be processed by several CPUs running the
DPC routine in parallel.

In this case only one of them should do the real
processing.

Signed-off-by: Dmitry Fleytman <dmitry@daynix.com>
Dmitry Fleytman authored and vrozenfe committed Jun 21, 2016
1 parent 0efc5ec commit 2d2df86
Showing 3 changed files with 55 additions and 52 deletions.
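The essence of the fix, as seen in the RxDPCWorkBody changes below, is that each DPC first tries to take ownership of the unclassified packets queue, and only the owner drains and indicates it; a CPU that loses the race leaves the queue alone, so packets are always indicated in queue order. Below is a minimal, self-contained sketch of this single-owner pattern in portable C++; std::atomic and std::deque merely stand in for the driver's interlocked primitives and receive queue, and all names are illustrative, not the driver's.

    #include <atomic>
    #include <cstdio>
    #include <deque>
    #include <mutex>

    // Stand-in for the receive queue: a FIFO of packet ids plus an ownership flag.
    struct ReceiveQueue
    {
        std::mutex lock;                  // plays the role of the NDIS spin lock
        std::deque<int> buffers;          // plays the role of BuffersList
        std::atomic<bool> owned{false};   // plays the role of the ownership flag

        // Try to become the single processor of this queue (non-blocking).
        bool AcquireOwnership()
        {
            bool expected = false;
            return owned.compare_exchange_strong(expected, true);
        }

        void ReleaseOwnership() { owned.store(false); }
    };

    // DPC-like work body: only the owner drains and "indicates" packets,
    // so they are always delivered in the order they were queued.
    void RxWorkBody(ReceiveQueue& q)
    {
        if (!q.AcquireOwnership())
        {
            return; // another CPU is already processing this queue
        }

        for (;;)
        {
            int packet;
            {
                std::lock_guard<std::mutex> guard(q.lock);
                if (q.buffers.empty())
                {
                    break;
                }
                packet = q.buffers.front();
                q.buffers.pop_front();
            }
            std::printf("indicate packet %d\n", packet); // stands in for the NDIS indication
        }

        q.ReleaseOwnership();
    }

    int main()
    {
        ReceiveQueue q;
        for (int i = 0; i < 5; ++i)
        {
            q.buffers.push_back(i);
        }
        RxWorkBody(q); // prints packets 0..4 in order
        return 0;
    }

In the driver itself the ownership spans the whole RxDPCWorkBody: it is acquired before the queue is drained and released only after the collected NBL chain has been handed off for indication, and leftover buffers are still reported through ReceiveQueueHasBuffers in the return value.
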
103 changes: 54 additions & 49 deletions NetKVM/Common/ParaNdis-Common.cpp
@@ -1535,62 +1535,55 @@ UpdateReceiveFailStatistics(PPARANDIS_ADAPTER pContext, UINT nCoalescedSegmentsCount)
     pContext->Statistics.ifInDiscards += nCoalescedSegmentsCount;
 }
 
-static BOOLEAN ProcessReceiveQueue(PARANDIS_ADAPTER *pContext,
-                                   PULONG pnPacketsToIndicateLeft,
-                                   PPARANDIS_RECEIVE_QUEUE pTargetReceiveQueue,
-                                   PNET_BUFFER_LIST *indicate,
-                                   PNET_BUFFER_LIST *indicateTail,
-                                   ULONG *nIndicate)
-{
-
-    pRxNetDescriptor pBufferDescriptor;
-
-    if(NdisInterlockedIncrement(&pTargetReceiveQueue->ActiveProcessorsCount) == 1)
-    {
-        while( (*pnPacketsToIndicateLeft > 0) &&
-               (NULL != (pBufferDescriptor = ReceiveQueueGetBuffer(pTargetReceiveQueue))) )
-        {
-            PNET_PACKET_INFO pPacketInfo = &pBufferDescriptor->PacketInfo;
-
-            if( !pContext->bSurprizeRemoved &&
-                pContext->bConnected &&
-                ShallPassPacket(pContext, pPacketInfo))
-            {
-                UINT nCoalescedSegmentsCount;
-                PNET_BUFFER_LIST packet = ParaNdis_PrepareReceivedPacket(pContext, pBufferDescriptor, &nCoalescedSegmentsCount);
-                if(packet != NULL)
-                {
-                    UpdateReceiveSuccessStatistics(pContext, pPacketInfo, nCoalescedSegmentsCount);
-                    if (*indicate == nullptr)
-                    {
-                        *indicate = *indicateTail = packet;
-                    }
-                    else
-                    {
-                        NET_BUFFER_LIST_NEXT_NBL(*indicateTail) = packet;
-                        *indicateTail = packet;
-                    }
-
-                    NET_BUFFER_LIST_NEXT_NBL(*indicateTail) = NULL;
-                    (*pnPacketsToIndicateLeft)--;
-                    (*nIndicate)++;
-                }
-                else
-                {
-                    UpdateReceiveFailStatistics(pContext, nCoalescedSegmentsCount);
-                    pBufferDescriptor->Queue->ReuseReceiveBuffer(pBufferDescriptor);
-                }
-            }
-            else
-            {
-                pContext->extraStatistics.framesFilteredOut++;
-                pBufferDescriptor->Queue->ReuseReceiveBuffer(pBufferDescriptor);
-            }
-        }
-    }
-
-    NdisInterlockedDecrement(&pTargetReceiveQueue->ActiveProcessorsCount);
-    return ReceiveQueueHasBuffers(pTargetReceiveQueue);
-}
+static void ProcessReceiveQueue(PARANDIS_ADAPTER *pContext,
+                                PULONG pnPacketsToIndicateLeft,
+                                PPARANDIS_RECEIVE_QUEUE pTargetReceiveQueue,
+                                PNET_BUFFER_LIST *indicate,
+                                PNET_BUFFER_LIST *indicateTail,
+                                ULONG *nIndicate)
+{
+
+    pRxNetDescriptor pBufferDescriptor;
+
+    while( (*pnPacketsToIndicateLeft > 0) &&
+           (NULL != (pBufferDescriptor = ReceiveQueueGetBuffer(pTargetReceiveQueue))) )
+    {
+        PNET_PACKET_INFO pPacketInfo = &pBufferDescriptor->PacketInfo;
+
+        if( !pContext->bSurprizeRemoved &&
+            pContext->bConnected &&
+            ShallPassPacket(pContext, pPacketInfo))
+        {
+            UINT nCoalescedSegmentsCount;
+            PNET_BUFFER_LIST packet = ParaNdis_PrepareReceivedPacket(pContext, pBufferDescriptor, &nCoalescedSegmentsCount);
+            if(packet != NULL)
+            {
+                UpdateReceiveSuccessStatistics(pContext, pPacketInfo, nCoalescedSegmentsCount);
+                if (*indicate == nullptr)
+                {
+                    *indicate = *indicateTail = packet;
+                }
+                else
+                {
+                    NET_BUFFER_LIST_NEXT_NBL(*indicateTail) = packet;
+                    *indicateTail = packet;
+                }
+
+                NET_BUFFER_LIST_NEXT_NBL(*indicateTail) = NULL;
+                (*pnPacketsToIndicateLeft)--;
+                (*nIndicate)++;
+            }
+            else
+            {
+                UpdateReceiveFailStatistics(pContext, nCoalescedSegmentsCount);
+                pBufferDescriptor->Queue->ReuseReceiveBuffer(pBufferDescriptor);
+            }
+        }
+        else
+        {
+            pContext->extraStatistics.framesFilteredOut++;
+            pBufferDescriptor->Queue->ReuseReceiveBuffer(pBufferDescriptor);
+        }
+    }
+}


@@ -1621,7 +1614,7 @@ static
 BOOLEAN RxDPCWorkBody(PARANDIS_ADAPTER *pContext, CPUPathesBundle *pathBundle, ULONG nPacketsToIndicate)
 {
     BOOLEAN res = FALSE;
-
+    bool rxPathOwner = false;
     PNET_BUFFER_LIST indicate, indicateTail;
     ULONG nIndicate;
 
@@ -1636,25 +1629,26 @@ BOOLEAN RxDPCWorkBody(PARANDIS_ADAPTER *pContext, CPUPathesBundle *pathBundle, ULONG nPacketsToIndicate)
        associated queues */
     if (pathBundle != nullptr)
     {
+        rxPathOwner = pathBundle->rxPath.UnclassifiedPacketsQueue().Ownership.Acquire();
+
         pathBundle->rxPath.ProcessRxRing(CurrCpuReceiveQueue);
 
-        res |= ProcessReceiveQueue(pContext, &nPacketsToIndicate, &pathBundle->rxPath.UnclassifiedPacketsQueue(),
-                                   &indicate, &indicateTail, &nIndicate);
+        if (rxPathOwner)
+        {
+            ProcessReceiveQueue(pContext, &nPacketsToIndicate, &pathBundle->rxPath.UnclassifiedPacketsQueue(),
+                                &indicate, &indicateTail, &nIndicate);
+        }
     }
 
 #ifdef PARANDIS_SUPPORT_RSS
     if (CurrCpuReceiveQueue != PARANDIS_RECEIVE_NO_QUEUE)
     {
-        res |= ProcessReceiveQueue(pContext, &nPacketsToIndicate, &pContext->ReceiveQueues[CurrCpuReceiveQueue],
-                                   &indicate, &indicateTail, &nIndicate);
+        ProcessReceiveQueue(pContext, &nPacketsToIndicate, &pContext->ReceiveQueues[CurrCpuReceiveQueue],
+                            &indicate, &indicateTail, &nIndicate);
+        res |= ReceiveQueueHasBuffers(&pContext->ReceiveQueues[CurrCpuReceiveQueue]);
     }
 #endif
 
-    if (pathBundle != nullptr)
-    {
-        res |= pathBundle->rxPath.RestartQueue();
-    }
-
     if (nIndicate)
     {
         if(pContext->m_RxStateMachine.RegisterOutstandingItems(nIndicate))
@@ -1668,6 +1662,17 @@ BOOLEAN RxDPCWorkBody(PARANDIS_ADAPTER *pContext, CPUPathesBundle *pathBundle, ULONG nPacketsToIndicate)
         }
     }
 
+    if (rxPathOwner)
+    {
+        pathBundle->rxPath.UnclassifiedPacketsQueue().Ownership.Release();
+    }
+
+    if (pathBundle != nullptr)
+    {
+        res |= pathBundle->rxPath.RestartQueue() |
+               ReceiveQueueHasBuffers(&pathBundle->rxPath.UnclassifiedPacketsQueue());
+    }
+
     return res;
 }

1 change: 0 additions & 1 deletion NetKVM/Common/ParaNdis-RX.cpp
@@ -8,7 +8,6 @@ CParaNdisRX::CParaNdisRX() : m_nReusedRxBuffersCounter(0), m_NetNofReceiveBuffer
 
     NdisAllocateSpinLock(&m_UnclassifiedPacketsQueue.Lock);
     InitializeListHead(&m_UnclassifiedPacketsQueue.BuffersList);
-    m_UnclassifiedPacketsQueue.ActiveProcessorsCount = 0;
 }
 
 CParaNdisRX::~CParaNdisRX()
3 changes: 1 addition & 2 deletions NetKVM/Common/ndis56common.h
@@ -89,8 +89,7 @@ typedef struct _tagPARANDIS_RECEIVE_QUEUE
 {
     NDIS_SPIN_LOCK Lock;
     LIST_ENTRY BuffersList;
-
-    LONG ActiveProcessorsCount;
+    COwnership Ownership;
 } PARANDIS_RECEIVE_QUEUE, *PPARANDIS_RECEIVE_QUEUE;
 
 #include "ParaNdis-TX.h"
