Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
netkvm: Fix functionality with single MSIX vector
https://bugzilla.redhat.com/show_bug.cgi?id=1666940
Set a clear and simple rule for the assignment of vectors to queues
and for the translation of MSIX messages to queues:
in the case of one or two vectors, they are assigned
to the TX/RX queues and CX reuses the last vector. In this case
we do not spawn a DPC for CX and instead process it during regular
RX/TX processing. In the case of 3 or more vectors the processing
is the same as before.

Signed-off-by: Yuri Benditovich <yuri.benditovich@janustech.com>
  • Loading branch information
ybendito authored and YanVugenfirer committed Jan 30, 2019
1 parent d408737 commit 8a7005b
Show file tree
Hide file tree
Showing 6 changed files with 74 additions and 55 deletions.
16 changes: 16 additions & 0 deletions NetKVM/Common/ParaNdis-AbstractPath.h
Expand Up @@ -67,6 +67,8 @@ class CParaNdisAbstractPath
KAFFINITY DPCTargetProcessor = 0;
#endif

virtual bool FireDPC(ULONG messageId) = 0;

protected:
PPARANDIS_ADAPTER m_Context;
CVirtQueue *m_pVirtQueue;
Expand Down Expand Up @@ -123,6 +125,20 @@ template <class VQ> class CParaNdisTemplatePath : public CParaNdisAbstractPath,
m_VirtQueue.AllowTouchHardware();
}
}

// Default implementation for RX/TX queues; the CX queue redefines it.
// Schedules the NDIS message DPC for this path on the processors in
// DPCAffinity. Returns true if a DPC was scheduled, false otherwise
// (the caller is then responsible for alternative processing).
bool FireDPC(ULONG messageId) override
{
#if NDIS_SUPPORT_NDIS620
    if (DPCAffinity.Mask)
    {
        NdisMQueueDpcEx(m_Context->InterruptHandle, messageId, &DPCAffinity, NULL);
        // use 'true', not the Windows TRUE macro: the return type is bool
        return true;
    }
#else
    // without NDIS 6.20 support messageId is otherwise unused;
    // avoid C4100 under /W4 /WX (typical for WDK builds)
    UNREFERENCED_PARAMETER(messageId);
#endif
    return false;
}
protected:
CNdisSpinLock m_Lock;
bool m_ObserverAdded;
Expand Down
7 changes: 7 additions & 0 deletions NetKVM/Common/ParaNdis-CX.cpp
Expand Up @@ -154,3 +154,10 @@ NDIS_STATUS CParaNdisCX::SetupMessageIndex(u16 vector)

return CParaNdisAbstractPath::SetupMessageIndex(vector);
}

// CX path override: the control queue uses its own KDPC (m_DPC)
// instead of an NDIS message DPC, so queue it directly.
// Always reports success: KeInsertQueueDpc returning FALSE only
// means the DPC is already queued, so a run is still pending.
bool CParaNdisCX::FireDPC(ULONG messageId)
{
    DPrintf(0, "[%s] message %u\n", __FUNCTION__, messageId);
    KeInsertQueueDpc(&m_DPC, NULL, NULL);
    // 'true', not the TRUE macro: the return type is bool
    return true;
}
1 change: 1 addition & 0 deletions NetKVM/Common/ParaNdis-CX.h
Expand Up @@ -24,6 +24,7 @@ class CParaNdisCX : public CParaNdisTemplatePath<CVirtQueue>, public CPlacementA
int levelIfOK
);

bool FireDPC(ULONG messageId) override;
KDPC m_DPC;

protected:
Expand Down
19 changes: 10 additions & 9 deletions NetKVM/Common/ParaNdis-Common.cpp
Expand Up @@ -754,6 +754,10 @@ NDIS_STATUS ParaNdis_InitializeContext(
DPrintf(0, "[%s] Message interrupt assigned\n", __FUNCTION__);
pContext->bUsingMSIX = TRUE;
}
else
{
pContext->bSharedVectors = TRUE;
}

NTSTATUS nt_status = virtio_device_initialize(
&pContext->IODevice,
Expand Down Expand Up @@ -937,7 +941,8 @@ static USHORT DetermineQueueNumber(PARANDIS_ADAPTER *pContext)
USHORT nBundles = USHORT(((pContext->pMSIXInfoTable->MessageCount - 1) / 2) & 0xFFFF);
if (!nBundles && (pContext->pMSIXInfoTable->MessageCount == 1 || pContext->pMSIXInfoTable->MessageCount == 2))
{
DPrintf(0, "[%s] WARNING: Single MSIx interrupt allocated, performance will be reduced\n", __FUNCTION__);
DPrintf(0, "[%s] WARNING: %d MSIX message(s), performance will be reduced\n",
__FUNCTION__, pContext->pMSIXInfoTable->MessageCount);
nBundles = 1;
}

Expand Down Expand Up @@ -1910,14 +1915,10 @@ bool ParaNdis_RXTXDPCWorkBody(PARANDIS_ADAPTER *pContext, ULONG ulMaxPacketsToIn
}
InterlockedDecrement(&pContext->counterDPCInside);

return stillRequiresProcessing;
}

bool ParaNdis_DPCWorkBody(PARANDIS_ADAPTER *pContext, ULONG ulMaxPacketsToIndicate)
{
bool stillRequiresProcessing = ParaNdis_RXTXDPCWorkBody(pContext, ulMaxPacketsToIndicate);
ParaNdis_CXDPCWorkBody(pContext);

if (pContext->bSharedVectors && pathBundle)
{
ParaNdis_CXDPCWorkBody(pContext);
}
return stillRequiresProcessing;
}

Expand Down
5 changes: 1 addition & 4 deletions NetKVM/Common/ndis56common.h
Expand Up @@ -452,6 +452,7 @@ typedef struct _tagPARANDIS_ADAPTER
CParaNdisCX CXPath;
BOOLEAN bCXPathAllocated;
BOOLEAN bCXPathCreated;
BOOLEAN bSharedVectors;
BOOLEAN bDeviceNeedsReset;

CPUPathBundle *pPathBundles;
Expand Down Expand Up @@ -535,10 +536,6 @@ NDIS_STATUS ParaNdis_ConfigureMSIXVectors(
VOID ParaNdis_CleanupContext(
PARANDIS_ADAPTER *pContext);

bool ParaNdis_DPCWorkBody(
PARANDIS_ADAPTER *pContext,
ULONG ulMaxPacketsToIndicate);

void ParaNdis_CXDPCWorkBody(PARANDIS_ADAPTER *pContext);

void ParaNdis_ReuseRxNBLs(PNET_BUFFER_LIST pNBL);
Expand Down
81 changes: 39 additions & 42 deletions NetKVM/wlh/ParaNdis6-Impl.cpp
Expand Up @@ -256,28 +256,35 @@ static BOOLEAN MiniportInterrupt(
return true;
}

// This procedure must work the same way as
// ParaNdis_ConfigureMSIXVectors when it spreads vectors over the RX/TX/CX paths:
// bundle N owns messages 2N (TX) and 2N+1 (RX).
// Returns the respective TX or RX path if it exists, then the CX path
// (i.e. returns the CX path only if it has a dedicated vector),
// otherwise (unlikely) returns NULL.
static CParaNdisAbstractPath *GetPathByMessageId(PARANDIS_ADAPTER *pContext, ULONG MessageId)
{
    CParaNdisAbstractPath *path;
    UINT bundleId = MessageId / 2;

    if (bundleId < pContext->nPathBundles)
    {
        // odd message -> RX, even message -> TX
        // (matches the assignment order in ParaNdis_ConfigureMSIXVectors)
        if (MessageId & 1)
        {
            path = &(pContext->pPathBundles[bundleId].rxPath);
        }
        else
        {
            path = &(pContext->pPathBundles[bundleId].txPath);
        }
    }
    else if (pContext->CXPath.getMessageIndex() == MessageId)
    {
        // CX path only when it owns a dedicated vector beyond the bundles
        path = &pContext->CXPath;
    }
    else
    {
        path = NULL;
    }

    return path;
}

Expand Down Expand Up @@ -319,24 +326,15 @@ static BOOLEAN MiniportMSIInterrupt(

path->DisableInterrupts();

if (path->getMessageIndex() == pContext->CXPath.getMessageIndex())
// schedule either the DPC that processes the TX/RX path or the DPC
// that processes the CX path. Note that when CX shares a vector with
// TX/RX, no CX DPC is scheduled and the CX path is processed by
// ParaNdis_RXTXDPCWorkBody (see pContext->bSharedVectors for that case)
if (!path->FireDPC(MessageId))
{
KeInsertQueueDpc(&pContext->CXPath.m_DPC, NULL, NULL);
}
else
{
#if NDIS_SUPPORT_NDIS620
if (path->DPCAffinity.Mask)
{
NdisMQueueDpcEx(pContext->InterruptHandle, MessageId, &path->DPCAffinity, NULL);
}
else
{
*QueueDefaultInterruptDpc = TRUE;
}
#else
*TargetProcessors = (ULONG)path->DPCTargetProcessor;
*QueueDefaultInterruptDpc = TRUE;
#if !NDIS_SUPPORT_NDIS620
*TargetProcessors = (ULONG)path->DPCTargetProcessor;
#endif
}

Expand Down Expand Up @@ -378,7 +376,7 @@ static VOID MiniportInterruptDPC(
PNDIS_RECEIVE_THROTTLE_PARAMETERS RxThrottleParameters = (PNDIS_RECEIVE_THROTTLE_PARAMETERS)ReceiveThrottleParameters;
DEBUG_ENTRY(5);
RxThrottleParameters->MoreNblsPending = 0;
requiresDPCRescheduling = ParaNdis_DPCWorkBody(pContext, RxThrottleParameters->MaxNblsToIndicate);
requiresDPCRescheduling = ParaNdis_RXTXDPCWorkBody(pContext, RxThrottleParameters->MaxNblsToIndicate);
if (requiresDPCRescheduling)
{
GROUP_AFFINITY Affinity;
Expand All @@ -390,7 +388,7 @@ static VOID MiniportInterruptDPC(
DEBUG_ENTRY(5);
UNREFERENCED_PARAMETER(ReceiveThrottleParameters);

requiresDPCRescheduling = ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE);
requiresDPCRescheduling = ParaNdis_RXTXDPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE);
if (requiresDPCRescheduling)
{
DPrintf(4, "[%s] Queued additional DPC for %d\n", __FUNCTION__, requiresDPCRescheduling);
Expand Down Expand Up @@ -459,7 +457,7 @@ static VOID MiniportMSIInterruptDpc(
#else
UNREFERENCED_PARAMETER(NdisReserved1);

requireDPCRescheduling = ParaNdis_DPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE);
requireDPCRescheduling = ParaNdis_RXTXDPCWorkBody(pContext, PARANDIS_UNLIMITED_PACKETS_TO_INDICATE);
if (requireDPCRescheduling)
{
NdisMQueueDpc(pContext->InterruptHandle, MessageId, 1 << KeGetCurrentProcessorNumber(), MiniportDpcContext);
Expand Down Expand Up @@ -522,9 +520,8 @@ NDIS_STATUS ParaNdis_ConfigureMSIXVectors(PARANDIS_ADAPTER *pContext)
{
NDIS_STATUS status = NDIS_STATUS_RESOURCES;
UINT i;
u16 nVectors = (u16)pContext->pMSIXInfoTable->MessageCount;
PIO_INTERRUPT_MESSAGE_INFO pTable = pContext->pMSIXInfoTable;
bool bSingleVector = pContext->pMSIXInfoTable->MessageCount == 1;
bool bDoubleVectors = pContext->pMSIXInfoTable->MessageCount == 2;
if (pTable && pTable->MessageCount)
{
status = NDIS_STATUS_SUCCESS;
Expand All @@ -543,7 +540,7 @@ NDIS_STATUS ParaNdis_ConfigureMSIXVectors(PARANDIS_ADAPTER *pContext)
status = pContext->pPathBundles[j].txPath.SetupMessageIndex(vector);
if (status == NDIS_STATUS_SUCCESS)
{
if (!bSingleVector && !bDoubleVectors) vector++;
if (nVectors > 1) vector++;
status = pContext->pPathBundles[j].rxPath.SetupMessageIndex(vector);
}
DPrintf(0, "[%s] Using messages %u/%u for RX/TX queue %u\n", __FUNCTION__,
Expand All @@ -554,22 +551,22 @@ NDIS_STATUS ParaNdis_ConfigureMSIXVectors(PARANDIS_ADAPTER *pContext)

if (status == NDIS_STATUS_SUCCESS && pContext->bCXPathCreated)
{
u16 nVector;
/*
Usually the control queue has its own vector.
In the corner case of one or two vectors, the control queue
shares a vector with RX or TX and does not spawn its own DPC
for processing
*/
if (bSingleVector)
{
status = pContext->CXPath.SetupMessageIndex(0);
}
else if (bDoubleVectors)
if (nVectors < 3)
{
status = pContext->CXPath.SetupMessageIndex(1);
nVector = nVectors - 1;
pContext->bSharedVectors = TRUE;
}
else
{
status = pContext->CXPath.SetupMessageIndex(2 * u16(pContext->nPathBundles));
nVector = 2 * u16(pContext->nPathBundles);
}
status = pContext->CXPath.SetupMessageIndex(nVector);
}
}

Expand Down

0 comments on commit 8a7005b

Please sign in to comment.