Skip to content

Commit

Permalink
Make sure per file access stats are synchronized properly.
Browse files Browse the repository at this point in the history
  • Loading branch information
osschar committed May 8, 2018
1 parent 113736f commit 30500b0
Show file tree
Hide file tree
Showing 4 changed files with 43 additions and 17 deletions.
13 changes: 9 additions & 4 deletions src/XrdFileCache/XrdFileCacheFile.cc
Expand Up @@ -171,7 +171,8 @@ bool File::FinalizeSyncBeforeExit()
{
if ( ! m_writes_during_sync.empty() || m_non_flushed_cnt > 0 || ! m_detachTimeIsLogged)
{
m_cfi.WriteIOStatDetach(m_stats);
Stats loc_stats = m_stats.Clone();
m_cfi.WriteIOStatDetach(loc_stats);
m_detachTimeIsLogged = true;
TRACEF(Debug, "File::FinalizeSyncBeforeExit scheduling sync to write detach stats");
return true;
Expand Down Expand Up @@ -443,7 +444,6 @@ int File::ReadBlocksFromDisk(std::list<int>& blocks,
total += rs;
}

m_stats.m_BytesDisk += total;
return total;
}

Expand All @@ -458,6 +458,8 @@ int File::Read(char* iUserBuff, long long iUserOff, int iUserSize)

const long long BS = m_cfi.GetBufferSize();

Stats loc_stats;

// lock
// loop over required blocks:
// - if on disk, ok;
Expand Down Expand Up @@ -564,6 +566,7 @@ int File::Read(char* iUserBuff, long long iUserOff, int iUserSize)
if (rc >= 0)
{
bytes_read += rc;
loc_stats.m_BytesDisk += rc;
}
else
{
Expand Down Expand Up @@ -620,7 +623,7 @@ int File::Read(char* iUserBuff, long long iUserOff, int iUserSize)
TRACEF(Dump, "File::Read() ub=" << (void*)iUserBuff << " from finished block " << (*bi)->m_offset/BS << " size " << size_to_copy);
memcpy(&iUserBuff[user_off], &((*bi)->m_buff[off_in_block]), size_to_copy);
bytes_read += size_to_copy;
m_stats.m_BytesRam += size_to_copy;
loc_stats.m_BytesRam += size_to_copy;
if ((*bi)->m_prefetch)
prefetchHitsRam++;
}
Expand Down Expand Up @@ -653,7 +656,7 @@ int File::Read(char* iUserBuff, long long iUserOff, int iUserSize)
if (direct_handler->m_errno == 0)
{
bytes_read += direct_size;
m_stats.m_BytesMissed += direct_size;
loc_stats.m_BytesMissed += direct_size;
}
else
{
Expand Down Expand Up @@ -688,6 +691,8 @@ int File::Read(char* iUserBuff, long long iUserOff, int iUserSize)
m_prefetchScore = float(m_prefetchHitCnt)/m_prefetchReadCnt;
}

m_stats.AddStats(loc_stats);

return bytes_read;
}

Expand Down
1 change: 1 addition & 0 deletions src/XrdFileCache/XrdFileCacheIOFileBlock.cc
Expand Up @@ -88,6 +88,7 @@ void IOFileBlock::CloseInfoFile()
{
if (m_info.GetFileSize() > 0)
{
// We do not maintain access statistics for individual blocks.
Stats as;
m_info.WriteIOStatDetach(as);
}
Expand Down
20 changes: 14 additions & 6 deletions src/XrdFileCache/XrdFileCacheStats.hh
Expand Up @@ -27,7 +27,7 @@ namespace XrdFileCache
//----------------------------------------------------------------------------
//! Statistics of disk cache utilisation.
//----------------------------------------------------------------------------
class Stats : public XrdOucCacheStats
class Stats
{
public:
//----------------------------------------------------------------------
Expand All @@ -41,18 +41,26 @@ public:
long long m_BytesRam; //!< number of bytes served from RAM cache
long long m_BytesMissed; //!< number of bytes served directly from XrdCl

inline void AddStat(Stats &Src)
inline void AddStats(Stats &Src)
{
XrdOucCacheStats::Add(Src);

m_MutexXfc.Lock();
m_BytesDisk += Src.m_BytesDisk;
m_BytesRam += Src.m_BytesRam;

m_BytesDisk += Src.m_BytesDisk;
m_BytesRam += Src.m_BytesRam;
m_BytesMissed += Src.m_BytesMissed;

m_MutexXfc.UnLock();
}

//----------------------------------------------------------------------
//! Return a consistent snapshot of this Stats object.
//! The copy is made while holding m_MutexXfc so the counters in the
//! returned value are mutually consistent even if other threads are
//! calling AddStats() at the same time.
//! NOTE(review): `ret = *this` copy-assigns the whole object, including
//! the XrdSysMutex member — assumes XrdSysMutex copy-assignment is
//! benign; confirm against XrdSysPthread.hh.
//----------------------------------------------------------------------
Stats Clone()
{
Stats ret;
m_MutexXfc.Lock();
ret = *this;  // snapshot taken under the lock
m_MutexXfc.UnLock();
return ret;
}

private:
XrdSysMutex m_MutexXfc;
};
Expand Down
26 changes: 19 additions & 7 deletions src/XrdFileCache/XrdFileCacheVRead.cc
Expand Up @@ -92,11 +92,13 @@ int File::ReadV(const XrdOucIOVec *readV, int n)
return -1;
}

Stats loc_stats;

int bytesRead = 0;

ReadVBlockListRAM blocks_to_process;
ReadVBlockListRAM blocks_to_process;
std::vector<ReadVChunkListRAM> blks_processed;
ReadVBlockListDisk blocks_on_disk;
ReadVBlockListDisk blocks_on_disk;
std::vector<XrdOucIOVec> chunkVec;
DirectResponseHandler *direct_handler = 0;

Expand Down Expand Up @@ -124,19 +126,29 @@ int File::ReadV(const XrdOucIOVec *readV, int n)
{
int dr = VReadFromDisk(readV, n, blocks_on_disk);
if (dr < 0)
{
bytesRead = dr;
}
else
{
bytesRead += dr;
loc_stats.m_BytesDisk += dr;
}
}

// read from cached blocks
if (bytesRead >= 0)
{
int br = VReadProcessBlocks(readV, n, blocks_to_process.bv, blks_processed);
if (br < 0)
{
bytesRead = br;
}
else
{
bytesRead += br;
loc_stats.m_BytesRam += br;
}
}

// check direct requests have arrived, get bytes read from read handle
Expand All @@ -154,7 +166,7 @@ int File::ReadV(const XrdOucIOVec *readV, int n)
for (std::vector<XrdOucIOVec>::iterator i = chunkVec.begin(); i != chunkVec.end(); ++i)
{
bytesRead += i->size;
m_stats.m_BytesMissed += i->size;
loc_stats.m_BytesMissed += i->size;
}
}
else
Expand Down Expand Up @@ -183,6 +195,8 @@ int File::ReadV(const XrdOucIOVec *readV, int n)
for (std::vector<ReadVChunkListRAM>::iterator i = blks_processed.begin(); i != blks_processed.end(); ++i)
delete i->arr;

m_stats.AddStats(loc_stats);

TRACEF(Dump, "VRead exit, total = " << bytesRead);
return bytesRead;
}
Expand Down Expand Up @@ -292,10 +306,9 @@ int File::VReadFromDisk(const XrdOucIOVec *readV, int n, ReadVBlockListDisk& blo
overlap(blockIdx, m_cfi.GetBufferSize(), readV[chunkIdx].offset, readV[chunkIdx].size, off, blk_off, size);

int rs = m_output->Read(readV[chunkIdx].data + off, blockIdx*m_cfi.GetBufferSize() + blk_off - m_offset, size);
if (rs >=0)
if (rs >= 0)
{
bytes_read += rs;
m_stats.m_BytesDisk += rs;
}
else
{
Expand Down Expand Up @@ -358,8 +371,7 @@ int File::VReadProcessBlocks(const XrdOucIOVec *readV, int n,
int block_idx = bi->block->m_offset/m_cfi.GetBufferSize();
overlap(block_idx, m_cfi.GetBufferSize(), readV[*chunkIt].offset, readV[*chunkIt].size, off, blk_off, size);
memcpy(readV[*chunkIt].data + off, &(bi->block->m_buff[blk_off]), size);
bytes_read += size;
m_stats.m_BytesRam += size;
bytes_read += size;
}
}
else
Expand Down

0 comments on commit 30500b0

Please sign in to comment.