Permalink
Browse files

split up the packet & query cache into 1024 shards so our locks no longer collide

Conflicts:
	pdns/packetcache.cc
  • Loading branch information...
1 parent 65931d9 commit d061045ce06c5e744663fe9c9710b6944754a37e @ahupowerdns ahupowerdns committed with mind04 Sep 4, 2014
Showing with 127 additions and 78 deletions.
  1. +4 −0 pdns/misc.hh
  2. +112 −76 pdns/packetcache.cc
  3. +11 −2 pdns/packetcache.hh
View
@@ -371,6 +371,10 @@ public:
return atomic_exchange_and_add( &value_, val );
}
+ native_t operator-=(native_t val)
+ {
+ return atomic_exchange_and_add( &value_, -val );
+ }
native_t operator--()
{
View
@@ -1,6 +1,6 @@
/*
PowerDNS Versatile Database Driven Nameserver
- Copyright (C) 2002 - 2011 PowerDNS.COM BV
+ Copyright (C) 2002 - 2014 PowerDNS.COM BV
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -26,13 +26,16 @@
#include "statbag.hh"
#include <map>
#include <boost/algorithm/string.hpp>
+#include <boost/foreach.hpp>
extern StatBag S;
PacketCache::PacketCache()
{
- pthread_rwlock_init(&d_mut, 0);
- // d_ops = 0;
+ d_maps.resize(1024);
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ pthread_rwlock_init(&mc.d_mut, 0);
+ }
d_ttl=-1;
d_recursivettl=-1;
@@ -48,9 +51,18 @@ PacketCache::PacketCache()
PacketCache::~PacketCache()
{
- WriteLock l(&d_mut);
+ // WriteLock l(&d_mut);
+ vector<WriteLock*> locks;
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ locks.push_back(new WriteLock(&mc.d_mut));
+ }
+ BOOST_FOREACH(WriteLock* wl, locks) {
+ delete wl;
+ }
}
+
+
int PacketCache::get(DNSPacket *p, DNSPacket *cached, bool recursive)
{
extern StatBag S;
@@ -82,7 +94,8 @@ int PacketCache::get(DNSPacket *p, DNSPacket *cached, bool recursive)
string value;
bool haveSomething;
{
- TryReadLock l(&d_mut); // take a readlock here
+ MapCombo& mc=getMap(pcReverse(p->qdomain));
+ TryReadLock l(&mc.d_mut); // take a readlock here
if(!l.gotIt()) {
S.inc("deferred-cache-lookup");
return 0;
@@ -168,15 +181,15 @@ void PacketCache::insert(const string &qname, const QType& qtype, CacheEntryType
val.zoneID = zoneID;
val.hasEDNS = EDNS;
- TryWriteLock l(&d_mut);
+ MapCombo& mc = getMap(val.qname);
+ TryWriteLock l(&mc.d_mut);
if(l.gotIt()) {
bool success;
cmap_t::iterator place;
- tie(place, success)=d_map.insert(val);
- // cerr<<"Insert succeeded: "<<success<<endl;
+ tie(place, success)=mc.d_map.insert(val);
+
if(!success)
- d_map.replace(place, val);
-
+ mc.d_map.replace(place, val);
}
else
S.inc("deferred-cache-inserts");
@@ -185,44 +198,51 @@ void PacketCache::insert(const string &qname, const QType& qtype, CacheEntryType
/* clears the entire packetcache. */
int PacketCache::purge()
{
- WriteLock l(&d_mut);
- int delcount=d_map.size();
- d_map.clear();
+ int delcount=0;
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ WriteLock l(&mc.d_mut);
+ delcount+=mc.d_map.size();
+ mc.d_map.clear();
+ }
*d_statnumentries=AtomicCounter(0);
return delcount;
}
/* purges entries from the packetcache. If match ends on a $, it is treated as a suffix */
int PacketCache::purge(const string &match)
{
- WriteLock l(&d_mut);
int delcount=0;
- if(ends_with(match, "$")) {
- string prefix(match);
- prefix.resize(prefix.size()-1);
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ WriteLock l(&mc.d_mut);
+
+ if(ends_with(match, "$")) {
+ string prefix(match);
+ prefix.resize(prefix.size()-1);
- string zone = pcReverse(prefix);
+ string zone = pcReverse(prefix);
- cmap_t::const_iterator iter = d_map.lower_bound(tie(zone));
- cmap_t::const_iterator start=iter;
+ cmap_t::const_iterator iter = mc.d_map.lower_bound(tie(zone));
+ cmap_t::const_iterator start=iter;
- for(; iter != d_map.end(); ++iter) {
- if(iter->qname.compare(0, zone.size(), zone) != 0) {
- break;
+ for(; iter != mc.d_map.end(); ++iter) {
+ if(iter->qname.compare(0, zone.size(), zone) != 0) {
+ break;
+ }
+ delcount++;
}
- delcount++;
+ mc.d_map.erase(start, iter);
+ }
+
+ else {
+ string qname = pcReverse(match);
+
+ delcount+=mc.d_map.count(tie(qname));
+ pair<cmap_t::iterator, cmap_t::iterator> range = mc.d_map.equal_range(tie(qname));
+ mc.d_map.erase(range.first, range.second);
}
- d_map.erase(start, iter);
- }
- else {
- string qname = pcReverse(match);
-
- delcount=d_map.count(tie(qname));
- pair<cmap_t::iterator, cmap_t::iterator> range = d_map.equal_range(tie(qname));
- d_map.erase(range.first, range.second);
}
- *d_statnumentries=AtomicCounter(d_map.size());
+ *d_statnumentries-=delcount; // XXX FIXME NEEDS TO BE ADJUSTED
return delcount;
}
// called from ueberbackend
@@ -236,7 +256,9 @@ bool PacketCache::getEntry(const string &qname, const QType& qtype, CacheEntryTy
cleanup();
}
- TryReadLock l(&d_mut); // take a readlock here
+ MapCombo& mc=getMap(pcReverse(qname));
+
+ TryReadLock l(&mc.d_mut); // take a readlock here
if(!l.gotIt()) {
S.inc( "deferred-cache-lookup");
return false;
@@ -252,9 +274,10 @@ bool PacketCache::getEntryLocked(const string &qname, const QType& qtype, CacheE
uint16_t qt = qtype.getCode();
//cerr<<"Lookup for maxReplyLen: "<<maxReplyLen<<endl;
string pcqname = pcReverse(qname);
- cmap_t::const_iterator i=d_map.find(tie(pcqname, qt, cet, zoneID, meritsRecursion, maxReplyLen, dnssecOK, hasEDNS, *age));
+ MapCombo& mc=getMap(pcqname);
+ cmap_t::const_iterator i=mc.d_map.find(tie(pcqname, qt, cet, zoneID, meritsRecursion, maxReplyLen, dnssecOK, hasEDNS, *age));
time_t now=time(0);
- bool ret=(i!=d_map.end() && i->ttd > now);
+ bool ret=(i!=mc.d_map.end() && i->ttd > now);
if(ret) {
if (age)
*age = now - i->created;
@@ -271,27 +294,29 @@ string PacketCache::pcReverse(const string &content)
return toLower(boost::replace_all_copy(tmp, ".", "\t"))+"\t";
}
-
map<char,int> PacketCache::getCounts()
{
- ReadLock l(&d_mut);
-
- map<char,int>ret;
int recursivePackets=0, nonRecursivePackets=0, queryCacheEntries=0, negQueryCacheEntries=0;
- for(cmap_t::const_iterator iter = d_map.begin() ; iter != d_map.end(); ++iter) {
- if(iter->ctype == PACKETCACHE)
- if(iter->meritsRecursion)
- recursivePackets++;
- else
- nonRecursivePackets++;
- else if(iter->ctype == QUERYCACHE) {
- if(iter->value.empty())
- negQueryCacheEntries++;
- else
- queryCacheEntries++;
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ ReadLock l(&mc.d_mut);
+
+ for(cmap_t::const_iterator iter = mc.d_map.begin() ; iter != mc.d_map.end(); ++iter) {
+ if(iter->ctype == PACKETCACHE)
+ if(iter->meritsRecursion)
+ recursivePackets++;
+ else
+ nonRecursivePackets++;
+ else if(iter->ctype == QUERYCACHE) {
+ if(iter->value.empty())
+ negQueryCacheEntries++;
+ else
+ queryCacheEntries++;
+ }
}
}
+ map<char,int> ret;
+
ret['!']=negQueryCacheEntries;
ret['Q']=queryCacheEntries;
ret['n']=nonRecursivePackets;
@@ -301,22 +326,29 @@ map<char,int> PacketCache::getCounts()
int PacketCache::size()
{
- ReadLock l(&d_mut);
- return d_map.size();
+ uint64_t ret=0;
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ ReadLock l(&mc.d_mut);
+ ret+=mc.d_map.size();
+ }
+ return ret;
}
/** readlock for figuring out which iterators to delete, upgrade to writelock when actually cleaning */
void PacketCache::cleanup()
{
- WriteLock l(&d_mut);
- *d_statnumentries=AtomicCounter(d_map.size());
+ *d_statnumentries=AtomicCounter(0);
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ ReadLock l(&mc.d_mut);
+ *d_statnumentries+=mc.d_map.size();
+ }
unsigned int maxCached=::arg().asNum("max-cache-entries");
unsigned int toTrim=0;
AtomicCounter::native_t cacheSize=*d_statnumentries;
-
+
if(maxCached && cacheSize > maxCached) {
toTrim = cacheSize - maxCached;
}
@@ -331,29 +363,33 @@ void PacketCache::cleanup()
// cerr<<"cacheSize: "<<cacheSize<<", lookAt: "<<lookAt<<", toTrim: "<<toTrim<<endl;
time_t now=time(0);
-
DLOG(L<<"Starting cache clean"<<endl);
- if(d_map.empty())
- return; // clean
-
- typedef cmap_t::nth_index<1>::type sequence_t;
- sequence_t& sidx=d_map.get<1>();
- unsigned int erased=0, lookedAt=0;
- for(sequence_t::iterator i=sidx.begin(); i != sidx.end(); lookedAt++) {
- if(i->ttd < now) {
- sidx.erase(i++);
- erased++;
- }
- else
- ++i;
-
- if(toTrim && erased > toTrim)
- break;
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ typedef cmap_t::nth_index<1>::type sequence_t;
+ sequence_t& sidx=mc.d_map.get<1>();
+ unsigned int erased=0, lookedAt=0;
+ for(sequence_t::iterator i=sidx.begin(); i != sidx.end(); lookedAt++) {
+ if(i->ttd < now) {
+ sidx.erase(i++);
+ erased++;
+ }
+ else
+ ++i;
- if(lookedAt > lookAt)
- break;
+ if(toTrim && erased > toTrim / d_maps.size())
+ break;
+
+ if(lookedAt > lookAt / d_maps.size())
+ break;
+ }
}
// cerr<<"erased: "<<erased<<endl;
- *d_statnumentries=AtomicCounter(d_map.size());
+
+ *d_statnumentries=AtomicCounter(0);
+ BOOST_FOREACH(MapCombo& mc, d_maps) {
+ ReadLock l(&mc.d_mut);
+ *d_statnumentries+=mc.d_map.size();
+ }
+
DLOG(L<<"Done with cache clean"<<endl);
}
View
@@ -115,9 +115,18 @@ private:
> cmap_t;
- cmap_t d_map;
+ struct MapCombo
+ {
+ pthread_rwlock_t d_mut;
+ cmap_t d_map;
+ };
+
+ vector<MapCombo> d_maps;
+ MapCombo& getMap(const std::string& qname)
+ {
+ return d_maps[burtle((const unsigned char*)qname.c_str(), qname.length(), 0) % d_maps.size()];
+ }
- pthread_rwlock_t d_mut;
AtomicCounter d_ops;
int d_ttl;

0 comments on commit d061045

Please sign in to comment.