3
3
import java .math .BigInteger ;
4
4
import java .util .ArrayList ;
5
5
import java .util .Arrays ;
6
+ import java .util .Date ;
6
7
import java .util .Map ;
7
8
import java .util .TreeMap ;
8
9
9
10
import com .mzlabs .count .util .IntVec ;
10
11
11
12
public final class SolnCache {
12
13
14
	/**
	 * One cache record: the owned key array, the computed value, and a mutable
	 * last-access stamp (absolute System.currentTimeMillis() value) used by the
	 * approximate-LRU pruning in possiblyReorg().
	 * lastAccess is read and written only while holding the owning sub-store's lock.
	 */
	private static final class StoreEntry {
		public final int[] key;        // cache's own copy of the lookup key
		public final BigInteger value; // cached calculation result (immutable)
		public long lastAccess;        // ms timestamp of last hit; guarded by sub-store lock

		public StoreEntry(final int[] key, final BigInteger value, final long lastAccess) {
			this.key = key;
			this.value = value;
			this.lastAccess = lastAccess;
		}
	}
+
13
26
	private final int nStores = 1377;                      // number of sub-stores (lock-striping granularity)
	private final long bigSize = 5000000;                  // total entry budget before a shrink is attempted
	private final int initialSizeTargetI = (int)(bigSize/nStores) + 5; // baseline per-store trigger value
	private final long baseTimeMS;                         // construction time; reorg math uses stamps relative to this
	private final ArrayList<Map<int[],StoreEntry>> hotStores = new ArrayList<Map<int[],StoreEntry>>(nStores); // sub-stores (to shallow trees), also sync on these
	private final int[] storeBound = new int[nStores];     // heuristic triggers for expensive attempted cache shrink; guarded by the matching sub-store lock
15
32
16
33
17
- public SolnCache () {
34
+ public SolnCache () {
35
+ baseTimeMS = System .currentTimeMillis ();
18
36
for (int i =0 ;i <nStores ;++i ) {
19
- hotStores .add (new TreeMap <int [],BigInteger >(IntVec .IntComp ));
37
+ hotStores .add (new TreeMap <int [],StoreEntry >(IntVec .IntComp ));
20
38
}
39
+ clear (); // get into initial state
21
40
}
22
41
23
42
	/**
	 * Look up f(xin) in the cache, computing and storing the value on a miss.
	 * The expensive f.eval() call is made while holding no locks, so concurrent
	 * misses on the same key may compute redundantly (last write wins; assumes f
	 * is deterministic — TODO confirm against CachableCalculation's contract).
	 * @param f   calculation to memoize
	 * @param xin argument vector; not mutated here (defensively copied before eval and storage)
	 * @return f evaluated at xin
	 */
	public BigInteger evalCached(final CachableCalculation f, final int[] xin) {
		// find the sub-store
		final long time = System.currentTimeMillis();
		final int storeIndex;
		final Map<int[],StoreEntry> hotStore;
		{
			int subi = Arrays.hashCode(xin)%nStores;
			if (subi<0) { // Java % can be negative; fold into [0,nStores)
				subi += nStores;
			}
			storeIndex = subi;
			hotStore = hotStores.get(storeIndex);
		}
		// hope for cached
		synchronized (hotStore) {
			final StoreEntry found = hotStore.get(xin);
			if (null!=found) {
				found.lastAccess = time; // refresh stamp for approximate LRU
				return found.value;
			}
		}
		// do the work (while not holding locks)
		final int[] xcopy = Arrays.copyOf(xin,xin.length); // copy: the store keeps the key array
		final BigInteger value = f.eval(xcopy);
		final StoreEntry entry = new StoreEntry(xcopy,value,time);
		// write back result
		boolean considerReorg = false;
		synchronized (hotStore) {
			hotStore.put(xcopy,entry);
			considerReorg = hotStore.size()>=storeBound[storeIndex]; // cheap per-store guard
		}
		if (considerReorg) {
			possiblyReorg(); // called with no locks held, as possiblyReorg() requires
		}
		return value;
	}
55
84
56
85
	/**
	 * Assumes no locks are held when this is called.
	 * Expensive check and re-org; amortized to cheap by the per-sub-store count
	 * guards (storeBound) that gate calls to this method.
	 * Trying to be an amortized-cheap approximate LRU: if the total size exceeds
	 * bigSize, drop every entry whose last access is at or before the mean
	 * access time (mean used as a cheap stand-in for the median).
	 */
	private void possiblyReorg() {
		synchronized (this) { // there may be other operations working in parallel to this reorg, but no other reorg
			// pass 1: estimate sizes and reset individual triggers
			long szEst = 0;
			for (int i=0;i<nStores;++i) {
				final Map<int[],StoreEntry> s = hotStores.get(i);
				synchronized (s) {
					final int szi = s.size();
					szEst += szi;
					storeBound[i] = 2*szi + initialSizeTargetI; // move trigger up to prevent a soon re-trigger
				}
			}
			if (szEst<=bigSize) {
				return; // total size still in bounds, take no more action
			}
			// total size too large, shrink the stores, reset the triggers
			// pass 2: use mean access time as an approximation for median access time
			long sumTimes = 0;
			long totEntries = 0; // > 0 here, since szEst > bigSize >= 0
			for (int i=0;i<nStores;++i) {
				final Map<int[],StoreEntry> s = hotStores.get(i);
				synchronized (s) {
					for (final StoreEntry v: s.values()) {
						sumTimes += v.lastAccess-baseTimeMS; // deltas keep the sum small
						totEntries += 1;
					}
				}
			}
			final long meanTime = (long)Math.ceil(sumTimes/(double)totEntries);
			// pass 3: prune stores (keep only entries accessed strictly after the mean) and reset triggers
			long newSize = 0;
			for (int i=0;i<nStores;++i) {
				final Map<int[],StoreEntry> s = hotStores.get(i);
				synchronized (s) {
					final StoreEntry[] vals = s.values().toArray(new StoreEntry[s.size()]);
					s.clear();
					for (final StoreEntry v: vals) {
						if (v.lastAccess-baseTimeMS>meanTime) {
							s.put(v.key,v);
						}
					}
					final int szi = s.size();
					newSize += szi;
					storeBound[i] = 2*szi + initialSizeTargetI; // move trigger up to prevent a soon re-trigger
				}
			}
			// NOTE(review): new Date() is legacy API; java.time.Instant would be the modern choice
			System.err.println("#\t info: reorg stores " + szEst + " -> " + newSize + "\t " + new Date());
		}
	}
139
+
140
+ /**
141
+ * best effort, not atomic across stores; but still fairly expensive because of lock acquisition costs
58
142
* @return
59
143
*/
60
144
public long size () {
61
145
long size = 0 ;
62
- for (final Map <int [],BigInteger > s : hotStores ) {
146
+ for (int i =0 ;i <nStores ;++i ) {
147
+ final Map <int [],StoreEntry > s = hotStores .get (i );
63
148
synchronized (s ) {
64
149
size += s .size ();
65
150
}
66
151
}
67
152
return size ;
68
153
}
69
-
70
154
71
155
/**
72
156
* best effort, not atomic across stores
73
157
*/
74
158
public void clear () {
75
- for (final Map <int [],BigInteger > s : hotStores ) {
159
+ for (int i =0 ;i <nStores ;++i ) {
160
+ final Map <int [],StoreEntry > s = hotStores .get (i );
76
161
synchronized (s ) {
77
162
s .clear ();
163
+ storeBound [i ] = initialSizeTargetI ;
78
164
}
79
165
}
80
166
}
0 commit comments