/* flat_storage.h -- flat memory allocator for memcached. */
1 /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2
3 /**
4 * overview
5 * --------
6 *
7 * objects are memcache key-value pairs. chunks are units of memory used to
8 * store the objects. objects are classified as either a large object or a
9 * small object. a large object is stored using one or more large chunks. a
10 * small object is stored using one or more small chunks. there is one
11 * exception to this rule -- if the key is so large that it spans multiple small
12 * chunks, it will be stored in a large chunk. each object consists of a title
13 * chunk, optionally followed by body chunks.
14 *
15 *
16 * allocation strategy
17 * -------------------
18 *
19 * when allocating for a large object:
20 *
21 * 1) check large chunk free list. if there are free chunks, use them.
22 *
23 * 2) ask the master storage controller for more memory. if that succeeds, the
24 * free lists will be updated.
25 *
26 * 3) if the master storage controller has no more memory:
27 * a) if access-time(small) OLDER access-time(large), invalidate small.
28 * find remaining objects in the same slab as small. retain objects
29 * that are accessed more recently than access-time(small-queue).
30 * b) if access-time(small) YOUNGER access-time(large), dequeue from large
31 * LRU queue.
32 *
33 * when allocating for a small object:
34 *
35 * 1) check small chunk free list. if there are free chunks, use them.
36 *
37 * 2) check large chunk free list. if there are free chunks, break it up into
38 * small chunks and update the small chunk free lists.
39 *
40 * 3) ask the master storage controller for more memory. if that succeeds, the
41 * free lists will be updated.
42 *
43 * 4) if the master storage controller has no more memory:
44 * a) if access-time(small) OLDER access-time(large), dequeue from small
45 * LRU queue.
46 * b) if access-time(small) YOUNGER access-time(large), break up large
47 * object into small objects.
48 *
49 *
50 * structures
51 * ----------
52 *
53 * each large chunk consists of:
54 * - a flag indicating whether the chunk is in use.
55 * - a flag indicating if the chunk is used as a whole or as multiple
56 * small chunks.
57 * - a flag indicating whether the chunk is a title chunk or a body chunk.
58 *
59 * if the large chunk is split into multiple small chunks, it contains an array
60 * with fields for each component small chunk:
61 * - a flag indicating whether the chunk is in use.
62 * - a flag indicating whether the chunk is a title chunk or a body chunk.
63 *
64 *
65 * title chunks also contain:
66 * next # LRU next
67 * prev # LRU prev
68 * h_next # hash next
69 * chunk_next # chunk next
70 * rel_time_t time
71 * rel_time_t exptime
72 * int nbytes
73 * unsigned short refcount
74 * uint8_t it_flags
75 * uint8_t nkey # key length.
76 * data
77 *
78 * body chunks contain:
79 * pointer to title
80 * data
81 */
82
83 #if !defined(_flat_storage_h_)
84 #define _flat_storage_h_
85
86 #include "generic.h"
87
88 #include <assert.h>
89 #include <string.h>
90 #include <stdint.h>
91
/* PACKED marks a struct as having no padding between members.  the chunk
 * layout arithmetic in this header (e.g. *_CHUNK_TAIL_SZ) depends on packed
 * layout.  previously PACKED was only defined for GCC-compatible compilers,
 * so every later use of it failed to compile elsewhere; define it to nothing
 * as a fallback so the header at least parses (layout asserts will catch a
 * padded build). */
#if defined(__GNUC__)
#define PACKED __attribute__((packed))
#else
#define PACKED
#endif
95
/* FA_STATIC / FA_STATIC_DECL control the linkage of the flat allocator's
 * internal helpers.  when FLAT_STORAGE_TESTS is defined, the helpers get
 * external linkage so unit tests can call them directly; otherwise they are
 * static to the flat storage module.  FA_STATIC_DECL(decl) expands to the
 * appropriate declaration (or nothing) depending on whether this header is
 * included from the module itself (FLAT_STORAGE_MODULE) or a client. */
#if defined(FLAT_STORAGE_TESTS)
#define FA_STATIC
#if defined(FLAT_STORAGE_MODULE)
#define FA_STATIC_DECL(decl) decl
#else
#define FA_STATIC_DECL(decl) extern decl
#endif /* #if defined(FLAT_STORAGE_MODULE) */

#else
#define FA_STATIC static
#if defined(FLAT_STORAGE_MODULE)
#define FA_STATIC_DECL(decl) static decl
#else
#define FA_STATIC_DECL(decl)
#endif /* #if defined(FLAT_STORAGE_MODULE) */
#endif /* #if defined(FLAT_STORAGE_TESTS) */
112
113 /**
114 * constants
115 */
116
/* per-large-chunk state flags (stored in large_chunk_s.flags). */
typedef enum large_chunk_flags_e {
    LARGE_CHUNK_INITIALIZED = 0x1, /* if set, that means the chunk has been
                                    * initialized. */
    LARGE_CHUNK_USED = 0x2,        /* if set, chunk is used. */
    LARGE_CHUNK_BROKEN = 0x4,      /* if set, chunk is used as a set of small
                                    * chunks. */
    LARGE_CHUNK_TITLE = 0x8,       /* if set, chunk is a title.  it is a
                                    * contradiction for both
                                    * LARGE_CHUNK_BROKEN and
                                    * LARGE_CHUNK_TITLE to be set. */
    LARGE_CHUNK_FREE = 0x10,       /* if set, chunk is free. */
} large_chunk_flags_t;


/* per-small-chunk state flags (stored in small_chunk_s.flags). */
typedef enum small_chunk_flags_e {
    SMALL_CHUNK_INITIALIZED = 0x1, /* if set, that means the chunk has been
                                    * initialized. */
    SMALL_CHUNK_USED = 0x2,        /* if set, chunk is used. */
    SMALL_CHUNK_TITLE = 0x8,       /* if set, chunk is a title. */
    SMALL_CHUNK_FREE = 0x10,       /* if set, chunk is free. */
    SMALL_CHUNK_COALESCE_PENDING = 0x20, /* if set, chunk is free but pending a
                                          * coalesce.  it should *not* be in
                                          * the free list. */
} small_chunk_flags_t;


/* flags stored in the item header's it_flags byte. */
typedef enum it_flags_e {
    ITEM_VALID = 0x1,
    ITEM_LINKED = 0x2,             /* linked into the LRU. */
    ITEM_DELETED = 0x4,            /* deferred delete. */
    ITEM_HAS_IP_ADDRESS = 0x10,    /* item has an associated IP address. */
    ITEM_HAS_TIMESTAMP = 0x20,     /* item has an associated timestamp. */
} it_flags_t;


/* selects which chunk size class an operation refers to. */
typedef enum chunk_type_e {
    SMALL_CHUNK,
    LARGE_CHUNK,
} chunk_type_t;
156
157
158 #define LARGE_CHUNK_SZ 1024 /* large chunk size */
7a1f4fa7 » ttung
2008-05-06 fixes to flat allocator, merge from trunk
159 #define SMALL_CHUNK_SZ 124 /* small chunk size */
f045fd8a » ttung
2008-05-01 flat memory allocator
160
#define FLAT_STORAGE_INCREMENT_DELTA (LARGE_CHUNK_SZ * 1024) /* grow the
                                                              * storage region
                                                              * 1024 large
                                                              * chunks (1MB)
                                                              * at a time. */
163
164 /** instead of using raw pointers, we use chunk pointers. we address things
165 * intervals of CHUNK_ADDRESSING_SZ. it is possible for SMALL_CHUNK_SZ to be
166 * smaller than the addressing interval, as long as we can still uniquely
167 * identify each small chunk. this means that:
168 * floor(LARGE_CHUNK_SZ / SMALL_CHUNK_SZ) <=
169 * floor(LARGE_CHUNK_SZ / CHUNK_ADDRESSING_SZ)
170 *
171 * we will check for this condition in an assert in items_init(..).
172 */
173 #define CHUNK_ADDRESSING_SZ 128
174 #define SMALL_CHUNKS_PER_LARGE_CHUNK ((LARGE_CHUNK_SZ - LARGE_CHUNK_TAIL_SZ) / (SMALL_CHUNK_SZ))
175
#define MIN_LARGE_CHUNK_CAPACITY ((LARGE_TITLE_CHUNK_DATA_SZ <= LARGE_BODY_CHUNK_DATA_SZ) ? \
                                  LARGE_TITLE_CHUNK_DATA_SZ : LARGE_BODY_CHUNK_DATA_SZ) /* this is the smallest number of data
                                                                                         * bytes a large chunk is guaranteed
                                                                                         * to hold. */
179
180 #define MIN_SMALL_CHUNK_CAPACITY ((SMALL_TITLE_CHUNK_DATA_SZ <= SMALL_BODY_CHUNK_DATA_SZ) ? \
181 SMALL_TITLE_CHUNK_DATA_SZ : SMALL_BODY_CHUNK_DATA_SZ) /* this is the smallest number of data
182 * bytes a small chunk can hold. */
183
90e3f181 » ttung
2008-07-08 merge LRU queues, other fixes.
184 #define LRU_SEARCH_DEPTH 50 /* number of items we'll check in the
185 * LRU to find items to evict. */
f045fd8a » ttung
2008-05-01 flat memory allocator
186
187 /**
188 * data types and structures
189 */
190
191 typedef uint32_t chunkptr_t;
7a1f4fa7 » ttung
2008-05-06 fixes to flat allocator, merge from trunk
192 typedef chunkptr_t item_ptr_t;
f045fd8a » ttung
2008-05-01 flat memory allocator
193 #define NULL_CHUNKPTR ((chunkptr_t) (0))
7a1f4fa7 » ttung
2008-05-06 fixes to flat allocator, merge from trunk
194 #define NULL_ITEM_PTR ((item_ptr_t) (0))
f045fd8a » ttung
2008-05-01 flat memory allocator
195
196 /**
197 * forward declarations
198 */
199 typedef union chunk_u chunk_t;
200 typedef union item_u item;
201 typedef struct large_chunk_s large_chunk_t;
202 typedef struct small_chunk_s small_chunk_t;
203
204 #define TITLE_CHUNK_HEADER_CONTENTS \
7a1f4fa7 » ttung
2008-05-06 fixes to flat allocator, merge from trunk
205 item_ptr_t h_next; /* hash next */ \
f045fd8a » ttung
2008-05-01 flat memory allocator
206 chunkptr_t next; /* LRU next */ \
207 chunkptr_t prev; /* LRU prev */ \
208 chunkptr_t next_chunk; /* next chunk */ \
209 rel_time_t time; /* most recent access */ \
210 rel_time_t exptime; /* expire time */ \
211 int nbytes; /* size of data */ \
212 unsigned int flags; /* flags */ \
213 unsigned short refcount; \
214 uint8_t it_flags; /* it flags */ \
215 uint8_t nkey; /* key length */ \
216
217
218 #define LARGE_BODY_CHUNK_HEADER \
219 chunkptr_t next_chunk;
220
221 #define SMALL_BODY_CHUNK_HEADER \
7a1f4fa7 » ttung
2008-05-06 fixes to flat allocator, merge from trunk
222 chunkptr_t prev_chunk; \
f045fd8a » ttung
2008-05-01 flat memory allocator
223 chunkptr_t next_chunk;
224
225 #define LARGE_CHUNK_TAIL \
226 uint8_t flags;
227
228 #define SMALL_CHUNK_TAIL \
229 uint8_t flags;
230
231 #define LARGE_CHUNK_TAIL_SZ sizeof(struct { LARGE_CHUNK_TAIL } PACKED)
232 #define SMALL_CHUNK_TAIL_SZ sizeof(struct { SMALL_CHUNK_TAIL } PACKED)
233 #define TITLE_CHUNK_HEADER_SZ sizeof(title_chunk_header_t)
234 #define LARGE_BODY_CHUNK_HEADER_SZ sizeof(struct { LARGE_BODY_CHUNK_HEADER } PACKED)
235 #define SMALL_BODY_CHUNK_HEADER_SZ sizeof(struct { SMALL_BODY_CHUNK_HEADER } PACKED)
236
237 typedef struct title_chunk_header_s title_chunk_header_t;
238 struct title_chunk_header_s {
239 TITLE_CHUNK_HEADER_CONTENTS;
240 } PACKED;
241
242 #define LARGE_TITLE_CHUNK_DATA_SZ (LARGE_CHUNK_SZ - LARGE_CHUNK_TAIL_SZ - TITLE_CHUNK_HEADER_SZ)
243 typedef struct large_title_chunk_s large_title_chunk_t;
244 struct large_title_chunk_s {
245 TITLE_CHUNK_HEADER_CONTENTS;
246 char data[LARGE_TITLE_CHUNK_DATA_SZ];
247 } PACKED;
248
249 #define LARGE_BODY_CHUNK_DATA_SZ (LARGE_CHUNK_SZ - LARGE_CHUNK_TAIL_SZ - LARGE_BODY_CHUNK_HEADER_SZ)
250 typedef struct large_body_chunk_s large_body_chunk_t;
251 struct large_body_chunk_s {
252 LARGE_BODY_CHUNK_HEADER;
253 char data[LARGE_BODY_CHUNK_DATA_SZ];
254 } PACKED;
255
256 typedef struct large_free_chunk_s large_free_chunk_t;
257 struct large_free_chunk_s {
258 /* large chunks do not need prev_next and next_prev pointers because we never remove an element
259 * from the middle of the free list. */
260 large_chunk_t* next;
261 large_chunk_t* prev;
262 };
263
264 #define SMALL_TITLE_CHUNK_DATA_SZ (SMALL_CHUNK_SZ - SMALL_CHUNK_TAIL_SZ - TITLE_CHUNK_HEADER_SZ)
265 typedef struct small_title_chunk_s small_title_chunk_t;
266 struct small_title_chunk_s {
267 TITLE_CHUNK_HEADER_CONTENTS;
268 char data[SMALL_TITLE_CHUNK_DATA_SZ];
269 } PACKED;
270
271 #define SMALL_BODY_CHUNK_DATA_SZ (SMALL_CHUNK_SZ - SMALL_CHUNK_TAIL_SZ - SMALL_BODY_CHUNK_HEADER_SZ)
272 typedef struct small_body_chunk_s small_body_chunk_t;
273 struct small_body_chunk_s {
274 SMALL_BODY_CHUNK_HEADER;
275 char data[SMALL_BODY_CHUNK_DATA_SZ];
276 } PACKED;
277
278 typedef struct small_free_chunk_s small_free_chunk_t;
279 struct small_free_chunk_s {
280 small_chunk_t** prev_next;
281 small_chunk_t* next;
282 };
283
284
285 #define sc_title __sc.__sc_title
286 #define sc_body __sc.__sc_body
287 #define sc_free __sc.__sc_free
288 struct small_chunk_s {
289 /* we could be one of 3 things:
290 * 1) a small chunk (sc) title.
291 * 2) a small chunk (sc) body.
292 * 3) a free chunk. */
293 union {
294 small_title_chunk_t __sc_title;
295 small_body_chunk_t __sc_body;
296 small_free_chunk_t __sc_free;
297 } PACKED __sc;
298
299 SMALL_CHUNK_TAIL;
300 } PACKED;
301
302
303 typedef struct large_broken_chunk_s large_broken_chunk_t;
304 struct large_broken_chunk_s {
305 small_chunk_t lbc[SMALL_CHUNKS_PER_LARGE_CHUNK];
306 uint8_t small_chunks_allocated;
307 };
308
309
310 union item_u {
311 title_chunk_header_t empty_header;
312 large_title_chunk_t large_title;
313 small_title_chunk_t small_title;
314 };
315
316
317
318 #define lc_title __lc.__lc_title
319 #define lc_body __lc.__lc_body
320 #define lc_broken __lc.__lc_broken
321 #define lc_free __lc.__lc_free
322 struct large_chunk_s {
323 /* we could be one of 4 things:
324 * 1) a large chunk (lc) title.
325 * 2) a large chunk (lc) body.
326 * 3) a set of small (sc) titles & bodies.
327 * 4) a free chunk. */
328 union {
329 large_title_chunk_t __lc_title;
330 large_body_chunk_t __lc_body;
331 large_broken_chunk_t __lc_broken;
332 large_free_chunk_t __lc_free;
333 } PACKED __lc;
334
335 LARGE_CHUNK_TAIL;
336 } PACKED;
337
338 union chunk_u {
339 large_chunk_t lc;
340 small_chunk_t sc;
341 };
342
343
/* global bookkeeping for the flat storage region; the single instance is the
 * global `fsi` declared below. */
typedef struct flat_storage_info_s flat_storage_info_t;
struct flat_storage_info_s {
    void* mmap_start;                   // start of the mmap'ed region.
    large_chunk_t* flat_storage_start;  // start of the storage region.
    large_chunk_t* uninitialized_start; // start of the uninitialized region.
    size_t unused_memory;               // unused memory region.

    // large chunk free list
    large_chunk_t* large_free_list;     // free list head.
    size_t large_free_list_sz;          // number of large free list chunks.

    // small chunk free list
    small_chunk_t* small_free_list;     // free list head.
    size_t small_free_list_sz;          // number of small free list chunks.

    // LRU.
    item* lru_head;
    item* lru_tail;

    bool initialized;

    // statistics counters.
    struct {
        uint64_t large_title_chunks;
        uint64_t large_body_chunks;
        uint64_t large_broken_chunks;
        uint64_t small_title_chunks;
        uint64_t small_body_chunks;
        // histogram indexed by how many small chunks of a broken large
        // chunk are allocated.
        uint64_t broken_chunk_histogram[SMALL_CHUNKS_PER_LARGE_CHUNK + 1];

        uint64_t break_events;          // large chunks broken into small chunks.
        uint64_t unbreak_events;        // broken chunks coalesced back.

        uint64_t migrates;
    } stats;
};


extern flat_storage_info_t fsi;
382
383
/* memset a region to a poison value (0x5a) in debug builds so stale reads of
 * recycled memory are easy to spot; compiles to nothing when NDEBUG is set. */
static inline void DEBUG_CLEAR(void* ptr, const size_t bytes) {
#if defined(NDEBUG)
    (void) ptr;                 /* silence unused-parameter warnings. */
    (void) bytes;
#else
    memset(ptr, 0x5a, bytes);
#endif /* #if defined(NDEBUG) */
}
390
391
392 static inline bool is_large_chunk(const size_t nkey, const size_t nbytes) {
393 size_t small_chunks_max_size;
394
395 // calculate how many bytes (SMALL_CHUNKS_PER_LARGE_CHUNK - 1) small chunks
396 // can hold. any larger and it is simpler and better to use a large chunk.
397 // note that one of the small chunks is taken up by the header.
398 small_chunks_max_size = SMALL_TITLE_CHUNK_DATA_SZ +
399 (SMALL_BODY_CHUNK_DATA_SZ * (SMALL_CHUNKS_PER_LARGE_CHUNK - 2));
400 if (nkey + nbytes > small_chunks_max_size) {
401 return true;
402 }
403
404 return false;
405 }
406
407
/* returns true if the item is stored in large chunks, based on its key and
 * data sizes (see is_large_chunk). */
static inline bool is_item_large_chunk(const item* it) {
    return is_large_chunk(it->empty_header.nkey, it->empty_header.nbytes);
}
411
412
/* returns the number of chunks (one title chunk plus zero or more body
 * chunks) needed to store an object with the given key and data sizes.  the
 * chunk size class is chosen by is_large_chunk(..). */
static inline size_t chunks_needed(const size_t nkey, const size_t nbytes) {
    size_t total_bytes = nkey + nbytes;
    if (is_large_chunk(nkey, nbytes)) {
        /* large chunk */
        if (total_bytes < LARGE_TITLE_CHUNK_DATA_SZ) {
            return 1;
        }

        /* title chunk is full; remainder spills into body chunks, rounded up. */
        total_bytes -= LARGE_TITLE_CHUNK_DATA_SZ;
        return 1 + ((total_bytes + LARGE_BODY_CHUNK_DATA_SZ - 1) /
                    LARGE_BODY_CHUNK_DATA_SZ);
    } else {
        /* small chunk */
        if (total_bytes < SMALL_TITLE_CHUNK_DATA_SZ) {
            return 1;
        }

        total_bytes -= SMALL_TITLE_CHUNK_DATA_SZ;
        return 1 + ((total_bytes + SMALL_BODY_CHUNK_DATA_SZ - 1) /
                    SMALL_BODY_CHUNK_DATA_SZ);
    }
}
435
436
/* returns the number of chunks in the item (title and body chunks). */
static inline size_t chunks_in_item(const item* it) {
    return chunks_needed(it->empty_header.nkey, it->empty_header.nbytes);
}
441
442
/* returns the number of chunks in the item that hold at least one byte of
 * data, i.e. the total chunk count minus any chunks occupied entirely by the
 * key.  returns 0 if the item has no data. */
static inline size_t data_chunks_in_item(const item* it) {
    size_t count = chunks_in_item(it);
    size_t key_only_chunks;
    size_t title_data_sz;

    /* if we have no data, return 0. */
    if (it->empty_header.nbytes == 0) {
        return 0;
    }

    /* exclude chunks taken up entirely by the key */
    if (is_item_large_chunk(it)) {
        title_data_sz = LARGE_TITLE_CHUNK_DATA_SZ;
        if (it->empty_header.nkey < title_data_sz) {
            /* key ends inside the title chunk, so every chunk holds data. */
            key_only_chunks = 0;
        } else {
            /* title chunk plus however many body chunks the key fills. */
            key_only_chunks = 1 + ((it->empty_header.nkey - LARGE_TITLE_CHUNK_DATA_SZ) / LARGE_BODY_CHUNK_DATA_SZ);
        }
    } else {
        title_data_sz = SMALL_TITLE_CHUNK_DATA_SZ;
        if (it->empty_header.nkey < title_data_sz) {
            key_only_chunks = 0;
        } else {
            key_only_chunks = 1 + ((it->empty_header.nkey - SMALL_TITLE_CHUNK_DATA_SZ) / SMALL_BODY_CHUNK_DATA_SZ);
        }
    }

    count -= key_only_chunks;

    return count;
}
475
476
/* returns the number of unused data bytes (slack) in the last chunk of an
 * object with the given key and data sizes. */
static inline size_t slackspace(const size_t nkey, const size_t nbytes) {
    size_t item_sz = nkey + nbytes;

    if (is_large_chunk(nkey, nbytes)) {
        if (item_sz < LARGE_TITLE_CHUNK_DATA_SZ) {
            /* everything fits in the title chunk. */
            return LARGE_TITLE_CHUNK_DATA_SZ - item_sz;
        } else {
            size_t additional_chunks;
            item_sz -= LARGE_TITLE_CHUNK_DATA_SZ;
            /* round up to whole body chunks; the slack is the rounding. */
            additional_chunks = (item_sz + LARGE_BODY_CHUNK_DATA_SZ - 1) / LARGE_BODY_CHUNK_DATA_SZ;

            return (additional_chunks * LARGE_BODY_CHUNK_DATA_SZ) - item_sz;
        }
    } else {
        if (item_sz < SMALL_TITLE_CHUNK_DATA_SZ) {
            return SMALL_TITLE_CHUNK_DATA_SZ - item_sz;
        } else {
            size_t additional_chunks;
            item_sz -= SMALL_TITLE_CHUNK_DATA_SZ;
            additional_chunks = (item_sz + SMALL_BODY_CHUNK_DATA_SZ - 1) / SMALL_BODY_CHUNK_DATA_SZ;

            return (additional_chunks * SMALL_BODY_CHUNK_DATA_SZ) - item_sz;
        }
    }
}
502
503
/* returns the slackspace of an existing item (see slackspace). */
static inline size_t item_slackspace(item* it) {
    return slackspace(it->empty_header.nkey, it->empty_header.nbytes);
}
507
508
/**
 * this takes a chunkptr_t and translates it to a chunk address.
 *
 * chunkptrs are 1-based (0 is reserved for NULL_CHUNKPTR) and address memory
 * in CHUNK_ADDRESSING_SZ intervals from fsi.flat_storage_start.  within a
 * large chunk, successive chunkptr values step by SMALL_CHUNK_SZ so each
 * small chunk is individually addressable.
 */
static inline chunk_t* get_chunk_address(chunkptr_t chunkptr) {
    intptr_t retval = (intptr_t) fsi.flat_storage_start;
    intptr_t remainder;

    if (chunkptr == NULL_CHUNKPTR) {
        return NULL;
    }

    chunkptr --;                    /* offset by 1 so 0 has special meaning. */
    /* index of the small chunk within its containing large chunk. */
    remainder = chunkptr % (LARGE_CHUNK_SZ / CHUNK_ADDRESSING_SZ);

    retval += ( (chunkptr - remainder) * CHUNK_ADDRESSING_SZ ); /* large chunk base. */
    retval += ( remainder * SMALL_CHUNK_SZ );                   /* small chunk offset. */
    return (chunk_t*) retval;
}
527
528
/**
 * this takes a chunk address and translates it to a chunkptr_t.
 *
 * inverse of get_chunk_address(..).  the result is 1-based so that 0 can
 * serve as NULL_CHUNKPTR.  the address must lie within the initialized
 * storage region and be aligned either to a large chunk or to a small chunk
 * within one.
 */
static inline chunkptr_t get_chunkptr(const chunk_t* _addr) {
    intptr_t addr = (intptr_t) _addr;
    intptr_t diff = addr - ((intptr_t) fsi.flat_storage_start);
    intptr_t large_chunk_index = diff / LARGE_CHUNK_SZ;
    intptr_t remainder = diff % LARGE_CHUNK_SZ;
    chunkptr_t retval;

    if (_addr == NULL) {
        return NULL_CHUNKPTR;
    }

    assert(addr >= (intptr_t) fsi.flat_storage_start);
    assert(addr < (intptr_t) fsi.uninitialized_start);

    /* each large chunk spans (LARGE_CHUNK_SZ / CHUNK_ADDRESSING_SZ) chunkptr
     * slots. */
    retval = large_chunk_index * (LARGE_CHUNK_SZ / CHUNK_ADDRESSING_SZ);

    if (remainder == 0) {
        /* either pointing to a large chunk ptr, or the first small chunk of a
         * large chunk */
    } else {
        assert(remainder % SMALL_CHUNK_SZ == 0);
        retval += (remainder / SMALL_CHUNK_SZ);
    }
    retval ++;                      /* offset by 1 so 0 has special meaning. */
    return retval;
}
558
559
/* given a small chunk, return the large chunk that contains it.  the parent
 * must be initialized, in use, and broken into small chunks (asserted in
 * debug builds). */
static inline large_chunk_t* get_parent_chunk(small_chunk_t* small) {
    intptr_t addr = (intptr_t) small;
    intptr_t diff = addr - ((intptr_t) fsi.flat_storage_start);
    /* round down to the enclosing large chunk boundary. */
    intptr_t large_chunk_index = diff / LARGE_CHUNK_SZ;
    intptr_t large_chunk_addr = (large_chunk_index * LARGE_CHUNK_SZ) +
        (intptr_t) fsi.flat_storage_start;
    large_chunk_t* lc = (large_chunk_t*) large_chunk_addr;

    assert(lc->flags == (LARGE_CHUNK_INITIALIZED | LARGE_CHUNK_USED |
                         LARGE_CHUNK_BROKEN));

    return lc;
}
573
574
/* const-qualified wrapper around get_parent_chunk. */
static inline const large_chunk_t* get_parent_chunk_const(const small_chunk_t* small) {
    return (const large_chunk_t*) get_parent_chunk( (small_chunk_t*) small );
}
578
579
/* the following are a set of abstractions to remove casting from flat_storage.c */

/* view a small title chunk as an item. */
static inline item* get_item_from_small_title(small_title_chunk_t* small_title) {
    return (item*) small_title;
}

/* view a large title chunk as an item. */
static inline item* get_item_from_large_title(large_title_chunk_t* large_title) {
    return (item*) large_title;
}

/* view a chunk as an item.  asserts (in debug builds) that the chunk is a
 * used, initialized title chunk of the appropriate size class. */
static inline item* get_item_from_chunk(chunk_t* chunk) {
    item* it = (item*) chunk;

    if (it != NULL) {
        assert( is_item_large_chunk(it) ?
                (chunk->lc.flags == (LARGE_CHUNK_INITIALIZED | LARGE_CHUNK_USED | LARGE_CHUNK_TITLE)) :
                (chunk->sc.flags == (SMALL_CHUNK_INITIALIZED | SMALL_CHUNK_USED | SMALL_CHUNK_TITLE)) );
    }

    return it;
}

/* view an item as a chunk (the title chunk is the item). */
static inline chunk_t* get_chunk_from_item(item* it) {
    return (chunk_t*) it;
}


static inline chunk_t* get_chunk_from_small_chunk(small_chunk_t* sc) {
    return (chunk_t*) sc;
}


static inline const chunk_t* get_chunk_from_small_chunk_const(const small_chunk_t* sc) {
    return (const chunk_t*) sc;
}
614
615
/* convert between item pointers and item_ptr_t handles. */
static inline item* ITEM(item_ptr_t iptr) { return get_item_from_chunk(get_chunk_address( (chunkptr_t) iptr)); }
static inline item_ptr_t ITEM_PTR(item* it) { return (item_ptr_t) get_chunkptr(get_chunk_from_item(it)); }
/* NOTE(review): despite its name, this returns true when iptr is NOT null
 * (iptr != NULL_ITEM_PTR).  callers appear to depend on this inverted sense;
 * confirm against flat_storage.c before renaming or "fixing" it. */
static inline bool ITEM_PTR_IS_NULL(item_ptr_t iptr) { return iptr != NULL_ITEM_PTR; }
/* item header field accessors: key length and data length. */
static inline uint8_t ITEM_nkey(const item* it) { return it->empty_header.nkey; }
static inline int ITEM_nbytes(item* it) { return it->empty_header.nbytes; }
/* returns the total memory footprint of the item in bytes: the number of
 * chunks it occupies (same arithmetic as chunks_needed) times the chunk
 * struct size for its size class. */
static inline size_t ITEM_ntotal(item* it) {
    if (is_item_large_chunk(it)) {
        size_t item_sz = it->empty_header.nkey + it->empty_header.nbytes;

        if (item_sz < LARGE_TITLE_CHUNK_DATA_SZ) {
            /* fits entirely in the title chunk. */
            return sizeof(large_chunk_t);
        } else {
            size_t additional_chunks;
            item_sz -= LARGE_TITLE_CHUNK_DATA_SZ;
            /* body chunks needed beyond the title chunk, rounded up. */
            additional_chunks = (item_sz + LARGE_BODY_CHUNK_DATA_SZ - 1) / LARGE_BODY_CHUNK_DATA_SZ;

            return sizeof(large_chunk_t) * (additional_chunks + 1);
        }
    } else {
        size_t item_sz = it->empty_header.nkey + it->empty_header.nbytes;

        if (item_sz < SMALL_TITLE_CHUNK_DATA_SZ) {
            return sizeof(small_chunk_t);
        } else {
            size_t additional_chunks;
            item_sz -= SMALL_TITLE_CHUNK_DATA_SZ;
            additional_chunks = (item_sz + SMALL_BODY_CHUNK_DATA_SZ - 1) / SMALL_BODY_CHUNK_DATA_SZ;

            return sizeof(small_chunk_t) * (additional_chunks + 1);
        }
    }
}
649
/* item header field accessors. */
static inline unsigned int ITEM_flags(item* it) { return it->empty_header.flags; }
static inline rel_time_t ITEM_time(item* it) { return it->empty_header.time; }        /* most recent access. */
static inline rel_time_t ITEM_exptime(item* it) { return it->empty_header.exptime; }  /* expire time. */
static inline unsigned short ITEM_refcount(item* it) { return it->empty_header.refcount; }

/* item header field mutators. */
static inline void ITEM_set_nbytes(item* it, int nbytes) { it->empty_header.nbytes = nbytes; }
static inline void ITEM_set_exptime(item* it, rel_time_t t) { it->empty_header.exptime = t; }
657
/* hash-chain accessors. */
static inline item_ptr_t ITEM_PTR_h_next(item_ptr_t iptr) { return ITEM(iptr)->empty_header.h_next; }
static inline item_ptr_t* ITEM_h_next_p(item* it) { return &it->empty_header.h_next; }

static inline void ITEM_set_h_next(item* it, item_ptr_t next) { it->empty_header.h_next = next; }

/* it_flags predicates. */
static inline bool ITEM_is_valid(item* it) { return it->empty_header.it_flags & ITEM_VALID; }
static inline bool ITEM_has_timestamp(item* it) { return it->empty_header.it_flags & ITEM_HAS_TIMESTAMP; }
static inline bool ITEM_has_ip_address(item* it) { return it->empty_header.it_flags & ITEM_HAS_IP_ADDRESS; }

/* it_flags mutators. */
static inline void ITEM_mark_deleted(item* it) { it->empty_header.it_flags |= ITEM_DELETED; }
static inline void ITEM_unmark_deleted(item* it) { it->empty_header.it_flags &= ~ITEM_DELETED; }
static inline void ITEM_set_has_timestamp(item* it) { it->empty_header.it_flags |= ITEM_HAS_TIMESTAMP; }
static inline void ITEM_clear_has_timestamp(item* it) { it->empty_header.it_flags &= ~(ITEM_HAS_TIMESTAMP); }
static inline void ITEM_set_has_ip_address(item* it) { it->empty_header.it_flags |= ITEM_HAS_IP_ADDRESS; }
static inline void ITEM_clear_has_ip_address(item* it) { it->empty_header.it_flags &= ~(ITEM_HAS_IP_ADDRESS); }
673
/* public interface of the flat allocator (defined in flat_storage.c). */
extern void flat_storage_init(size_t maxbytes);
extern char* do_item_cachedump(const chunk_type_t type, const unsigned int limit, unsigned int *bytes);
extern const char* item_key_copy(const item* it, char* keyptr);

DECL_MT_FUNC(char*, flat_allocator_stats, (size_t* bytes));

/* internal helpers; declared here only when the unit-test build
 * (FLAT_STORAGE_TESTS) needs external linkage -- see FA_STATIC_DECL. */
FA_STATIC_DECL(bool flat_storage_alloc(void));
FA_STATIC_DECL(item* get_lru_item(void));
683
/* return the smaller of two size_t values.  (name kept for compatibility,
 * though double-underscore identifiers are technically reserved.) */
static inline size_t __fs_MIN(size_t a, size_t b) {
    return (a < b) ? a : b;
}
691
/* return the larger of two size_t values. */
static inline size_t __fs_MAX(size_t a, size_t b) {
    return (a > b) ? a : b;
}
699
700
/* this macro walks over the item and calls applier with the following
 * arguments:
 *   applier(it, ptr, bytes)
 * where: it    - the item object
 *        ptr   - the pointer to the data
 *        bytes - the number of bytes after ptr that contain the requested bytes
 *
 * _offset and _nbytes select the byte range (relative to the start of the
 * key+data region) to visit.  if _beyond_item_boundary is true, the walk may
 * run past the item's logical end, up to the capacity of the final chunk.
 * _const should be `const` or empty; it qualifies the data pointer handed to
 * applier.  the two arms below are identical except for the chunk size class
 * (large vs. small). */
#define ITEM_WALK(_it, _offset, _nbytes, _beyond_item_boundary, applier, _const) \
    do {                                                                \
        chunk_t* next;                                                  \
        _const char* ptr;                                               \
        size_t to_scan;         /* bytes left in current chunk. */      \
        /* these are the offsets to the start of the data value. */     \
        size_t start_offset, end_offset;                                \
        size_t left = (_nbytes);   /* bytes left to copy */             \
                                                                        \
        /* range must fit in the item unless the caller explicitly      \
         * allows walking beyond the boundary. */                       \
        assert((_it)->empty_header.nkey + (_it)->empty_header.nbytes >= \
               (_offset) + (_nbytes) || (_beyond_item_boundary));       \
                                                                        \
        /* if there is nothing to copy, then skip. */                   \
        if (left == 0) {                                                \
            break;                                                      \
        }                                                               \
                                                                        \
        if (is_item_large_chunk((_it))) {                               \
            /* large chunk handling code. */                            \
                                                                        \
            next = get_chunk_address((_it)->empty_header.next_chunk);   \
            ptr = &(_it)->large_title.data[0];                          \
            start_offset = 0;                                           \
            if (next == NULL && (_beyond_item_boundary)) {              \
                end_offset = LARGE_TITLE_CHUNK_DATA_SZ - 1;             \
            } else {                                                    \
                end_offset = __fs_MIN((_offset) + (_nbytes),            \
                                      start_offset + LARGE_TITLE_CHUNK_DATA_SZ) - 1; \
            }                                                           \
            to_scan = end_offset - start_offset + 1;                    \
                                                                        \
            /* advance over pages writing while doing the requested action. */ \
            do {                                                        \
                /* offset must be less than end offset. */              \
                if ((_offset) <= end_offset) {                          \
                    /* we have some work to do. */                      \
                                                                        \
                    size_t work_start, work_end, work_len;              \
                                                                        \
                    work_start = __fs_MAX((_offset), start_offset);     \
                    work_end = __fs_MIN((_offset) + (_nbytes) - 1, end_offset); \
                    work_len = work_end - work_start + 1;               \
                                                                        \
                    applier((_it), ptr + work_start - start_offset, work_len); \
                    left -= work_len;                                   \
                }                                                       \
                                                                        \
                if (left == 0) {                                        \
                    break;                                              \
                }                                                       \
                                                                        \
                start_offset += to_scan;                                \
                                                                        \
                /* hop to the next body chunk. */                       \
                assert(next != NULL);                                   \
                assert( (LARGE_CHUNK_INITIALIZED | LARGE_CHUNK_USED) == next->lc.flags ); \
                ptr = next->lc.lc_body.data;                            \
                next = get_chunk_address(next->lc.lc_body.next_chunk);  \
                if (next == NULL &&                                     \
                    (_beyond_item_boundary)) {                          \
                    end_offset = start_offset + LARGE_BODY_CHUNK_DATA_SZ - 1; \
                } else {                                                \
                    end_offset = __fs_MIN((_offset) + (_nbytes),        \
                                          start_offset + LARGE_BODY_CHUNK_DATA_SZ) - 1; \
                }                                                       \
                to_scan = end_offset - start_offset + 1;                \
            } while (start_offset <= ((_it)->empty_header.nkey +        \
                                      (_it)->empty_header.nbytes));     \
        } else {                                                        \
            /* small chunk handling code. */                            \
                                                                        \
            next = get_chunk_address((_it)->empty_header.next_chunk);   \
            ptr = &(_it)->small_title.data[0];                          \
            start_offset = 0;                                           \
            if (next == NULL && (_beyond_item_boundary)) {              \
                end_offset = SMALL_TITLE_CHUNK_DATA_SZ - 1;             \
            } else {                                                    \
                end_offset = __fs_MIN((_offset) + (_nbytes),            \
                                      start_offset + SMALL_TITLE_CHUNK_DATA_SZ) - 1; \
            }                                                           \
            to_scan = end_offset - start_offset + 1;                    \
                                                                        \
            /* advance over pages writing while doing the requested action. */ \
            do {                                                        \
                /* offset must be less than end offset. */              \
                if ((_offset) <= end_offset) {                          \
                    /* we have some work to do. */                      \
                                                                        \
                    size_t work_start, work_end, work_len;              \
                                                                        \
                    work_start = __fs_MAX((_offset), start_offset);     \
                    work_end = __fs_MIN((_offset) + (_nbytes) - 1, end_offset); \
                    work_len = work_end - work_start + 1;               \
                                                                        \
                    applier((_it), ptr + work_start - start_offset, work_len); \
                    left -= work_len;                                   \
                }                                                       \
                                                                        \
                if (left == 0) {                                        \
                    break;                                              \
                }                                                       \
                                                                        \
                start_offset += to_scan;                                \
                                                                        \
                /* hop to the next body chunk. */                       \
                assert(next != NULL);                                   \
                assert( (SMALL_CHUNK_INITIALIZED | SMALL_CHUNK_USED) == next->sc.flags ); \
                ptr = next->sc.sc_body.data;                            \
                next = get_chunk_address(next->sc.sc_body.next_chunk);  \
                if (next == NULL &&                                     \
                    (_beyond_item_boundary)) {                          \
                    end_offset = start_offset + SMALL_BODY_CHUNK_DATA_SZ - 1; \
                } else {                                                \
                    end_offset = __fs_MIN((_offset) + (_nbytes),        \
                                          start_offset + SMALL_BODY_CHUNK_DATA_SZ) - 1; \
                }                                                       \
                to_scan = end_offset - start_offset + 1;                \
            } while (start_offset <= ((_it)->empty_header.nkey +        \
                                      (_it)->empty_header.nbytes));     \
        }                                                               \
        assert(left == 0);                                              \
    } while (0);
829
f045fd8a » ttung
2008-05-01 flat memory allocator
830 #endif /* #if !defined(_flat_storage_h_) */