r15316@tombo: nickm | 2008-04-24 20:58:36 -0400

Rename internal memory management functions from event_malloc() etc. to mm_malloc() etc.

svn:r725
commit 49868b618a55c375d6c378edc174cd81e478f213 (1 parent: a55a67d)
Authored by Nick Mathewson
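
The renamed wrappers keep libevent's replaceable-allocator behaviour: each mm_* call defers to a hook installed through event_set_mem_functions() and otherwise falls back to the matching libc routine. A minimal sketch of that dispatch pattern, condensed from the event.c hunks later in this diff (only the malloc and realloc paths are shown):

    #include <stdlib.h>

    /* Hooks set by event_set_mem_functions(); NULL means "use libc". */
    static void *(*_mm_malloc_fn)(size_t sz) = NULL;
    static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;

    void *
    mm_malloc(size_t sz)
    {
            /* Prefer the application-supplied allocator when present. */
            return _mm_malloc_fn ? _mm_malloc_fn(sz) : malloc(sz);
    }

    void *
    mm_realloc(void *ptr, size_t sz)
    {
            return _mm_realloc_fn ? _mm_realloc_fn(ptr, sz) : realloc(ptr, sz);
    }
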
ChangeLog (1 changed line)
@@ -68,6 +68,7 @@ Changes in current version:
68 68 o Make name_from_addr() threadsafe in http.c
69 69 o Add new thread-safe interfaces to evdns functions.
70 70 o Make all event_tagging interfaces threadsafe.
  71 + o Rename internal memory management functions.
71 72
72 73 Changes in 1.4.0:
73 74 o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
buffer.c (20 changed lines)
@@ -86,7 +86,7 @@ evbuffer_chain_new(size_t size)
86 86 to_alloc <<= 1;
87 87
88 88 /* we get everything in one chunk */
89   - if ((chain = event_malloc(to_alloc)) == NULL)
  89 + if ((chain = mm_malloc(to_alloc)) == NULL)
90 90 return (NULL);
91 91
92 92 memset(chain, 0, EVBUFFER_CHAIN_SIZE);
@@ -101,7 +101,7 @@ evbuffer_new(void)
101 101 {
102 102 struct evbuffer *buffer;
103 103
104   - buffer = event_calloc(1, sizeof(struct evbuffer));
  104 + buffer = mm_calloc(1, sizeof(struct evbuffer));
105 105
106 106 return (buffer);
107 107 }
@@ -112,9 +112,9 @@ evbuffer_free(struct evbuffer *buffer)
112 112 struct evbuffer_chain *chain, *next;
113 113 for (chain = buffer->first; chain != NULL; chain = next) {
114 114 next = chain->next;
115   - event_free(chain);
  115 + mm_free(chain);
116 116 }
117   - event_free(buffer);
  117 + mm_free(buffer);
118 118 }
119 119
120 120 size_t
@@ -218,7 +218,7 @@ evbuffer_drain(struct evbuffer *buf, size_t len)
218 218 for (chain = buf->first; chain != NULL; chain = next) {
219 219 next = chain->next;
220 220
221   - event_free(chain);
  221 + mm_free(chain);
222 222 }
223 223
224 224 ZERO_CHAIN(buf);
@@ -229,7 +229,7 @@ evbuffer_drain(struct evbuffer *buf, size_t len)
229 229 next = chain->next;
230 230 len -= chain->off;
231 231
232   - event_free(chain);
  232 + mm_free(chain);
233 233 }
234 234
235 235 buf->first = chain;
@@ -269,7 +269,7 @@ evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
269 269
270 270 tmp = chain;
271 271 chain = chain->next;
272   - event_free(tmp);
  272 + mm_free(tmp);
273 273 }
274 274
275 275 buf->first = chain;
@@ -404,7 +404,7 @@ evbuffer_pullup(struct evbuffer *buf, int size)
404 404 size -= chain->off;
405 405 buffer += chain->off;
406 406
407   - event_free(chain);
  407 + mm_free(chain);
408 408 }
409 409
410 410 if (chain != NULL) {
@@ -582,7 +582,7 @@ evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
582 582 return (NULL);
583 583 }
584 584
585   - if ((line = event_malloc(n_to_copy+1)) == NULL) {
  585 + if ((line = mm_malloc(n_to_copy+1)) == NULL) {
586 586 event_warn("%s: out of memory\n", __func__);
587 587 evbuffer_drain(buffer, n_to_copy + extra_drain);
588 588 return (NULL);
@@ -762,7 +762,7 @@ evbuffer_expand(struct evbuffer *buf, size_t datlen)
762 762 buf->previous_to_last->next = tmp;
763 763 buf->last = tmp;
764 764
765   - event_free(chain);
  765 + mm_free(chain);
766 766
767 767 return (0);
768 768 }
devpoll.c (32 changed lines)
@@ -135,7 +135,7 @@ devpoll_init(struct event_base *base)
135 135 if (getenv("EVENT_NODEVPOLL"))
136 136 return (NULL);
137 137
138   - if (!(devpollop = event_calloc(1, sizeof(struct devpollop))))
  138 + if (!(devpollop = mm_calloc(1, sizeof(struct devpollop))))
139 139 return (NULL);
140 140
141 141 if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
@@ -145,35 +145,35 @@ devpoll_init(struct event_base *base)
145 145 /* Initialize the kernel queue */
146 146 if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
147 147 event_warn("open: /dev/poll");
148   - event_free(devpollop);
  148 + mm_free(devpollop);
149 149 return (NULL);
150 150 }
151 151
152 152 devpollop->dpfd = dpfd;
153 153
154 154 /* Initialize fields */
155   - devpollop->events = event_calloc(nfiles, sizeof(struct pollfd));
  155 + devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd));
156 156 if (devpollop->events == NULL) {
157   - event_free(devpollop);
  157 + mm_free(devpollop);
158 158 close(dpfd);
159 159 return (NULL);
160 160 }
161 161 devpollop->nevents = nfiles;
162 162
163   - devpollop->fds = event_calloc(nfiles, sizeof(struct evdevpoll));
  163 + devpollop->fds = mm_calloc(nfiles, sizeof(struct evdevpoll));
164 164 if (devpollop->fds == NULL) {
165   - event_free(devpollop->events);
166   - event_free(devpollop);
  165 + mm_free(devpollop->events);
  166 + mm_free(devpollop);
167 167 close(dpfd);
168 168 return (NULL);
169 169 }
170 170 devpollop->nfds = nfiles;
171 171
172   - devpollop->changes = event_calloc(nfiles, sizeof(struct pollfd));
  172 + devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
173 173 if (devpollop->changes == NULL) {
174   - event_free(devpollop->fds);
175   - event_free(devpollop->events);
176   - event_free(devpollop);
  174 + mm_free(devpollop->fds);
  175 + mm_free(devpollop->events);
  176 + mm_free(devpollop);
177 177 close(dpfd);
178 178 return (NULL);
179 179 }
@@ -196,7 +196,7 @@ devpoll_recalc(struct event_base *base, void *arg, int max)
196 196 while (nfds < max)
197 197 nfds <<= 1;
198 198
199   - fds = event_realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
  199 + fds = mm_realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
200 200 if (fds == NULL) {
201 201 event_warn("realloc");
202 202 return (-1);
@@ -405,14 +405,14 @@ devpoll_dealloc(struct event_base *base, void *arg)
405 405
406 406 evsignal_dealloc(base);
407 407 if (devpollop->fds)
408   - event_free(devpollop->fds);
  408 + mm_free(devpollop->fds);
409 409 if (devpollop->events)
410   - event_free(devpollop->events);
  410 + mm_free(devpollop->events);
411 411 if (devpollop->changes)
412   - event_free(devpollop->changes);
  412 + mm_free(devpollop->changes);
413 413 if (devpollop->dpfd >= 0)
414 414 close(devpollop->dpfd);
415 415
416 416 memset(devpollop, 0, sizeof(struct devpollop));
417   - event_free(devpollop);
  417 + mm_free(devpollop);
418 418 }
epoll.c (20 changed lines)
@@ -136,23 +136,23 @@ epoll_init(struct event_base *base)
136 136
137 137 FD_CLOSEONEXEC(epfd);
138 138
139   - if (!(epollop = event_calloc(1, sizeof(struct epollop))))
  139 + if (!(epollop = mm_calloc(1, sizeof(struct epollop))))
140 140 return (NULL);
141 141
142 142 epollop->epfd = epfd;
143 143
144 144 /* Initalize fields */
145   - epollop->events = event_malloc(nfiles * sizeof(struct epoll_event));
  145 + epollop->events = mm_malloc(nfiles * sizeof(struct epoll_event));
146 146 if (epollop->events == NULL) {
147   - event_free(epollop);
  147 + mm_free(epollop);
148 148 return (NULL);
149 149 }
150 150 epollop->nevents = nfiles;
151 151
152   - epollop->fds = event_calloc(nfiles, sizeof(struct evepoll));
  152 + epollop->fds = mm_calloc(nfiles, sizeof(struct evepoll));
153 153 if (epollop->fds == NULL) {
154   - event_free(epollop->events);
155   - event_free(epollop);
  154 + mm_free(epollop->events);
  155 + mm_free(epollop);
156 156 return (NULL);
157 157 }
158 158 epollop->nfds = nfiles;
@@ -175,7 +175,7 @@ epoll_recalc(struct event_base *base, void *arg, int max)
175 175 while (nfds < max)
176 176 nfds <<= 1;
177 177
178   - fds = event_realloc(epollop->fds, nfds * sizeof(struct evepoll));
  178 + fds = mm_realloc(epollop->fds, nfds * sizeof(struct evepoll));
179 179 if (fds == NULL) {
180 180 event_warn("realloc");
181 181 return (-1);
@@ -360,12 +360,12 @@ epoll_dealloc(struct event_base *base, void *arg)
360 360
361 361 evsignal_dealloc(base);
362 362 if (epollop->fds)
363   - event_free(epollop->fds);
  363 + mm_free(epollop->fds);
364 364 if (epollop->events)
365   - event_free(epollop->events);
  365 + mm_free(epollop->events);
366 366 if (epollop->epfd >= 0)
367 367 close(epollop->epfd);
368 368
369 369 memset(epollop, 0, sizeof(struct epollop));
370   - event_free(epollop);
  370 + mm_free(epollop);
371 371 }
evbuffer.c (8 changed lines)
@@ -233,17 +233,17 @@ bufferevent_new(evutil_socket_t fd, evbuffercb readcb, evbuffercb writecb,
233 233 {
234 234 struct bufferevent *bufev;
235 235
236   - if ((bufev = event_calloc(1, sizeof(struct bufferevent))) == NULL)
  236 + if ((bufev = mm_calloc(1, sizeof(struct bufferevent))) == NULL)
237 237 return (NULL);
238 238
239 239 if ((bufev->input = evbuffer_new()) == NULL) {
240   - event_free(bufev);
  240 + mm_free(bufev);
241 241 return (NULL);
242 242 }
243 243
244 244 if ((bufev->output = evbuffer_new()) == NULL) {
245 245 evbuffer_free(bufev->input);
246   - event_free(bufev);
  246 + mm_free(bufev);
247 247 return (NULL);
248 248 }
249 249
@@ -288,7 +288,7 @@ bufferevent_free(struct bufferevent *bufev)
288 288 evbuffer_free(bufev->input);
289 289 evbuffer_free(bufev->output);
290 290
291   - event_free(bufev);
  291 + mm_free(bufev);
292 292 }
293 293
294 294 /*
evdns.c (309 changed lines)
@@ -293,8 +293,16 @@ struct server_request {
293 293 };
294 294
295 295 struct evdns_base {
296   - struct request *req_head, *req_waiting_head;
  296 + /* An array of n_req_heads circular lists for inflight requests.
  297 + * Each inflight request req is in req_heads[req->trans_id % n_req_heads].
  298 + */
  299 + struct request **req_heads;
  300 + /* A circular list of requests that we're waiting to send, but haven't
  301 + * sent yet because there are too many requests inflight */
  302 + struct request *req_waiting_head;
  303 + /* A circular list of nameservers. */
297 304 struct nameserver *server_head;
  305 + int n_req_heads;
298 306
299 307 struct event_base *event_base;
300 308
@@ -330,6 +338,8 @@ static struct evdns_base *current_base = NULL;
330 338 ((struct server_request*) \
331 339 (((char*)(base_ptr) - OFFSET_OF(struct server_request, base))))
332 340
  341 +#define REQ_HEAD(base, id) ((base)->req_heads[id % (base)->n_req_heads])
  342 +
333 343 /* These are the timeout values for nameservers. If we find a nameserver is down */
334 344 /* we try to probe it at intervals as given below. Values are in seconds. */
335 345 static const struct timeval global_nameserver_timeouts[] = {{10, 0}, {60, 0}, {300, 0}, {900, 0}, {3600, 0}};
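
Beyond the rename, this commit also splits the single inflight-request list into n_req_heads circular lists bucketed by transaction id, so a lookup only has to walk the one bucket selected by REQ_HEAD(). A rough illustration of walking a bucket, assuming the circular doubly linked lists evdns.c already uses (count_bucket is a hypothetical helper, not part of the patch):

    /* Hypothetical: count inflight requests that share trans_id's bucket. */
    static int
    count_bucket(struct evdns_base *base, u16 trans_id)
    {
            struct request *req = REQ_HEAD(base, trans_id); /* req_heads[trans_id % n_req_heads] */
            struct request *const started_at = req;
            int n = 0;
            if (req) {
                    do {
                            ++n;
                            req = req->next; /* circular list: stop once we wrap around */
                    } while (req != started_at);
            }
            return n;
    }
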
@@ -453,7 +463,8 @@ _evdns_log(int warn, const char *fmt, ...)
453 463 /* failure */
454 464 static struct request *
455 465 request_find_from_trans_id(struct evdns_base *base, u16 trans_id) {
456   - struct request *req = base->req_head, *const started_at = base->req_head;
  466 + struct request *req = REQ_HEAD(base, trans_id);
  467 + struct request *const started_at = req;
457 468
458 469 if (req) {
459 470 do {
@@ -511,6 +522,7 @@ static void
511 522 nameserver_failed(struct nameserver *const ns, const char *msg) {
512 523 struct request *req, *started_at;
513 524 struct evdns_base *base = ns->base;
  525 + int i;
514 526 /* if this nameserver has already been marked as failed */
515 527 /* then don't do anything */
516 528 if (!ns->state) return;
@@ -544,16 +556,18 @@ nameserver_failed(struct nameserver *const ns, const char *msg) {
544 556 /* trying to reassign requests to one */
545 557 if (!base->global_good_nameservers) return;
546 558
547   - req = started_at = base->req_head;
548   - if (req) {
549   - do {
550   - if (req->tx_count == 0 && req->ns == ns) {
551   - /* still waiting to go out, can be moved */
552   - /* to another server */
553   - req->ns = nameserver_pick(base);
554   - }
555   - req = req->next;
556   - } while (req != started_at);
  559 + for (i = 0; i < base->n_req_heads; ++i) {
  560 + req = started_at = base->req_heads[i];
  561 + if (req) {
  562 + do {
  563 + if (req->tx_count == 0 && req->ns == ns) {
  564 + /* still waiting to go out, can be moved */
  565 + /* to another server */
  566 + req->ns = nameserver_pick(base);
  567 + }
  568 + req = req->next;
  569 + } while (req != started_at);
  570 + }
557 571 }
558 572 }
559 573
@@ -601,13 +615,13 @@ request_finished(struct request *const req, struct request **head) {
601 615
602 616 if (!req->request_appended) {
603 617 /* need to free the request data on it's own */
604   - event_free(req->request);
  618 + mm_free(req->request);
605 619 } else {
606 620 /* the request data is appended onto the header */
607 621 /* so everything gets free()ed when we: */
608 622 }
609 623
610   - event_free(req);
  624 + mm_free(req);
611 625
612 626 evdns_requests_pump_waiting_queue(base);
613 627 }
@@ -665,7 +679,7 @@ evdns_requests_pump_waiting_queue(struct evdns_base *base) {
665 679 req->ns = nameserver_pick(base);
666 680 request_trans_id_set(req, transaction_id_pick(base));
667 681
668   - evdns_request_insert(req, &base->req_head);
  682 + evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
669 683 evdns_request_transmit(req);
670 684 evdns_transmit(base);
671 685 }
@@ -757,19 +771,19 @@ reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply)
757 771 /* a new request was issued so this request is finished and */
758 772 /* the user callback will be made when that request (or a */
759 773 /* child of it) finishes. */
760   - request_finished(req, &req->base->req_head);
  774 + request_finished(req, &REQ_HEAD(req->base, req->trans_id));
761 775 return;
762 776 }
763 777 }
764 778
765 779 /* all else failed. Pass the failure up */
766 780 reply_callback(req, 0, error, NULL);
767   - request_finished(req, &req->base->req_head);
  781 + request_finished(req, &REQ_HEAD(req->base, req->trans_id));
768 782 } else {
769 783 /* all ok, tell the user */
770 784 reply_callback(req, ttl, 0, reply);
771 785 nameserver_up(req->ns);
772   - request_finished(req, &req->base->req_head);
  786 + request_finished(req, &REQ_HEAD(req->base, req->trans_id));
773 787 }
774 788 }
775 789
@@ -986,7 +1000,7 @@ request_parse(u8 *packet, int length, struct evdns_server_port *port, struct soc
986 1000 if (flags & 0x8000) return -1; /* Must not be an answer. */
987 1001 flags &= 0x0110; /* Only RD and CD get preserved. */
988 1002
989   - server_req = event_malloc(sizeof(struct server_request));
  1003 + server_req = mm_malloc(sizeof(struct server_request));
990 1004 if (server_req == NULL) return -1;
991 1005 memset(server_req, 0, sizeof(struct server_request));
992 1006
@@ -996,7 +1010,7 @@ request_parse(u8 *packet, int length, struct evdns_server_port *port, struct soc
996 1010
997 1011 server_req->base.flags = flags;
998 1012 server_req->base.nquestions = 0;
999   - server_req->base.questions = event_malloc(sizeof(struct evdns_server_question *) * questions);
  1013 + server_req->base.questions = mm_malloc(sizeof(struct evdns_server_question *) * questions);
1000 1014 if (server_req->base.questions == NULL)
1001 1015 goto err;
1002 1016
@@ -1009,7 +1023,7 @@ request_parse(u8 *packet, int length, struct evdns_server_port *port, struct soc
1009 1023 GET16(type);
1010 1024 GET16(class);
1011 1025 namelen = strlen(tmp_name);
1012   - q = event_malloc(sizeof(struct evdns_server_question) + namelen);
  1026 + q = mm_malloc(sizeof(struct evdns_server_question) + namelen);
1013 1027 if (!q)
1014 1028 goto err;
1015 1029 q->type = type;
@@ -1036,10 +1050,10 @@ request_parse(u8 *packet, int length, struct evdns_server_port *port, struct soc
1036 1050 if (server_req) {
1037 1051 if (server_req->base.questions) {
1038 1052 for (i = 0; i < server_req->base.nquestions; ++i)
1039   - event_free(server_req->base.questions[i]);
1040   - event_free(server_req->base.questions);
  1053 + mm_free(server_req->base.questions[i]);
  1054 + mm_free(server_req->base.questions);
1041 1055 }
1042   - event_free(server_req);
  1056 + mm_free(server_req);
1043 1057 }
1044 1058 return -1;
1045 1059
@@ -1106,20 +1120,12 @@ evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void))
1106 1120 static u16
1107 1121 transaction_id_pick(struct evdns_base *base) {
1108 1122 for (;;) {
1109   - const struct request *req, *started_at;
1110 1123 u16 trans_id = trans_id_function();
1111 1124
1112 1125 if (trans_id == 0xffff) continue;
1113 1126 /* now check to see if that id is already inflight */
1114   - req = started_at = base->req_head;
1115   - if (req) {
1116   - do {
1117   - if (req->trans_id == trans_id) break;
1118   - req = req->next;
1119   - } while (req != started_at);
1120   - }
1121   - /* we didn't find it, so this is a good id */
1122   - if (req == started_at) return trans_id;
  1127 + if (request_find_from_trans_id(base, trans_id) == NULL)
  1128 + return trans_id;
1123 1129 }
1124 1130 }
1125 1131
@@ -1312,7 +1318,7 @@ dnslabel_clear(struct dnslabel_table *table)
1312 1318 {
1313 1319 int i;
1314 1320 for (i = 0; i < table->n_labels; ++i)
1315   - event_free(table->labels[i].v);
  1321 + mm_free(table->labels[i].v);
1316 1322 table->n_labels = 0;
1317 1323 }
1318 1324
@@ -1337,7 +1343,7 @@ dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos)
1337 1343 int p;
1338 1344 if (table->n_labels == MAX_LABELS)
1339 1345 return (-1);
1340   - v = event_strdup(label);
  1346 + v = mm_strdup(label);
1341 1347 if (v == NULL)
1342 1348 return (-1);
1343 1349 p = table->n_labels++;
@@ -1470,7 +1476,7 @@ struct evdns_server_port *
1470 1476 evdns_add_server_port_with_base(struct event_base *base, int socket, int is_tcp, evdns_request_callback_fn_type cb, void *user_data)
1471 1477 {
1472 1478 struct evdns_server_port *port;
1473   - if (!(port = event_malloc(sizeof(struct evdns_server_port))))
  1479 + if (!(port = mm_malloc(sizeof(struct evdns_server_port))))
1474 1480 return NULL;
1475 1481 memset(port, 0, sizeof(struct evdns_server_port));
1476 1482
@@ -1537,12 +1543,12 @@ evdns_server_request_add_reply(struct evdns_server_request *_req, int section, c
1537 1543 while (*itemp) {
1538 1544 itemp = &((*itemp)->next);
1539 1545 }
1540   - item = event_malloc(sizeof(struct server_reply_item));
  1546 + item = mm_malloc(sizeof(struct server_reply_item));
1541 1547 if (!item)
1542 1548 return -1;
1543 1549 item->next = NULL;
1544   - if (!(item->name = event_strdup(name))) {
1545   - event_free(item);
  1550 + if (!(item->name = mm_strdup(name))) {
  1551 + mm_free(item);
1546 1552 return -1;
1547 1553 }
1548 1554 item->type = type;
@@ -1553,16 +1559,16 @@ evdns_server_request_add_reply(struct evdns_server_request *_req, int section, c
1553 1559 item->data = NULL;
1554 1560 if (data) {
1555 1561 if (item->is_name) {
1556   - if (!(item->data = event_strdup(data))) {
1557   - event_free(item->name);
1558   - event_free(item);
  1562 + if (!(item->data = mm_strdup(data))) {
  1563 + mm_free(item->name);
  1564 + mm_free(item);
1559 1565 return -1;
1560 1566 }
1561 1567 item->datalen = (u16)-1;
1562 1568 } else {
1563   - if (!(item->data = event_malloc(datalen))) {
1564   - event_free(item->name);
1565   - event_free(item);
  1569 + if (!(item->data = mm_malloc(datalen))) {
  1570 + mm_free(item->name);
  1571 + mm_free(item);
1566 1572 return -1;
1567 1573 }
1568 1574 item->datalen = datalen;
@@ -1711,7 +1717,7 @@ evdns_server_request_format_response(struct server_request *req, int err)
1711 1717
1712 1718 req->response_len = j;
1713 1719
1714   - if (!(req->response = event_malloc(req->response_len))) {
  1720 + if (!(req->response = mm_malloc(req->response_len))) {
1715 1721 server_request_free_answers(req);
1716 1722 dnslabel_clear(&table);
1717 1723 return (-1);
@@ -1790,10 +1796,10 @@ server_request_free_answers(struct server_request *req)
1790 1796 victim = *list;
1791 1797 while (victim) {
1792 1798 next = victim->next;
1793   - event_free(victim->name);
  1799 + mm_free(victim->name);
1794 1800 if (victim->data)
1795   - event_free(victim->data);
1796   - event_free(victim);
  1801 + mm_free(victim->data);
  1802 + mm_free(victim);
1797 1803 victim = next;
1798 1804 }
1799 1805 *list = NULL;
@@ -1808,8 +1814,8 @@ server_request_free(struct server_request *req)
1808 1814 int i, rc=1;
1809 1815 if (req->base.questions) {
1810 1816 for (i = 0; i < req->base.nquestions; ++i)
1811   - event_free(req->base.questions[i]);
1812   - event_free(req->base.questions);
  1817 + mm_free(req->base.questions[i]);
  1818 + mm_free(req->base.questions);
1813 1819 }
1814 1820
1815 1821 if (req->port) {
@@ -1823,7 +1829,7 @@ server_request_free(struct server_request *req)
1823 1829 }
1824 1830
1825 1831 if (req->response) {
1826   - event_free(req->response);
  1832 + mm_free(req->response);
1827 1833 }
1828 1834
1829 1835 server_request_free_answers(req);
@@ -1835,10 +1841,10 @@ server_request_free(struct server_request *req)
1835 1841
1836 1842 if (rc == 0) {
1837 1843 server_port_free(req->port);
1838   - event_free(req);
  1844 + mm_free(req);
1839 1845 return (1);
1840 1846 }
1841   - event_free(req);
  1847 + mm_free(req);
1842 1848 return (0);
1843 1849 }
1844 1850
@@ -1900,7 +1906,7 @@ evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg) {
1900 1906 if (req->tx_count >= req->base->global_max_retransmits) {
1901 1907 /* this request has failed */
1902 1908 reply_callback(req, 0, DNS_ERR_TIMEOUT, NULL);
1903   - request_finished(req, &req->base->req_head);
  1909 + request_finished(req, &REQ_HEAD(req->base, req->trans_id));
1904 1910 } else {
1905 1911 /* retransmit it */
1906 1912 evdns_request_transmit(req);
@@ -2015,18 +2021,21 @@ nameserver_send_probe(struct nameserver *const ns) {
2015 2021 static int
2016 2022 evdns_transmit(struct evdns_base *base) {
2017 2023 char did_try_to_transmit = 0;
  2024 + int i;
2018 2025
2019   - if (base->req_head) {
2020   - struct request *const started_at = base->req_head, *req = base->req_head;
2021   - /* first transmit all the requests which are currently waiting */
2022   - do {
2023   - if (req->transmit_me) {
2024   - did_try_to_transmit = 1;
2025   - evdns_request_transmit(req);
2026   - }
  2026 + for (i = 0; i < base->n_req_heads; ++i) {
  2027 + if (base->req_heads[i]) {
  2028 + struct request *const started_at = base->req_heads[i], *req = started_at;
  2029 + /* first transmit all the requests which are currently waiting */
  2030 + do {
  2031 + if (req->transmit_me) {
  2032 + did_try_to_transmit = 1;
  2033 + evdns_request_transmit(req);
  2034 + }
2027 2035
2028   - req = req->next;
2029   - } while (req != started_at);
  2036 + req = req->next;
  2037 + } while (req != started_at);
  2038 + }
2030 2039 }
2031 2040
2032 2041 return did_try_to_transmit;
@@ -2058,7 +2067,7 @@ int
2058 2067 evdns_base_clear_nameservers_and_suspend(struct evdns_base *base)
2059 2068 {
2060 2069 struct nameserver *server = base->server_head, *started_at = base->server_head;
2061   - struct request *req = base->req_head, *req_started_at = base->req_head;
  2070 + int i;
2062 2071
2063 2072 if (!server)
2064 2073 return 0;
@@ -2069,7 +2078,7 @@ evdns_base_clear_nameservers_and_suspend(struct evdns_base *base)
2069 2078 (void) evtimer_del(&server->timeout_event);
2070 2079 if (server->socket >= 0)
2071 2080 CLOSE_SOCKET(server->socket);
2072   - event_free(server);
  2081 + mm_free(server);
2073 2082 if (next == started_at)
2074 2083 break;
2075 2084 server = next;
@@ -2077,28 +2086,33 @@ evdns_base_clear_nameservers_and_suspend(struct evdns_base *base)
2077 2086 base->server_head = NULL;
2078 2087 base->global_good_nameservers = 0;
2079 2088
2080   - while (req) {
2081   - struct request *next = req->next;
2082   - req->tx_count = req->reissue_count = 0;
2083   - req->ns = NULL;
2084   - /* ???? What to do about searches? */
2085   - (void) evtimer_del(&req->timeout_event);
2086   - req->trans_id = 0;
2087   - req->transmit_me = 0;
2088   -
2089   - base->global_requests_waiting++;
2090   - evdns_request_insert(req, &base->req_waiting_head);
2091   - /* We want to insert these suspended elements at the front of
2092   - * the waiting queue, since they were pending before any of
2093   - * the waiting entries were added. This is a circular list,
2094   - * so we can just shift the start back by one.*/
2095   - base->req_waiting_head = base->req_waiting_head->prev;
2096   -
2097   - if (next == req_started_at)
2098   - break;
2099   - req = next;
  2089 + for (i = 0; i < base->n_req_heads; ++i) {
  2090 + struct request *req, *req_started_at;
  2091 + req = req_started_at = base->req_heads[i];
  2092 + while (req) {
  2093 + struct request *next = req->next;
  2094 + req->tx_count = req->reissue_count = 0;
  2095 + req->ns = NULL;
  2096 + /* ???? What to do about searches? */
  2097 + (void) evtimer_del(&req->timeout_event);
  2098 + req->trans_id = 0;
  2099 + req->transmit_me = 0;
  2100 +
  2101 + base->global_requests_waiting++;
  2102 + evdns_request_insert(req, &base->req_waiting_head);
  2103 + /* We want to insert these suspended elements at the front of
  2104 + * the waiting queue, since they were pending before any of
  2105 + * the waiting entries were added. This is a circular list,
  2106 + * so we can just shift the start back by one.*/
  2107 + base->req_waiting_head = base->req_waiting_head->prev;
  2108 +
  2109 + if (next == req_started_at)
  2110 + break;
  2111 + req = next;
  2112 + }
  2113 + base->req_heads[i] = NULL;
2100 2114 }
2101   - base->req_head = NULL;
  2115 +
2102 2116 base->global_requests_inflight = 0;
2103 2117
2104 2118 return 0;
@@ -2140,7 +2154,7 @@ _evdns_nameserver_add_impl(struct evdns_base *base, unsigned long int address, i
2140 2154 } while (server != started_at);
2141 2155 }
2142 2156
2143   - ns = (struct nameserver *) event_malloc(sizeof(struct nameserver));
  2157 + ns = (struct nameserver *) mm_malloc(sizeof(struct nameserver));
2144 2158 if (!ns) return -1;
2145 2159
2146 2160 memset(ns, 0, sizeof(struct nameserver));
@@ -2189,7 +2203,7 @@ _evdns_nameserver_add_impl(struct evdns_base *base, unsigned long int address, i
2189 2203 out2:
2190 2204 CLOSE_SOCKET(ns->socket);
2191 2205 out1:
2192   - event_free(ns);
  2206 + mm_free(ns);
2193 2207 log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d", debug_ntoa(address), err);
2194 2208 return err;
2195 2209 }
@@ -2280,7 +2294,7 @@ request_new(struct evdns_base *base, int type, const char *name, int flags,
2280 2294 const u16 trans_id = issuing_now ? transaction_id_pick(base) : 0xffff;
2281 2295 /* the request data is alloced in a single block with the header */
2282 2296 struct request *const req =
2283   - (struct request *) event_malloc(sizeof(struct request) + request_max_len);
  2297 + (struct request *) mm_malloc(sizeof(struct request) + request_max_len);
2284 2298 int rlen;
2285 2299 (void) flags;
2286 2300
@@ -2307,7 +2321,7 @@ request_new(struct evdns_base *base, int type, const char *name, int flags,
2307 2321
2308 2322 return req;
2309 2323 err1:
2310   - event_free(req);
  2324 + mm_free(req);
2311 2325 return NULL;
2312 2326 }
2313 2327
@@ -2317,7 +2331,7 @@ request_submit(struct request *const req) {
2317 2331 if (req->ns) {
2318 2332 /* if it has a nameserver assigned then this is going */
2319 2333 /* straight into the inflight queue */
2320   - evdns_request_insert(req, &base->req_head);
  2334 + evdns_request_insert(req, &REQ_HEAD(base, req->trans_id));
2321 2335 base->global_requests_inflight++;
2322 2336 evdns_request_transmit(req);
2323 2337 } else {
@@ -2455,15 +2469,15 @@ search_state_decref(struct search_state *const state) {
2455 2469 struct search_domain *next, *dom;
2456 2470 for (dom = state->head; dom; dom = next) {
2457 2471 next = dom->next;
2458   - event_free(dom);
  2472 + mm_free(dom);
2459 2473 }
2460   - event_free(state);
  2474 + mm_free(state);
2461 2475 }
2462 2476 }
2463 2477
2464 2478 static struct search_state *
2465 2479 search_state_new(void) {
2466   - struct search_state *state = (struct search_state *) event_malloc(sizeof(struct search_state));
  2480 + struct search_state *state = (struct search_state *) mm_malloc(sizeof(struct search_state));
2467 2481 if (!state) return NULL;
2468 2482 memset(state, 0, sizeof(struct search_state));
2469 2483 state->refcount = 1;
@@ -2501,7 +2515,7 @@ search_postfix_add(struct evdns_base *base, const char *domain) {
2501 2515 if (!base->global_search_state) return;
2502 2516 base->global_search_state->num_domains++;
2503 2517
2504   - sdomain = (struct search_domain *) event_malloc(sizeof(struct search_domain) + domain_len);
  2518 + sdomain = (struct search_domain *) mm_malloc(sizeof(struct search_domain) + domain_len);
2505 2519 if (!sdomain) return;
2506 2520 memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len);
2507 2521 sdomain->next = base->global_search_state->head;
@@ -2572,7 +2586,7 @@ search_make_new(const struct search_state *const state, int n, const char *const
2572 2586 /* the actual postfix string is kept at the end of the structure */
2573 2587 const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain);
2574 2588 const int postfix_len = dom->len;
2575   - char *const newname = (char *) event_malloc(base_len + need_to_append_dot + postfix_len + 1);
  2589 + char *const newname = (char *) mm_malloc(base_len + need_to_append_dot + postfix_len + 1);
2576 2590 if (!newname) return NULL;
2577 2591 memcpy(newname, base_name, base_len);
2578 2592 if (need_to_append_dot) newname[base_len] = '.';
@@ -2603,11 +2617,11 @@ search_request_new(struct evdns_base *base, int type, const char *const name, in
2603 2617 char *const new_name = search_make_new(base->global_search_state, 0, name);
2604 2618 if (!new_name) return 1;
2605 2619 req = request_new(base, type, new_name, flags, user_callback, user_arg);
2606   - event_free(new_name);
  2620 + mm_free(new_name);
2607 2621 if (!req) return 1;
2608 2622 req->search_index = 0;
2609 2623 }
2610   - req->search_origname = event_strdup(name);
  2624 + req->search_origname = mm_strdup(name);
2611 2625 req->search_state = base->global_search_state;
2612 2626 req->search_flags = flags;
2613 2627 base->global_search_state->refcount++;
@@ -2653,7 +2667,7 @@ search_try_next(struct request *const req) {
2653 2667 if (!new_name) return 1;
2654 2668 log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, req->search_index);
2655 2669 newreq = request_new(base, req->request_type, new_name, req->search_flags, req->user_callback, req->user_pointer);
2656   - event_free(new_name);
  2670 + mm_free(new_name);
2657 2671 if (!newreq) return 1;
2658 2672 newreq->search_origname = req->search_origname;
2659 2673 req->search_origname = NULL;
@@ -2674,7 +2688,7 @@ search_request_finished(struct request *const req) {
2674 2688 req->search_state = NULL;
2675 2689 }
2676 2690 if (req->search_origname) {
2677   - event_free(req->search_origname);
  2691 + mm_free(req->search_origname);
2678 2692 req->search_origname = NULL;
2679 2693 }
2680 2694 }
@@ -2735,6 +2749,43 @@ strtoint_clipped(const char *const str, int min, int max)
2735 2749 return r;
2736 2750 }
2737 2751
  2752 +static int
  2753 +evdns_base_set_max_requests_inflight(struct evdns_base *base, int maxinflight)
  2754 +{
  2755 + int old_n_heads = base->n_req_heads, n_heads;
  2756 + struct request **old_heads = base->req_heads, **new_heads, *req;
  2757 + int i;
  2758 + if (maxinflight < 1)
  2759 + maxinflight = 1;
  2760 + n_heads = (maxinflight+4) / 5;
  2761 + assert(n_heads > 0);
  2762 + new_heads = mm_malloc(n_heads * sizeof(struct request*));
  2763 + if (!new_heads)
  2764 + return (-1);
  2765 + for (i=0; i < n_heads; ++i)
  2766 + new_heads[i] = NULL;
  2767 + if (old_heads) {
  2768 + for (i = 0; i < old_n_heads; ++i) {
  2769 + while (old_heads[i]) {
  2770 + req = old_heads[i];
  2771 + if (req->next == req) {
  2772 + old_heads[i] = NULL;
  2773 + } else {
  2774 + old_heads[i] = req->next;
  2775 + req->next->prev = req->prev;
  2776 + req->prev->next = req->next;
  2777 + }
  2778 + evdns_request_insert(req, &new_heads[req->trans_id % n_heads]);
  2779 + }
  2780 + }
  2781 + mm_free(old_heads);
  2782 + }
  2783 + base->req_heads = new_heads;
  2784 + base->n_req_heads = n_heads;
  2785 + base->global_max_requests_inflight = maxinflight;
  2786 + return (0);
  2787 +}
  2788 +
2738 2789 /* exported function */
2739 2790 int
2740 2791 evdns_base_set_option(struct evdns_base *base,
@@ -2767,7 +2818,7 @@ evdns_base_set_option(struct evdns_base *base,
2767 2818 if (!(flags & DNS_OPTION_MISC)) return 0;
2768 2819 log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d",
2769 2820 maxinflight);
2770   - base->global_max_requests_inflight = maxinflight;
  2821 + evdns_base_set_max_requests_inflight(base, maxinflight);
2771 2822 } else if (!strncmp(option, "attempts:", 9)) {
2772 2823 int retries = strtoint(val);
2773 2824 if (retries == -1) return -1;
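
The new helper sizes the hash at roughly five inflight requests per bucket: n_heads = (maxinflight + 4) / 5, so the default of 64 inflight requests set in evdns_base_new() below works out to (64 + 4) / 5 = 13 buckets, and raising the limit later rehashes every pending request into the new table by trans_id % n_heads.
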
@@ -2858,7 +2909,7 @@ evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *con
2858 2909 }
2859 2910 if (st.st_size > 65535) { err = 3; goto out1; } /* no resolv.conf should be any bigger */
2860 2911
2861   - resolv = (u8 *) event_malloc((size_t)st.st_size + 1);
  2912 + resolv = (u8 *) mm_malloc((size_t)st.st_size + 1);
2862 2913 if (!resolv) { err = 4; goto out1; }
2863 2914
2864 2915 n = 0;
@@ -2894,7 +2945,7 @@ evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *con
2894 2945 }
2895 2946
2896 2947 out2:
2897   - event_free(resolv);
  2948 + mm_free(resolv);
2898 2949 out1:
2899 2950 close(fd);
2900 2951 return err;
@@ -2918,12 +2969,12 @@ evdns_nameserver_ip_add_line(struct evdns_base *base, const char *ips) {
2918 2969 addr = ips;
2919 2970 while (ISDIGIT(*ips) || *ips == '.' || *ips == ':')
2920 2971 ++ips;
2921   - buf = event_malloc(ips-addr+1);
  2972 + buf = mm_malloc(ips-addr+1);
2922 2973 if (!buf) return 4;
2923 2974 memcpy(buf, addr, ips-addr);
2924 2975 buf[ips-addr] = '\0';
2925 2976 r = evdns_nameserver_ip_add(buf);
2926   - event_free(buf);
  2977 + mm_free(buf);
2927 2978 if (r) return r;
2928 2979 }
2929 2980 return 0;
@@ -2956,7 +3007,7 @@ load_nameservers_with_getnetworkparams(struct evdns_base *base)
2956 3007 goto done;
2957 3008 }
2958 3009
2959   - buf = event_malloc(size);
  3010 + buf = mm_malloc(size);
2960 3011 if (!buf) { status = 4; goto done; }
2961 3012 fixed = buf;
2962 3013 r = fn(fixed, &size);
@@ -2965,8 +3016,8 @@ load_nameservers_with_getnetworkparams(struct evdns_base *base)
2965 3016 goto done;
2966 3017 }
2967 3018 if (r != ERROR_SUCCESS) {
2968   - event_free(buf);
2969   - buf = event_malloc(size);
  3019 + mm_free(buf);
  3020 + buf = mm_malloc(size);
2970 3021 if (!buf) { status = 4; goto done; }
2971 3022 fixed = buf;
2972 3023 r = fn(fixed, &size);
@@ -3002,7 +3053,7 @@ load_nameservers_with_getnetworkparams(struct evdns_base *base)
3002 3053
3003 3054 done:
3004 3055 if (buf)
3005   - event_free(buf);
  3056 + mm_free(buf);
3006 3057 if (handle)
3007 3058 FreeLibrary(handle);
3008 3059 return status;
@@ -3018,7 +3069,7 @@ config_nameserver_from_reg_key(struct evdns_base *base, HKEY key, const char *su
3018 3069 if (RegQueryValueEx(key, subkey, 0, &type, NULL, &bufsz)
3019 3070 != ERROR_MORE_DATA)
3020 3071 return -1;
3021   - if (!(buf = event_malloc(bufsz)))
  3072 + if (!(buf = mm_malloc(bufsz)))
3022 3073 return -1;
3023 3074
3024 3075 if (RegQueryValueEx(key, subkey, 0, &type, (LPBYTE)buf, &bufsz)
@@ -3026,7 +3077,7 @@ config_nameserver_from_reg_key(struct evdns_base *base, HKEY key, const char *su
3026 3077 status = evdns_nameserver_ip_add_line(base,buf);
3027 3078 }
3028 3079
3029   - event_free(buf);
  3080 + mm_free(buf);
3030 3081 return status;
3031 3082 }
3032 3083
@@ -3101,16 +3152,21 @@ struct evdns_base *
3101 3152 evdns_base_new(struct event_base *event_base, int initialize_nameservers)
3102 3153 {
3103 3154 struct evdns_base *base;
3104   - base = event_malloc(sizeof(struct evdns_base));
  3155 + base = mm_malloc(sizeof(struct evdns_base));
3105 3156 if (base == NULL)
3106 3157 return (NULL);
3107 3158 memset(base, 0, sizeof(struct evdns_base));
3108   - base->req_head = base->req_waiting_head = NULL;
  3159 + base->req_waiting_head = NULL;
  3160 +
  3161 + /* Set max requests inflight and allocate req_heads. */
  3162 + base->req_heads = NULL;
  3163 + evdns_base_set_max_requests_inflight(base, 64);
  3164 +
3109 3165 base->server_head = NULL;
3110 3166 base->event_base = event_base;
3111 3167 base->global_good_nameservers = base->global_requests_inflight =
3112 3168 base->global_requests_waiting = 0;
3113   - base->global_max_requests_inflight = 64;
  3169 +
3114 3170 base->global_timeout.tv_sec = 5;
3115 3171 base->global_timeout.tv_usec = 0;
3116 3172 base->global_max_reissues = 1;
@@ -3168,11 +3224,14 @@ evdns_base_free(struct evdns_base *base, int fail_requests)
3168 3224 {
3169 3225 struct nameserver *server, *server_next;
3170 3226 struct search_domain *dom, *dom_next;
  3227 + int i;
3171 3228
3172   - while (base->req_head) {
3173   - if (fail_requests)
3174   - reply_callback(base->req_head, 0, DNS_ERR_SHUTDOWN, NULL);
3175   - request_finished(base->req_head, &base->req_head);
  3229 + for (i = 0; i < base->n_req_heads; ++i) {
  3230 + while (base->req_heads[i]) {
  3231 + if (fail_requests)
  3232 + reply_callback(base->req_heads[i], 0, DNS_ERR_SHUTDOWN, NULL);
  3233 + request_finished(base->req_heads[i], &REQ_HEAD(base, base->req_heads[i]->trans_id));
  3234 + }
3176 3235 }
3177 3236 while (base->req_waiting_head) {
3178 3237 if (fail_requests)
@@ -3188,7 +3247,7 @@ evdns_base_free(struct evdns_base *base, int fail_requests)
3188 3247 (void) event_del(&server->event);
3189 3248 if (server->state == 0)
3190 3249 (void) event_del(&server->timeout_event);
3191   - event_free(server);
  3250 + mm_free(server);
3192 3251 if (server_next == base->server_head)
3193 3252 break;
3194 3253 }
@@ -3198,12 +3257,12 @@ evdns_base_free(struct evdns_base *base, int fail_requests)
3198 3257 if (base->global_search_state) {
3199 3258 for (dom = base->global_search_state->head; dom; dom = dom_next) {
3200 3259 dom_next = dom->next;
3201   - event_free(dom);
  3260 + mm_free(dom);
3202 3261 }
3203   - event_free(base->global_search_state);
  3262 + mm_free(base->global_search_state);
3204 3263 base->global_search_state = NULL;
3205 3264 }
3206   - event_free(base);
  3265 + mm_free(base);
3207 3266 }
3208 3267
3209 3268 void
event.c (66 changed lines)
@@ -183,7 +183,7 @@ event_base_new(void)
183 183 int i;
184 184 struct event_base *base;
185 185
186   - if ((base = event_calloc(1, sizeof(struct event_base))) == NULL)
  186 + if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL)
187 187 event_err(1, "%s: calloc", __func__);
188 188
189 189 event_sigcb = NULL;
@@ -274,12 +274,12 @@ event_base_free(struct event_base *base)
274 274 min_heap_dtor(&base->timeheap);
275 275
276 276 for (i = 0; i < base->nactivequeues; ++i)
277   - event_free(base->activequeues[i]);
278   - event_free(base->activequeues);
  277 + mm_free(base->activequeues[i]);
  278 + mm_free(base->activequeues);
279 279
280 280 assert(TAILQ_EMPTY(&base->eventqueue));
281 281
282   - event_free(base);
  282 + mm_free(base);
283 283 }
284 284
285 285 /* reinitialized the event base after a fork */
@@ -326,21 +326,21 @@ event_base_priority_init(struct event_base *base, int npriorities)
326 326
327 327 if (base->nactivequeues && npriorities != base->nactivequeues) {
328 328 for (i = 0; i < base->nactivequeues; ++i) {
329   - event_free(base->activequeues[i]);
  329 + mm_free(base->activequeues[i]);
330 330 }
331   - event_free(base->activequeues);
  331 + mm_free(base->activequeues);
332 332 }
333 333
334 334 /* Allocate our priority queues */
335 335 base->nactivequeues = npriorities;
336   - base->activequeues = (struct event_list **)event_calloc(
  336 + base->activequeues = (struct event_list **)mm_calloc(
337 337 base->nactivequeues,
338 338 npriorities * sizeof(struct event_list *));
339 339 if (base->activequeues == NULL)
340 340 event_err(1, "%s: calloc", __func__);
341 341
342 342 for (i = 0; i < base->nactivequeues; ++i) {
343   - base->activequeues[i] = event_malloc(sizeof(struct event_list));
  343 + base->activequeues[i] = mm_malloc(sizeof(struct event_list));
344 344 if (base->activequeues[i] == NULL)
345 345 event_err(1, "%s: malloc", __func__);
346 346 TAILQ_INIT(base->activequeues[i]);
@@ -568,7 +568,7 @@ event_once_cb(evutil_socket_t fd, short events, void *arg)
568 568 struct event_once *eonce = arg;
569 569
570 570 (*eonce->cb)(fd, events, eonce->arg);
571   - event_free(eonce);
  571 + mm_free(eonce);
572 572 }
573 573
574 574 /* not threadsafe, event scheduled once. */
@@ -594,7 +594,7 @@ event_base_once(struct event_base *base, evutil_socket_t fd, short events,
594 594 if (events & EV_SIGNAL)
595 595 return (-1);
596 596
597   - if ((eonce = event_calloc(1, sizeof(struct event_once))) == NULL)
  597 + if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
598 598 return (-1);
599 599
600 600 eonce->cb = callback;
@@ -613,7 +613,7 @@ event_base_once(struct event_base *base, evutil_socket_t fd, short events,
613 613 event_set(&eonce->ev, fd, events, event_once_cb, eonce);
614 614 } else {
615 615 /* Bad event combination */
616   - event_free(eonce);
  616 + mm_free(eonce);
617 617 return (-1);
618 618 }
619 619
@@ -621,7 +621,7 @@ event_base_once(struct event_base *base, evutil_socket_t fd, short events,
621 621 if (res == 0)
622 622 res = event_add(&eonce->ev, tv);
623 623 if (res != 0) {
624   - event_free(eonce);
  624 + mm_free(eonce);
625 625 return (res);
626 626 }
627 627
@@ -1089,25 +1089,25 @@ event_get_method(void)
1089 1089 return (current_base->evsel->name);
1090 1090 }
1091 1091
1092   -static void *(*_event_malloc_fn)(size_t sz) = NULL;
1093   -static void *(*_event_realloc_fn)(void *p, size_t sz) = NULL;
1094   -static void (*_event_free_fn)(void *p) = NULL;
  1092 +static void *(*_mm_malloc_fn)(size_t sz) = NULL;
  1093 +static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
  1094 +static void (*_mm_free_fn)(void *p) = NULL;
1095 1095
1096 1096 void *
1097   -event_malloc(size_t sz)
  1097 +mm_malloc(size_t sz)
1098 1098 {
1099   - if (_event_malloc_fn)
1100   - return _event_malloc_fn(sz);
  1099 + if (_mm_malloc_fn)
  1100 + return _mm_malloc_fn(sz);
1101 1101 else
1102 1102 return malloc(sz);
1103 1103 }
1104 1104
1105 1105 void *
1106   -event_calloc(size_t count, size_t size)
  1106 +mm_calloc(size_t count, size_t size)
1107 1107 {
1108   - if (_event_malloc_fn) {
  1108 + if (_mm_malloc_fn) {
1109 1109 size_t sz = count * size;
1110   - void *p = _event_malloc_fn(sz);
  1110 + void *p = _mm_malloc_fn(sz);
1111 1111 if (p)
1112 1112 memset(p, 0, sz);
1113 1113 return p;
@@ -1116,11 +1116,11 @@ event_calloc(size_t count, size_t size)
1116 1116 }
1117 1117
1118 1118 char *
1119   -event_strdup(const char *str)
  1119 +mm_strdup(const char *str)
1120 1120 {
1121   - if (_event_malloc_fn) {
  1121 + if (_mm_malloc_fn) {
1122 1122 size_t ln = strlen(str);
1123   - void *p = _event_malloc_fn(ln+1);
  1123 + void *p = _mm_malloc_fn(ln+1);
1124 1124 if (p)
1125 1125 memcpy(p, str, ln+1);
1126 1126 return p;
@@ -1133,19 +1133,19 @@ event_strdup(const char *str)
1133 1133 }
1134 1134
1135 1135 void *
1136   -event_realloc(void *ptr, size_t sz)
  1136 +mm_realloc(void *ptr, size_t sz)
1137 1137 {
1138   - if (_event_realloc_fn)
1139   - return _event_realloc_fn(ptr, sz);
  1138 + if (_mm_realloc_fn)
  1139 + return _mm_realloc_fn(ptr, sz);
1140 1140 else
1141 1141 return realloc(ptr, sz);
1142 1142 }
1143 1143
1144 1144 void
1145   -event_free(void *ptr)
  1145 +mm_free(void *ptr)
1146 1146 {
1147   - if (_event_realloc_fn)
1148   - _event_free_fn(ptr);
  1147 + if (_mm_realloc_fn)
  1148 + _mm_free_fn(ptr);
1149 1149 else
1150 1150 free(ptr);
1151 1151 }
@@ -1155,9 +1155,9 @@ event_set_mem_functions(void *(*malloc_fn)(size_t sz),
1155 1155 void *(*realloc_fn)(void *ptr, size_t sz),
1156 1156 void (*free_fn)(void *ptr))
1157 1157 {
1158   - _event_malloc_fn = malloc_fn;
1159   - _event_realloc_fn = realloc_fn;
1160   - _event_free_fn = free_fn;
  1158 + _mm_malloc_fn = malloc_fn;
  1159 + _mm_realloc_fn = realloc_fn;
  1160 + _mm_free_fn = free_fn;
1161 1161 }
1162 1162
1163 1163 /* support for threading */
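
Applications continue to override the allocator exactly as before; only libevent's internal call sites changed names. A sketch of plugging in counting wrappers via event_set_mem_functions(), whose signature is untouched by this commit — the header path and the counting_* helpers are illustrative assumptions, not libevent symbols:

    #include <stdio.h>
    #include <stdlib.h>
    #include <event.h>      /* assumed to declare event_set_mem_functions() in this tree */

    static unsigned long total_allocs;  /* illustrative counter */

    static void *counting_malloc(size_t sz)           { ++total_allocs; return malloc(sz); }
    static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
    static void  counting_free(void *p)               { free(p); }

    int
    main(void)
    {
            /* Every internal mm_malloc()/mm_calloc()/mm_strdup() now routes through these. */
            event_set_mem_functions(counting_malloc, counting_realloc, counting_free);

            struct event_base *base = event_base_new();
            event_base_free(base);

            printf("libevent made %lu allocations\n", total_allocs);
            return 0;
    }
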
event_tagging.c (2 changed lines)
@@ -445,7 +445,7 @@ evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
445 445 tag != need_tag)
446 446 return (-1);
447 447
448   - *pstring = event_malloc(tag_len + 1);
  448 + *pstring = mm_malloc(tag_len + 1);
449 449 if (*pstring == NULL)
450 450 event_err(1, "%s: malloc", __func__);
451 451 evbuffer_remove(evbuf, *pstring, tag_len);
evport.c (14 changed lines)
@@ -147,21 +147,21 @@ evport_init(struct event_base *base)
147 147 if (getenv("EVENT_NOEVPORT"))
148 148 return (NULL);
149 149
150   - if (!(evpd = event_calloc(1, sizeof(struct evport_data))))
  150 + if (!(evpd = mm_calloc(1, sizeof(struct evport_data))))
151 151 return (NULL);
152 152
153 153 if ((evpd->ed_port = port_create()) == -1) {
154   - event_free(evpd);
  154 + mm_free(evpd);
155 155 return (NULL);
156 156 }
157 157
158 158 /*
159 159 * Initialize file descriptor structure
160 160 */
161   - evpd->ed_fds = event_calloc(DEFAULT_NFDS, sizeof(struct fd_info));
  161 + evpd->ed_fds = mm_calloc(DEFAULT_NFDS, sizeof(struct fd_info));
162 162 if (evpd->ed_fds == NULL) {
163 163 close(evpd->ed_port);
164   - event_free(evpd);
  164 + mm_free(evpd);
165 165 return (NULL);
166 166 }
167 167 evpd->ed_nevents = DEFAULT_NFDS;
@@ -242,7 +242,7 @@ grow(struct evport_data *epdp, int factor)
242 242
243 243 check_evportop(epdp);
244 244
245   - tmp = event_realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
  245 + tmp = mm_realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
246 246 if (NULL == tmp)
247 247 return -1;
248 248 epdp->ed_fds = tmp;
@@ -509,6 +509,6 @@ evport_dealloc(struct event_base *base, void *arg)
509 509 close(evpd->ed_port);