Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Fix all identifiers with names beginning with underscore.

These are reserved in C.  We'd been erroneously using them to indicate
internal use.

Instead, we now use a trailing underscore whenever we'd been using a leading
underscore.

This is an automatic conversion.  The script that produced it was made by
running the following script over the output of

 git ls-tree -r --name-only HEAD | grep  '\.[ch]$' | \
   xargs ctags --c-kinds=defglmpstuvx -o - | grep '^_' | \
   cut -f 1 | sort| uniq

(GNU ctags was required.)

=====
#!/usr/bin/perl -w -n
# Read one identifier per line (ctags output, each beginning with "_")
# and emit, on stdout, a second Perl script that performs the actual
# whole-word renames in place (-i -p).
#
# Rename rule: strip the leading underscore and append a trailing one
# (so reserved "_foo" becomes project-internal "foo_"), except for the
# EVENT_LOG_* severity macros, which simply lose the leading underscore.

use strict;

# Header for the generated rename script: edit files in place, line by line.
BEGIN { print "#!/usr/bin/perl -w -i -p\n\n"; }

chomp;

# Skip identifiers that legitimately belong to the implementation,
# the compiler, or system headers; their leading underscores must stay.
# (Duplicated /^__func__/ and /^_FILE_OFFSET_BITS/ alternatives removed.)
next if (/^__func__/ or
	 /^__cdecl/ or
	 /^__attribute__/ or
	 /^_FILE_OFFSET_BITS/ or
	 /^_FORTIFY_SOURCE/ or
	 /^_GNU_SOURCE/ or
	 /^_WIN32/ or
	 /^_DARWIN_UNLIMITED/ or
	 /^_LARGEFILE64_SOURCE/ or
	 /^_LFS64_LARGEFILE/ or
	 /^_SYS_TREE_H_/);

my $ident = $_;

# Drop the leading underscore...
my $better = $ident;
$better =~ s/^_//;

# ...and append a trailing one, except for the EVENT_LOG_* macros,
# which become plain public-style names.
if ($ident !~ /EVENT_LOG_/) {
    $better = "${better}_";
}

# Whole-identifier match: the lookbehind/lookahead prevent renaming a
# substring of a longer identifier (e.g. "_ev" inside "_evbuffer_...").
print "s/(?<![A-Za-z0-9_])$ident(?![A-Za-z0-9_])/$better/g;\n";

=== And then running the generated script below over all
=== the .c and .h files again
#!/usr/bin/perl -w -i -p
# Auto-generated rename script (see the generator above): edits each file
# in place (-i), line by line (-p), replacing every whole-word occurrence
# of a reserved leading-underscore identifier with its trailing-underscore
# form.  The (?<!...)/(?!...) lookarounds restrict each match to a complete
# identifier so that e.g. the "_ev" rule cannot fire inside "_evbuffer_*".
# NOTE(review): this list is machine-generated output quoted verbatim in
# the commit message — do not hand-edit individual rules.

s/(?<![A-Za-z0-9_])_ARC4_LOCK(?![A-Za-z0-9_])/ARC4_LOCK_/g;
s/(?<![A-Za-z0-9_])_ARC4_UNLOCK(?![A-Za-z0-9_])/ARC4_UNLOCK_/g;
s/(?<![A-Za-z0-9_])_bev_group_random_element(?![A-Za-z0-9_])/bev_group_random_element_/g;
s/(?<![A-Za-z0-9_])_bev_group_refill_callback(?![A-Za-z0-9_])/bev_group_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_reading(?![A-Za-z0-9_])/bev_group_suspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_writing(?![A-Za-z0-9_])/bev_group_suspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_reading(?![A-Za-z0-9_])/bev_group_unsuspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_writing(?![A-Za-z0-9_])/bev_group_unsuspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_refill_callback(?![A-Za-z0-9_])/bev_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bufferevent_add_event(?![A-Za-z0-9_])/bufferevent_add_event_/g;
s/(?<![A-Za-z0-9_])_bufferevent_cancel_all(?![A-Za-z0-9_])/bufferevent_cancel_all_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decref_and_unlock(?![A-Za-z0-9_])/bufferevent_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_read_buckets(?![A-Za-z0-9_])/bufferevent_decrement_read_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_write_buckets(?![A-Za-z0-9_])/bufferevent_decrement_write_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_del_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_del_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_generic_adj_timeouts(?![A-Za-z0-9_])/bufferevent_generic_adj_timeouts_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_read_max(?![A-Za-z0-9_])/bufferevent_get_read_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_rlim_max(?![A-Za-z0-9_])/bufferevent_get_rlim_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_write_max(?![A-Za-z0-9_])/bufferevent_get_write_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_incref_and_lock(?![A-Za-z0-9_])/bufferevent_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_init_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_init_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_ratelim_init(?![A-Za-z0-9_])/bufferevent_ratelim_init_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_eventcb(?![A-Za-z0-9_])/bufferevent_run_eventcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_readcb(?![A-Za-z0-9_])/bufferevent_run_readcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_writecb(?![A-Za-z0-9_])/bufferevent_run_writecb_/g;
s/(?<![A-Za-z0-9_])_ev(?![A-Za-z0-9_])/ev_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_pin(?![A-Za-z0-9_])/evbuffer_chain_pin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_unpin(?![A-Za-z0-9_])/evbuffer_chain_unpin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_decref_and_unlock(?![A-Za-z0-9_])/evbuffer_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_evbuffer_expand_fast(?![A-Za-z0-9_])/evbuffer_expand_fast_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref(?![A-Za-z0-9_])/evbuffer_incref_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref_and_lock(?![A-Za-z0-9_])/evbuffer_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_EVBUFFER_IOVEC_IS_NATIVE(?![A-Za-z0-9_])/EVBUFFER_IOVEC_IS_NATIVE_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_get_fd(?![A-Za-z0-9_])/evbuffer_overlapped_get_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_set_fd(?![A-Za-z0-9_])/evbuffer_overlapped_set_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_read_setup_vecs(?![A-Za-z0-9_])/evbuffer_read_setup_vecs_/g;
s/(?<![A-Za-z0-9_])_evbuffer_validate(?![A-Za-z0-9_])/evbuffer_validate_/g;
s/(?<![A-Za-z0-9_])_evdns_log(?![A-Za-z0-9_])/evdns_log_/g;
s/(?<![A-Za-z0-9_])_evdns_nameserver_add_impl(?![A-Za-z0-9_])/evdns_nameserver_add_impl_/g;
s/(?<![A-Za-z0-9_])_EVENT_CONFIG_H_(?![A-Za-z0-9_])/EVENT_CONFIG_H__/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_is_setup(?![A-Za-z0-9_])/event_debug_assert_is_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_not_added(?![A-Za-z0-9_])/event_debug_assert_not_added_/g;
s/(?<![A-Za-z0-9_])_event_debug_get_logging_mask(?![A-Za-z0-9_])/event_debug_get_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_logging_mask(?![A-Za-z0-9_])/event_debug_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_map_lock(?![A-Za-z0-9_])/event_debug_map_lock_/g;
s/(?<![A-Za-z0-9_])_event_debug_mode_on(?![A-Za-z0-9_])/event_debug_mode_on_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_add(?![A-Za-z0-9_])/event_debug_note_add_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_del(?![A-Za-z0-9_])/event_debug_note_del_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_setup(?![A-Za-z0-9_])/event_debug_note_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_teardown(?![A-Za-z0-9_])/event_debug_note_teardown_/g;
s/(?<![A-Za-z0-9_])_event_debugx(?![A-Za-z0-9_])/event_debugx_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_LISTENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_LISTENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_TQENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQHEAD(?![A-Za-z0-9_])/EVENT_DEFINED_TQHEAD_/g;
s/(?<![A-Za-z0-9_])_EVENT_DNS_USE_FTIME_FOR_ID(?![A-Za-z0-9_])/EVENT_DNS_USE_FTIME_FOR_ID_/g;
s/(?<![A-Za-z0-9_])_EVENT_ERR_ABORT(?![A-Za-z0-9_])/EVENT_ERR_ABORT_/g;
s/(?<![A-Za-z0-9_])_EVENT_EVCONFIG__PRIVATE_H(?![A-Za-z0-9_])/EVENT_EVCONFIG__PRIVATE_H_/g;
s/(?<![A-Za-z0-9_])_event_iocp_port_unlock_and_free(?![A-Za-z0-9_])/event_iocp_port_unlock_and_free_/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_DEBUG(?![A-Za-z0-9_])/EVENT_LOG_DEBUG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_ERR(?![A-Za-z0-9_])/EVENT_LOG_ERR/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_MSG(?![A-Za-z0-9_])/EVENT_LOG_MSG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_WARN(?![A-Za-z0-9_])/EVENT_LOG_WARN/g;
s/(?<![A-Za-z0-9_])_event_strlcpy(?![A-Za-z0-9_])/event_strlcpy_/g;
s/(?<![A-Za-z0-9_])_EVHTTP_REQ_UNKNOWN(?![A-Za-z0-9_])/EVHTTP_REQ_UNKNOWN_/g;
s/(?<![A-Za-z0-9_])_EVLOCK_SORTLOCKS(?![A-Za-z0-9_])/EVLOCK_SORTLOCKS_/g;
s/(?<![A-Za-z0-9_])_evrpc_hooks(?![A-Za-z0-9_])/evrpc_hooks_/g;
s/(?<![A-Za-z0-9_])_evsig_restore_handler(?![A-Za-z0-9_])/evsig_restore_handler_/g;
s/(?<![A-Za-z0-9_])_evsig_set_handler(?![A-Za-z0-9_])/evsig_set_handler_/g;
s/(?<![A-Za-z0-9_])_evthread_cond_fns(?![A-Za-z0-9_])/evthread_cond_fns_/g;
s/(?<![A-Za-z0-9_])_evthread_debug_get_real_lock(?![A-Za-z0-9_])/evthread_debug_get_real_lock_/g;
s/(?<![A-Za-z0-9_])_evthread_id_fn(?![A-Za-z0-9_])/evthread_id_fn_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_alloc(?![A-Za-z0-9_])/evthreadimpl_cond_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_free(?![A-Za-z0-9_])/evthreadimpl_cond_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_signal(?![A-Za-z0-9_])/evthreadimpl_cond_signal_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_wait(?![A-Za-z0-9_])/evthreadimpl_cond_wait_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_get_id(?![A-Za-z0-9_])/evthreadimpl_get_id_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_is_lock_debugging_enabled(?![A-Za-z0-9_])/evthreadimpl_is_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_alloc(?![A-Za-z0-9_])/evthreadimpl_lock_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_free(?![A-Za-z0-9_])/evthreadimpl_lock_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_locking_enabled(?![A-Za-z0-9_])/evthreadimpl_locking_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_lock(?![A-Za-z0-9_])/evthreadimpl_lock_lock_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_unlock(?![A-Za-z0-9_])/evthreadimpl_lock_unlock_/g;
s/(?<![A-Za-z0-9_])_evthread_is_debug_lock_held(?![A-Za-z0-9_])/evthread_is_debug_lock_held_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_debugging_enabled(?![A-Za-z0-9_])/evthread_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_fns(?![A-Za-z0-9_])/evthread_lock_fns_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_CONDITION(?![A-Za-z0-9_])/EVUTIL_NIL_CONDITION_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_STMT(?![A-Za-z0-9_])/EVUTIL_NIL_STMT_/g;
s/(?<![A-Za-z0-9_])_evutil_weakrand(?![A-Za-z0-9_])/evutil_weakrand_/g;
s/(?<![A-Za-z0-9_])_http_close_detection(?![A-Za-z0-9_])/http_close_detection_/g;
s/(?<![A-Za-z0-9_])_http_connection_test(?![A-Za-z0-9_])/http_connection_test_/g;
s/(?<![A-Za-z0-9_])_http_incomplete_test(?![A-Za-z0-9_])/http_incomplete_test_/g;
s/(?<![A-Za-z0-9_])_http_stream_in_test(?![A-Za-z0-9_])/http_stream_in_test_/g;
s/(?<![A-Za-z0-9_])_internal(?![A-Za-z0-9_])/internal_/g;
s/(?<![A-Za-z0-9_])_mm_free_fn(?![A-Za-z0-9_])/mm_free_fn_/g;
s/(?<![A-Za-z0-9_])_mm_malloc_fn(?![A-Za-z0-9_])/mm_malloc_fn_/g;
s/(?<![A-Za-z0-9_])_mm_realloc_fn(?![A-Za-z0-9_])/mm_realloc_fn_/g;
s/(?<![A-Za-z0-9_])_original_cond_fns(?![A-Za-z0-9_])/original_cond_fns_/g;
s/(?<![A-Za-z0-9_])_original_lock_fns(?![A-Za-z0-9_])/original_lock_fns_/g;
s/(?<![A-Za-z0-9_])_rpc_hook_ctx(?![A-Za-z0-9_])/rpc_hook_ctx_/g;
s/(?<![A-Za-z0-9_])_SYS_QUEUE_H_(?![A-Za-z0-9_])/SYS_QUEUE_H__/g;
s/(?<![A-Za-z0-9_])_t(?![A-Za-z0-9_])/t_/g;
s/(?<![A-Za-z0-9_])_t32(?![A-Za-z0-9_])/t32_/g;
s/(?<![A-Za-z0-9_])_test_ai_eq(?![A-Za-z0-9_])/test_ai_eq_/g;
s/(?<![A-Za-z0-9_])_URI_ADD(?![A-Za-z0-9_])/URI_ADD_/g;
s/(?<![A-Za-z0-9_])_URI_FREE_STR(?![A-Za-z0-9_])/URI_FREE_STR_/g;
s/(?<![A-Za-z0-9_])_URI_SET_STR(?![A-Za-z0-9_])/URI_SET_STR_/g;
s/(?<![A-Za-z0-9_])_warn_helper(?![A-Za-z0-9_])/warn_helper_/g;
  • Loading branch information...
commit cb9da0bf38c18e6406bbba4f5373fb41e4dac2f5 1 parent 639383a
Nick Mathewson authored
Showing with 709 additions and 709 deletions.
  1. +2 −2 WIN32-Code/evconfig-private.h
  2. +3 −3 WIN32-Code/event2/event-config.h
  3. +8 −8 arc4random.c
  4. +71 −71 buffer.c
  5. +11 −11 buffer_iocp.c
  6. +16 −16 bufferevent-internal.h
  7. +28 −28 bufferevent.c
  8. +35 −35 bufferevent_async.c
  9. +16 −16 bufferevent_filter.c
  10. +33 −33 bufferevent_openssl.c
  11. +10 −10 bufferevent_pair.c
  12. +40 −40 bufferevent_ratelim.c
  13. +22 −22 bufferevent_sock.c
  14. +3 −3 compat/sys/queue.h
  15. +7 −7 evbuffer-internal.h
  16. +25 −25 evdns.c
  17. +9 −9 event-internal.h
  18. +76 −76 event.c
  19. +2 −2 event_iocp.c
  20. +3 −3 evrpc-internal.h
  21. +4 −4 evrpc.c
  22. +77 −77 evthread-internal.h
  23. +80 −80 evthread.c
  24. +1 −1  evutil.c
  25. +4 −4 evutil_rand.c
  26. +1 −1  http-internal.h
  27. +24 −24 http.c
  28. +2 −2 include/event2/buffer.h
  29. +6 −6 include/event2/event.h
  30. +9 −9 include/event2/event_struct.h
  31. +4 −4 include/event2/keyvalq_struct.h
  32. +1 −1  include/event2/util.h
  33. +2 −2 iocp-internal.h
  34. +2 −2 kqueue.c
  35. +8 −8 log-internal.h
  36. +20 −20 log.c
  37. +5 −5 signal.c
  38. +2 −2 strlcpy-internal.h
  39. +1 −1  strlcpy.c
  40. +2 −2 test/regress.h
  41. +3 −3 test/regress_buffer.c
  42. +12 −12 test/regress_http.c
  43. +3 −3 test/regress_rpc.c
  44. +11 −11 test/regress_util.c
  45. +5 −5 util-internal.h
View
4 WIN32-Code/evconfig-private.h
@@ -1,5 +1,5 @@
-#if !defined(_EVENT_EVCONFIG__PRIVATE_H) && !defined(__MINGW32__)
-#define _EVENT_EVCONFIG__PRIVATE_H
+#if !defined(EVENT_EVCONFIG__PRIVATE_H_) && !defined(__MINGW32__)
+#define EVENT_EVCONFIG__PRIVATE_H_
/* Nothing to see here. Move along. */
View
6 WIN32-Code/event2/event-config.h
@@ -7,8 +7,8 @@
*
* Do not rely on macros in this file existing in later versions.
*/
-#ifndef _EVENT_CONFIG_H_
-#define _EVENT_CONFIG_H_
+#ifndef EVENT_CONFIG_H__
+#define EVENT_CONFIG_H__
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
@@ -23,7 +23,7 @@
/* Define is no secure id variant is available */
/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
-#define _EVENT_DNS_USE_FTIME_FOR_ID 1
+#define EVENT_DNS_USE_FTIME_FOR_ID_ 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
/* #undef EVENT__HAVE_ARPA_INET_H */
View
16 arc4random.c
@@ -438,9 +438,9 @@ ARC4RANDOM_EXPORT int
arc4random_stir(void)
{
int val;
- _ARC4_LOCK();
+ ARC4_LOCK_();
val = arc4_stir();
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
return val;
}
#endif
@@ -450,7 +450,7 @@ ARC4RANDOM_EXPORT void
arc4random_addrandom(const unsigned char *dat, int datlen)
{
int j;
- _ARC4_LOCK();
+ ARC4_LOCK_();
if (!rs_initialized)
arc4_stir();
for (j = 0; j < datlen; j += 256) {
@@ -460,7 +460,7 @@ arc4random_addrandom(const unsigned char *dat, int datlen)
* crazy like passing us all the files in /var/log. */
arc4_addrandom(dat + j, datlen - j);
}
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
}
#endif
@@ -469,11 +469,11 @@ ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
arc4random(void)
{
ARC4RANDOM_UINT32 val;
- _ARC4_LOCK();
+ ARC4_LOCK_();
arc4_count -= 4;
arc4_stir_if_needed();
val = arc4_getword();
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
return val;
}
#endif
@@ -482,14 +482,14 @@ ARC4RANDOM_EXPORT void
arc4random_buf(void *_buf, size_t n)
{
unsigned char *buf = _buf;
- _ARC4_LOCK();
+ ARC4_LOCK_();
arc4_stir_if_needed();
while (n--) {
if (--arc4_count <= 0)
arc4_stir();
buf[n] = arc4_getbyte();
}
- _ARC4_UNLOCK();
+ ARC4_UNLOCK_();
}
#ifndef ARC4RANDOM_NOUNIFORM
View
142 buffer.c
@@ -135,8 +135,8 @@
/* evbuffer_ptr support */
#define PTR_NOT_FOUND(ptr) do { \
(ptr)->pos = -1; \
- (ptr)->_internal.chain = NULL; \
- (ptr)->_internal.pos_in_chain = 0; \
+ (ptr)->internal_.chain = NULL; \
+ (ptr)->internal_.pos_in_chain = 0; \
} while (0)
static void evbuffer_chain_align(struct evbuffer_chain *chain);
@@ -237,7 +237,7 @@ evbuffer_chain_free(struct evbuffer_chain *chain)
EVUTIL_ASSERT(info->parent != NULL);
EVBUFFER_LOCK(info->source);
evbuffer_chain_free(info->parent);
- _evbuffer_decref_and_unlock(info->source);
+ evbuffer_decref_and_unlock_(info->source);
}
mm_free(chain);
@@ -328,14 +328,14 @@ evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
}
void
-_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
+evbuffer_chain_pin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) == 0);
chain->flags |= flag;
}
void
-_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
+evbuffer_chain_unpin_(struct evbuffer_chain *chain, unsigned flag)
{
EVUTIL_ASSERT((chain->flags & flag) != 0);
chain->flags &= ~flag;
@@ -384,7 +384,7 @@ evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
}
void
-_evbuffer_incref(struct evbuffer *buf)
+evbuffer_incref_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
@@ -392,7 +392,7 @@ _evbuffer_incref(struct evbuffer *buf)
}
void
-_evbuffer_incref_and_lock(struct evbuffer *buf)
+evbuffer_incref_and_lock_(struct evbuffer *buf)
{
EVBUFFER_LOCK(buf);
++buf->refcnt;
@@ -511,7 +511,7 @@ evbuffer_invoke_callbacks(struct evbuffer *buffer)
if (buffer->deferred_cbs) {
if (buffer->deferred.queued)
return;
- _evbuffer_incref_and_lock(buffer);
+ evbuffer_incref_and_lock_(buffer);
if (buffer->parent)
bufferevent_incref(buffer->parent);
EVBUFFER_UNLOCK(buffer);
@@ -532,7 +532,7 @@ evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
EVBUFFER_LOCK(buffer);
parent = buffer->parent;
evbuffer_run_callbacks(buffer, 1);
- _evbuffer_decref_and_unlock(buffer);
+ evbuffer_decref_and_unlock_(buffer);
if (parent)
bufferevent_decref(parent);
}
@@ -549,7 +549,7 @@ evbuffer_remove_all_callbacks(struct evbuffer *buffer)
}
void
-_evbuffer_decref_and_unlock(struct evbuffer *buffer)
+evbuffer_decref_and_unlock_(struct evbuffer *buffer)
{
struct evbuffer_chain *chain, *next;
ASSERT_EVBUFFER_LOCKED(buffer);
@@ -579,7 +579,7 @@ void
evbuffer_free(struct evbuffer *buffer)
{
EVBUFFER_LOCK(buffer);
- _evbuffer_decref_and_unlock(buffer);
+ evbuffer_decref_and_unlock_(buffer);
}
void
@@ -636,13 +636,13 @@ evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec
to_alloc += vec[n].iov_len;
}
- if (_evbuffer_expand_fast(buf, to_alloc, 2) < 0) {
+ if (evbuffer_expand_fast_(buf, to_alloc, 2) < 0) {
goto done;
}
for (n = 0; n < n_vec; n++) {
/* XXX each 'add' call here does a bunch of setup that's
- * obviated by _evbuffer_expand_fast, and some cleanup that we
+ * obviated by evbuffer_expand_fast_, and some cleanup that we
* would like to do only once. Instead we should just extract
* the part of the code that's needed. */
@@ -679,9 +679,9 @@ evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
n = 1;
} else {
- if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
+ if (evbuffer_expand_fast_(buf, size, n_vecs)<0)
goto done;
- n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
+ n = evbuffer_read_setup_vecs_(buf, size, vec, n_vecs,
&chainp, 0);
}
@@ -911,7 +911,7 @@ APPEND_CHAIN_MULTICAST(struct evbuffer *dst, struct evbuffer *src)
/* reference evbuffer containing source chain so it
* doesn't get released while the chain is still
* being referenced to */
- _evbuffer_incref(src);
+ evbuffer_incref_(src);
extra->source = src;
/* reference source chain which now becomes immutable */
evbuffer_chain_incref(chain);
@@ -1181,8 +1181,8 @@ evbuffer_copyout_from(struct evbuffer *buf, const struct evbuffer_ptr *pos,
EVBUFFER_LOCK(buf);
if (pos) {
- chain = pos->_internal.chain;
- pos_in_chain = pos->_internal.pos_in_chain;
+ chain = pos->internal_.chain;
+ pos_in_chain = pos->internal_.pos_in_chain;
if (datlen + pos->pos > buf->total_len)
datlen = buf->total_len - pos->pos;
} else {
@@ -1446,14 +1446,14 @@ evbuffer_readline(struct evbuffer *buffer)
static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t i = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = memchr(buffer+i, chr, chain->off-i);
if (cp) {
- it->_internal.chain = chain;
- it->_internal.pos_in_chain = cp - buffer;
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer - i);
return it->pos;
}
@@ -1495,14 +1495,14 @@ find_eol_char(char *s, size_t len)
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t i = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t i = it->internal_.pos_in_chain;
while (chain != NULL) {
char *buffer = (char *)chain->buffer + chain->misalign;
char *cp = find_eol_char(buffer+i, chain->off-i);
if (cp) {
- it->_internal.chain = chain;
- it->_internal.pos_in_chain = cp - buffer;
+ it->internal_.chain = chain;
+ it->internal_.pos_in_chain = cp - buffer;
it->pos += (cp - buffer) - i;
return it->pos;
}
@@ -1519,8 +1519,8 @@ evbuffer_strspn(
struct evbuffer_ptr *ptr, const char *chrset)
{
int count = 0;
- struct evbuffer_chain *chain = ptr->_internal.chain;
- size_t i = ptr->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = ptr->internal_.chain;
+ size_t i = ptr->internal_.pos_in_chain;
if (!chain)
return 0;
@@ -1533,8 +1533,8 @@ evbuffer_strspn(
if (buffer[i] == *p++)
goto next;
}
- ptr->_internal.chain = chain;
- ptr->_internal.pos_in_chain = i;
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
next:
@@ -1543,8 +1543,8 @@ evbuffer_strspn(
i = 0;
if (! chain->next) {
- ptr->_internal.chain = chain;
- ptr->_internal.pos_in_chain = i;
+ ptr->internal_.chain = chain;
+ ptr->internal_.pos_in_chain = i;
ptr->pos += count;
return count;
}
@@ -1557,8 +1557,8 @@ evbuffer_strspn(
static inline int
evbuffer_getchr(struct evbuffer_ptr *it)
{
- struct evbuffer_chain *chain = it->_internal.chain;
- size_t off = it->_internal.pos_in_chain;
+ struct evbuffer_chain *chain = it->internal_.chain;
+ size_t off = it->internal_.pos_in_chain;
if (chain == NULL)
return -1;
@@ -1576,7 +1576,7 @@ evbuffer_search_eol(struct evbuffer *buffer,
int ok = 0;
/* Avoid locking in trivial edge cases */
- if (start && start->_internal.chain == NULL) {
+ if (start && start->internal_.chain == NULL) {
PTR_NOT_FOUND(&it);
if (eol_len_out)
*eol_len_out = extra_drain;
@@ -1589,8 +1589,8 @@ evbuffer_search_eol(struct evbuffer *buffer,
memcpy(&it, start, sizeof(it));
} else {
it.pos = 0;
- it._internal.chain = buffer->first;
- it._internal.pos_in_chain = 0;
+ it.internal_.chain = buffer->first;
+ it.internal_.pos_in_chain = 0;
}
/* the eol_style determines our first stop character and how many
@@ -1997,7 +1997,7 @@ evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
/* Make sure that datlen bytes are available for writing in the last n
* chains. Never copies or moves data. */
int
-_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
+evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{
struct evbuffer_chain *chain = buf->last, *tmp, *next;
size_t avail;
@@ -2168,7 +2168,7 @@ evbuffer_expand(struct evbuffer *buf, size_t datlen)
@return The number of buffers we're using.
*/
int
-_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
+evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs_avail,
struct evbuffer_chain ***chainp, int exact)
{
@@ -2253,19 +2253,19 @@ evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
#ifdef USE_IOVEC_IMPL
/* Since we can use iovecs, we're willing to use the last
* NUM_READ_IOVEC chains. */
- if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
+ if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
result = -1;
goto done;
} else {
IOV_TYPE vecs[NUM_READ_IOVEC];
-#ifdef _EVBUFFER_IOVEC_IS_NATIVE
- nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
+#ifdef EVBUFFER_IOVEC_IS_NATIVE_
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
NUM_READ_IOVEC, &chainp, 1);
#else
/* We aren't using the native struct iovec. Therefore,
we are on win32. */
struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
- nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
+ nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
&chainp, 1);
for (i=0; i < nvecs; ++i)
@@ -2537,8 +2537,8 @@ evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
{
if (howfar > (size_t)pos->pos)
return -1;
- if (pos->_internal.chain && howfar <= pos->_internal.pos_in_chain) {
- pos->_internal.pos_in_chain -= howfar;
+ if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
+ pos->internal_.pos_in_chain -= howfar;
pos->pos -= howfar;
return 0;
} else {
@@ -2568,9 +2568,9 @@ evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
case EVBUFFER_PTR_ADD:
/* this avoids iterating over all previous chains if
we just want to advance the position */
- chain = pos->_internal.chain;
+ chain = pos->internal_.chain;
pos->pos += position;
- position = pos->_internal.pos_in_chain;
+ position = pos->internal_.pos_in_chain;
break;
}
@@ -2580,12 +2580,12 @@ evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
position = 0;
}
if (chain) {
- pos->_internal.chain = chain;
- pos->_internal.pos_in_chain = position + left;
+ pos->internal_.chain = chain;
+ pos->internal_.pos_in_chain = position + left;
} else if (left == 0) {
/* The first byte in the (nonexistent) chain after the last chain */
- pos->_internal.chain = NULL;
- pos->_internal.pos_in_chain = 0;
+ pos->internal_.chain = NULL;
+ pos->internal_.pos_in_chain = 0;
} else {
PTR_NOT_FOUND(pos);
result = -1;
@@ -2613,8 +2613,8 @@ evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
if (pos->pos + len > buf->total_len)
return -1;
- chain = pos->_internal.chain;
- position = pos->_internal.pos_in_chain;
+ chain = pos->internal_.chain;
+ position = pos->internal_.pos_in_chain;
while (len && chain) {
size_t n_comparable;
if (len + position > chain->off)
@@ -2652,15 +2652,15 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
if (start) {
memcpy(&pos, start, sizeof(pos));
- chain = pos._internal.chain;
+ chain = pos.internal_.chain;
} else {
pos.pos = 0;
- chain = pos._internal.chain = buffer->first;
- pos._internal.pos_in_chain = 0;
+ chain = pos.internal_.chain = buffer->first;
+ pos.internal_.pos_in_chain = 0;
}
if (end)
- last_chain = end->_internal.chain;
+ last_chain = end->internal_.chain;
if (!len || len > EV_SSIZE_MAX)
goto done;
@@ -2670,12 +2670,12 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
while (chain) {
const unsigned char *start_at =
chain->buffer + chain->misalign +
- pos._internal.pos_in_chain;
+ pos.internal_.pos_in_chain;
p = memchr(start_at, first,
- chain->off - pos._internal.pos_in_chain);
+ chain->off - pos.internal_.pos_in_chain);
if (p) {
pos.pos += p - start_at;
- pos._internal.pos_in_chain += p - start_at;
+ pos.internal_.pos_in_chain += p - start_at;
if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
if (end && pos.pos + (ev_ssize_t)len > end->pos)
goto not_found;
@@ -2683,17 +2683,17 @@ evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, con
goto done;
}
++pos.pos;
- ++pos._internal.pos_in_chain;
- if (pos._internal.pos_in_chain == chain->off) {
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
+ ++pos.internal_.pos_in_chain;
+ if (pos.internal_.pos_in_chain == chain->off) {
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
}
} else {
if (chain == last_chain)
goto not_found;
- pos.pos += chain->off - pos._internal.pos_in_chain;
- chain = pos._internal.chain = chain->next;
- pos._internal.pos_in_chain = 0;
+ pos.pos += chain->off - pos.internal_.pos_in_chain;
+ chain = pos.internal_.chain = chain->next;
+ pos.internal_.pos_in_chain = 0;
}
}
@@ -2714,19 +2714,19 @@ evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
ev_ssize_t len_so_far = 0;
/* Avoid locking in trivial edge cases */
- if (start_at && start_at->_internal.chain == NULL)
+ if (start_at && start_at->internal_.chain == NULL)
return 0;
EVBUFFER_LOCK(buffer);
if (start_at) {
- chain = start_at->_internal.chain;
+ chain = start_at->internal_.chain;
len_so_far = chain->off
- - start_at->_internal.pos_in_chain;
+ - start_at->internal_.pos_in_chain;
idx = 1;
if (n_vec > 0) {
vec[0].iov_base = chain->buffer + chain->misalign
- + start_at->_internal.pos_in_chain;
+ + start_at->internal_.pos_in_chain;
vec[0].iov_len = len_so_far;
}
chain = chain->next;
View
22 buffer_iocp.c
@@ -88,7 +88,7 @@ pin_release(struct evbuffer_overlapped *eo, unsigned flag)
for (i = 0; i < eo->n_buffers; ++i) {
EVUTIL_ASSERT(chain);
next = chain->next;
- _evbuffer_chain_unpin(chain, flag);
+ evbuffer_chain_unpin_(chain, flag);
chain = next;
}
}
@@ -131,7 +131,7 @@ evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
evbuffer_invoke_callbacks(evbuf);
- _evbuffer_decref_and_unlock(evbuf);
+ evbuffer_decref_and_unlock_(evbuf);
}
void
@@ -145,7 +145,7 @@ evbuffer_commit_write(struct evbuffer *evbuf, ev_ssize_t nBytes)
evbuffer_drain(evbuf, nBytes);
pin_release(buf,EVBUFFER_MEM_PINNED_W);
buf->write_in_progress = 0;
- _evbuffer_decref_and_unlock(evbuf);
+ evbuffer_decref_and_unlock_(evbuf);
}
struct evbuffer *
@@ -204,7 +204,7 @@ evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
WSABUF *b = &buf_o->buffers[i];
b->buf = (char*)( chain->buffer + chain->misalign );
- _evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_W);
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_W);
if ((size_t)at_most > chain->off) {
/* XXXX Cast is safe for now, since win32 has no
@@ -221,7 +221,7 @@ evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
}
buf_o->n_buffers = i;
- _evbuffer_incref(buf);
+ evbuffer_incref_(buf);
if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
@@ -265,11 +265,11 @@ evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
buf_o->n_buffers = 0;
memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
- if (_evbuffer_expand_fast(buf, at_most, MAX_WSABUFS) == -1)
+ if (evbuffer_expand_fast_(buf, at_most, MAX_WSABUFS) == -1)
goto done;
evbuffer_freeze(buf, 0);
- nvecs = _evbuffer_read_setup_vecs(buf, at_most,
+ nvecs = evbuffer_read_setup_vecs_(buf, at_most,
vecs, MAX_WSABUFS, &chainp, 1);
for (i=0;i<nvecs;++i) {
WSABUF_FROM_EVBUFFER_IOV(
@@ -282,12 +282,12 @@ evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
npin=0;
for ( ; chain; chain = chain->next) {
- _evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_R);
+ evbuffer_chain_pin_(chain, EVBUFFER_MEM_PINNED_R);
++npin;
}
EVUTIL_ASSERT(npin == nvecs);
- _evbuffer_incref(buf);
+ evbuffer_incref_(buf);
if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
@@ -308,14 +308,14 @@ evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
}
evutil_socket_t
-_evbuffer_overlapped_get_fd(struct evbuffer *buf)
+evbuffer_overlapped_get_fd_(struct evbuffer *buf)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
return buf_o ? buf_o->fd : -1;
}
void
-_evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd)
+evbuffer_overlapped_set_fd_(struct evbuffer *buf, evutil_socket_t fd)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
EVBUFFER_LOCK(buf);
View
32 bufferevent-internal.h
@@ -321,27 +321,27 @@ int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count.
* unlocking it otherwise. */
-void _bufferevent_incref_and_lock(struct bufferevent *bufev);
+void bufferevent_incref_and_lock_(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev. Returns 1 if it freed
* the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
* unlocking it otherwise. Returns 1 if it freed the bufferevent. */
-int _bufferevent_decref_and_unlock(struct bufferevent *bufev);
+int bufferevent_decref_and_unlock_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a read callback, schedule
* a readcb. Otherwise just run the readcb. */
-void _bufferevent_run_readcb(struct bufferevent *bufev);
+void bufferevent_run_readcb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
* a writecb. Otherwise just run the writecb. */
-void _bufferevent_run_writecb(struct bufferevent *bufev);
+void bufferevent_run_writecb_(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
* it to run with events "what". Otherwise just run the eventcb. */
-void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);
+void bufferevent_run_eventcb_(struct bufferevent *bufev, short what);
/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
* which case add ev with no timeout. */
-int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
+int bufferevent_add_event_(struct event *ev, const struct timeval *tv);
/* =========
* These next functions implement timeouts for bufferevents that aren't doing
@@ -350,15 +350,15 @@ int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
/** Internal use: Set up the ev_read and ev_write callbacks so that
* the other "generic_timeout" functions will work on it. Call this from
* the constructor function. */
-void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
+void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
* Call this from the destructor function. */
-int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
+int bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
* (If an event is enabled and a timeout is set, we add the event. Otherwise
* we delete it.) Call this from anything that changes the timeout values,
* that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
-int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
+int bufferevent_generic_adj_timeouts_(struct bufferevent *bev);
/** Internal use: We have just successfully read data into an inbuf, so
* reset the read timeout (if any). */
@@ -385,8 +385,8 @@ int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
#ifdef EVENT__DISABLE_THREAD_SUPPORT
-#define BEV_LOCK(b) _EVUTIL_NIL_STMT
-#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
+#define BEV_LOCK(b) EVUTIL_NIL_STMT_
+#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do { \
@@ -404,14 +404,14 @@ int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
/* ==== For rate-limiting. */
-int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
+int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
-int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
+int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev,
ev_ssize_t bytes);
-ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
-ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev);
+ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev);
-int _bufferevent_ratelim_init(struct bufferevent_private *bev);
+int bufferevent_ratelim_init_(struct bufferevent_private *bev);
#ifdef __cplusplus
}
View
56 bufferevent.c
@@ -60,7 +60,7 @@
#include "evbuffer-internal.h"
#include "util-internal.h"
-static void _bufferevent_cancel_all(struct bufferevent *bev);
+static void bufferevent_cancel_all_(struct bufferevent *bev);
void
@@ -160,7 +160,7 @@ bufferevent_run_deferred_callbacks_locked(struct deferred_cb *_, void *arg)
EVUTIL_SET_SOCKET_ERROR(err);
bufev->errorcb(bufev, what, bufev->cbarg);
}
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
static void
@@ -204,7 +204,7 @@ bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
EVUTIL_SET_SOCKET_ERROR(err);
UNLOCKED(errorcb(bufev,what,cbarg));
}
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
#undef UNLOCKED
}
@@ -218,7 +218,7 @@ bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
void
-_bufferevent_run_readcb(struct bufferevent *bufev)
+bufferevent_run_readcb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@@ -235,7 +235,7 @@ _bufferevent_run_readcb(struct bufferevent *bufev)
}
void
-_bufferevent_run_writecb(struct bufferevent *bufev)
+bufferevent_run_writecb_(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@@ -252,7 +252,7 @@ _bufferevent_run_writecb(struct bufferevent *bufev)
}
void
-_bufferevent_run_eventcb(struct bufferevent *bufev, short what)
+bufferevent_run_eventcb_(struct bufferevent *bufev, short what)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
@@ -298,7 +298,7 @@ bufferevent_init_common(struct bufferevent_private *bufev_private,
bufev->be_ops = ops;
- _bufferevent_ratelim_init(bufev_private);
+ bufferevent_ratelim_init_(bufev_private);
/*
* Set to EV_WRITE so that using bufferevent_write is going to
@@ -434,7 +434,7 @@ bufferevent_enable(struct bufferevent *bufev, short event)
short impl_events = event;
int r = 0;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (bufev_private->read_suspended)
impl_events &= ~EV_READ;
if (bufev_private->write_suspended)
@@ -445,7 +445,7 @@ bufferevent_enable(struct bufferevent *bufev, short event)
if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
r = -1;
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
return r;
}
@@ -598,7 +598,7 @@ bufferevent_flush(struct bufferevent *bufev,
}
void
-_bufferevent_incref_and_lock(struct bufferevent *bufev)
+bufferevent_incref_and_lock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
BEV_UPCAST(bufev);
@@ -625,7 +625,7 @@ _bufferevent_transfer_lock_ownership(struct bufferevent *donor,
#endif
int
-_bufferevent_decref_and_unlock(struct bufferevent *bufev)
+bufferevent_decref_and_unlock_(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
@@ -692,7 +692,7 @@ int
bufferevent_decref(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
- return _bufferevent_decref_and_unlock(bufev);
+ return bufferevent_decref_and_unlock_(bufev);
}
void
@@ -700,8 +700,8 @@ bufferevent_free(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
- _bufferevent_cancel_all(bufev);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_cancel_all_(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
void
@@ -778,7 +778,7 @@ bufferevent_getfd(struct bufferevent *bev)
}
static void
-_bufferevent_cancel_all(struct bufferevent *bev)
+bufferevent_cancel_all_(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
memset(&d, 0, sizeof(d));
@@ -815,23 +815,23 @@ static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_READ);
- _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
+ bufferevent_decref_and_unlock_(bev);
}
static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
bufferevent_disable(bev, EV_WRITE);
- _bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
+ bufferevent_decref_and_unlock_(bev);
}
void
-_bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
+bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev)
{
evtimer_assign(&bev->ev_read, bev->ev_base,
bufferevent_generic_read_timeout_cb, bev);
@@ -840,7 +840,7 @@ _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
}
int
-_bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
+bufferevent_del_generic_timeout_cbs_(struct bufferevent *bev)
{
int r1,r2;
r1 = event_del(&bev->ev_read);
@@ -851,7 +851,7 @@ _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
}
int
-_bufferevent_generic_adj_timeouts(struct bufferevent *bev)
+bufferevent_generic_adj_timeouts_(struct bufferevent *bev)
{
const short enabled = bev->enabled;
struct bufferevent_private *bev_p =
@@ -875,7 +875,7 @@ _bufferevent_generic_adj_timeouts(struct bufferevent *bev)
}
int
-_bufferevent_add_event(struct event *ev, const struct timeval *tv)
+bufferevent_add_event_(struct event *ev, const struct timeval *tv)
{
if (tv->tv_sec == 0 && tv->tv_usec == 0)
return event_add(ev, NULL);
@@ -884,15 +884,15 @@ _bufferevent_add_event(struct event *ev, const struct timeval *tv)
}
/* For use by user programs only; internally, we should be calling
- either _bufferevent_incref_and_lock(), or BEV_LOCK. */
+ either bufferevent_incref_and_lock_(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
}
void
bufferevent_unlock(struct bufferevent *bev)
{
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
View
70 bufferevent_async.c
@@ -94,7 +94,7 @@ const struct bufferevent_ops bufferevent_ops_async = {
be_async_enable,
be_async_disable,
be_async_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_async_flush,
be_async_ctrl,
};
@@ -201,7 +201,7 @@ bev_async_consider_writing(struct bufferevent_async *beva)
/* This is safe so long as bufferevent_get_write_max never returns
* more than INT_MAX. That's true for now. XXXX */
- limit = (int)_bufferevent_get_write_max(&beva->bev);
+ limit = (int)bufferevent_get_write_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
@@ -216,10 +216,10 @@ bev_async_consider_writing(struct bufferevent_async *beva)
&beva->write_overlapped)) {
bufferevent_decref(bev);
beva->ok = 0;
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
} else {
beva->write_in_progress = at_most;
- _bufferevent_decrement_write_buckets(&beva->bev, at_most);
+ bufferevent_decrement_write_buckets_(&beva->bev, at_most);
bev_async_add_write(beva);
}
}
@@ -256,8 +256,8 @@ bev_async_consider_reading(struct bufferevent_async *beva)
}
/* XXXX This over-commits. */
- /* XXXX see also not above on cast on _bufferevent_get_write_max() */
- limit = (int)_bufferevent_get_read_max(&beva->bev);
+ /* XXXX see also note above on cast on bufferevent_get_write_max_() */
+ limit = (int)bufferevent_get_read_max_(&beva->bev);
if (at_most >= (size_t)limit && limit >= 0)
at_most = limit;
@@ -269,11 +269,11 @@ bev_async_consider_reading(struct bufferevent_async *beva)
bufferevent_incref(bev);
if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
beva->ok = 0;
- _bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR);
bufferevent_decref(bev);
} else {
beva->read_in_progress = at_most;
- _bufferevent_decrement_read_buckets(&beva->bev, at_most);
+ bufferevent_decrement_read_buckets_(&beva->bev, at_most);
bev_async_add_read(beva);
}
@@ -291,12 +291,12 @@ be_async_outbuf_callback(struct evbuffer *buf,
/* If we added data to the outbuf and were not writing before,
* we may want to write now. */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
if (cbinfo->n_added)
bev_async_consider_writing(bev_async);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
@@ -310,12 +310,12 @@ be_async_inbuf_callback(struct evbuffer *buf,
/* If we drained data from the inbuf and were not reading before,
* we may want to read now */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
if (cbinfo->n_deleted)
bev_async_consider_reading(bev_async);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static int
@@ -379,7 +379,7 @@ be_async_destruct(struct bufferevent *bev)
bev_async_del_read(bev_async);
bev_async_del_write(bev_async);
- fd = _evbuffer_overlapped_get_fd(bev->input);
+ fd = evbuffer_overlapped_get_fd_(bev->input);
if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
/* XXXX possible double-close */
evutil_closesocket(fd);
@@ -387,7 +387,7 @@ be_async_destruct(struct bufferevent *bev)
/* delete this in case non-blocking connect was used */
if (event_initialized(&bev->ev_write)) {
event_del(&bev->ev_write);
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
}
@@ -399,7 +399,7 @@ bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
DWORD bytes, flags;
evutil_socket_t fd;
- fd = _evbuffer_overlapped_get_fd(bev->input);
+ fd = evbuffer_overlapped_get_fd_(bev->input);
WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
@@ -422,7 +422,7 @@ connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
EVUTIL_ASSERT(bev_a->bev.connecting);
bev_a->bev.connecting = 0;
- sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
+ sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
/* XXXX Handle error? */
setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);
@@ -431,12 +431,12 @@ connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
else
bev_async_set_wsa_error(bev, eo);
- _bufferevent_run_eventcb(bev,
+ bufferevent_run_eventcb_(bev,
ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);
event_base_del_virtual(bev->ev_base);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
@@ -454,7 +454,7 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
evbuffer_commit_read(bev->input, nbytes);
bev_a->read_in_progress = 0;
if (amount_unread)
- _bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);
+ bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);
if (!ok)
bev_async_set_wsa_error(bev, eo);
@@ -463,20 +463,20 @@ read_complete(struct event_overlapped *eo, ev_uintptr_t key,
if (ok && nbytes) {
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
- _bufferevent_run_readcb(bev);
+ bufferevent_run_readcb_(bev);
bev_async_consider_reading(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
}
}
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
static void
@@ -496,7 +496,7 @@ write_complete(struct event_overlapped *eo, ev_uintptr_t key,
bev_a->write_in_progress = 0;
if (amount_unwritten)
- _bufferevent_decrement_write_buckets(&bev_a->bev,
+ bufferevent_decrement_write_buckets_(&bev_a->bev,
-amount_unwritten);
@@ -508,20 +508,20 @@ write_complete(struct event_overlapped *eo, ev_uintptr_t key,
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(bev->output) <=
bev->wm_write.low)
- _bufferevent_run_writecb(bev);
+ bufferevent_run_writecb_(bev);
bev_async_consider_writing(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
- _bufferevent_run_eventcb(bev, what);
+ bufferevent_run_eventcb_(bev, what);
}
}
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
struct bufferevent *
@@ -573,7 +573,7 @@ bufferevent_async_new(struct event_base *base,
bev_a->ok = fd >= 0;
if (bev_a->ok)
- _bufferevent_init_generic_timeout_cbs(bev);
+ bufferevent_init_generic_timeout_cbs_(bev);
return bev;
err:
@@ -586,7 +586,7 @@ bufferevent_async_set_connected(struct bufferevent *bev)
{
struct bufferevent_async *bev_async = upcast(bev);
bev_async->ok = 1;
- _bufferevent_init_generic_timeout_cbs(bev);
+ bufferevent_init_generic_timeout_cbs_(bev);
/* Now's a good time to consider reading/writing */
be_async_enable(bev, bev->enabled);
}
@@ -657,24 +657,24 @@ be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
{
switch (op) {
case BEV_CTRL_GET_FD:
- data->fd = _evbuffer_overlapped_get_fd(bev->input);
+ data->fd = evbuffer_overlapped_get_fd_(bev->input);
return 0;
case BEV_CTRL_SET_FD: {
struct event_iocp_port *iocp;
- if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
+ if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
return 0;
if (!(iocp = event_base_get_iocp(bev->ev_base)))
return -1;
if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
return -1;
- _evbuffer_overlapped_set_fd(bev->input, data->fd);
- _evbuffer_overlapped_set_fd(bev->output, data->fd);
+ evbuffer_overlapped_set_fd_(bev->input, data->fd);
+ evbuffer_overlapped_set_fd_(bev->output, data->fd);
return 0;
}
case BEV_CTRL_CANCEL_ALL: {
struct bufferevent_async *bev_a = upcast(bev);
- evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
+ evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
if (fd != (evutil_socket_t)INVALID_SOCKET &&
(bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
closesocket(fd);
View
32 bufferevent_filter.c
@@ -100,7 +100,7 @@ const struct bufferevent_ops bufferevent_ops_filter = {
be_filter_enable,
be_filter_disable,
be_filter_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_filter_flush,
be_filter_ctrl,
};
@@ -204,7 +204,7 @@ bufferevent_filter_new(struct bufferevent *underlying,
bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
bufferevent_filtered_outbuf_cb, bufev_f);
- _bufferevent_init_generic_timeout_cbs(downcast(bufev_f));
+ bufferevent_init_generic_timeout_cbs_(downcast(bufev_f));
bufferevent_incref(underlying);
bufferevent_enable(underlying, EV_READ|EV_WRITE);
@@ -243,7 +243,7 @@ be_filter_destruct(struct bufferevent *bev)
}
}
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
static int
@@ -372,7 +372,7 @@ be_filter_process_output(struct bufferevent_filtered *bevf,
if (processed &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
/* call the write callback.*/
- _bufferevent_run_writecb(bufev);
+ bufferevent_run_writecb_(bufev);
if (res == BEV_OK &&
(bufev->enabled & EV_WRITE) &&
@@ -405,9 +405,9 @@ bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
int processed_any = 0;
/* Somebody added more data to the output buffer. Try to
* process it, if we should. */
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
}
@@ -421,7 +421,7 @@ be_filter_readcb(struct bufferevent *underlying, void *_me)
struct bufferevent *bufev = downcast(bevf);
int processed_any = 0;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (bevf->got_eof)
state = BEV_FINISHED;
@@ -437,9 +437,9 @@ be_filter_readcb(struct bufferevent *underlying, void *_me)
* force readcb calls as needed. */
if (processed_any &&
evbuffer_get_length(bufev->input) >= bufev->wm_read.low)
- _bufferevent_run_readcb(bufev);
+ bufferevent_run_readcb_(bufev);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
/* Called when the underlying socket has drained enough that we can write to
@@ -451,9 +451,9 @@ be_filter_writecb(struct bufferevent *underlying, void *_me)
struct bufferevent *bev = downcast(bevf);
int processed_any = 0;
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_decref_and_unlock_(bev);
}
/* Called when the underlying socket has given us an error */
@@ -463,10 +463,10 @@ be_filter_eventcb(struct bufferevent *underlying, short what, void *_me)
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
- _bufferevent_incref_and_lock(bev);
+ bufferevent_incref_and_lock_(bev);
/* All we can really do is tell our own eventcb. */
- _bufferevent_run_eventcb(bev, what);
- _bufferevent_decref_and_unlock(bev);
+ bufferevent_run_eventcb_(bev, what);
+ bufferevent_decref_and_unlock_(bev);
}
static int
@@ -477,7 +477,7 @@ be_filter_flush(struct bufferevent *bufev,
int processed_any = 0;
EVUTIL_ASSERT(bevf);
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (iotype & EV_READ) {
be_filter_process_input(bevf, mode, &processed_any);
@@ -489,7 +489,7 @@ be_filter_flush(struct bufferevent *bufev,
/* XXX does this want to recursively call lower-level flushes? */
bufferevent_flush(bevf->underlying, iotype, mode);
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
return processed_any;
}
View
66 bufferevent_openssl.c
@@ -383,9 +383,9 @@ start_reading(struct bufferevent_openssl *bev_ssl)
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
int r;
- r = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (r == 0 && bev_ssl->read_blocked_on_write)
- r = _bufferevent_add_event(&bev->ev_write,
+ r = bufferevent_add_event_(&bev->ev_write,
&bev->timeout_write);
return r;
}
@@ -402,9 +402,9 @@ start_writing(struct bufferevent_openssl *bev_ssl)
;
} else {
struct bufferevent *bev = &bev_ssl->bev.bev;
- r = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (!r && bev_ssl->write_blocked_on_read)
- r = _bufferevent_add_event(&bev->ev_read,
+ r = bufferevent_add_event_(&bev->ev_read,
&bev->timeout_read);
}
return r;
@@ -531,7 +531,7 @@ conn_closed(struct bufferevent_openssl *bev_ssl, int when, int errcode, int ret)
/* when is BEV_EVENT_{READING|WRITING} */
event = when | event;
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
@@ -552,9 +552,9 @@ decrement_buckets(struct bufferevent_openssl *bev_ssl)
unsigned long w = num_w - bev_ssl->counts.n_written;
unsigned long r = num_r - bev_ssl->counts.n_read;
if (w)
- _bufferevent_decrement_write_buckets(&bev_ssl->bev, w);
+ bufferevent_decrement_write_buckets_(&bev_ssl->bev, w);
if (r)
- _bufferevent_decrement_read_buckets(&bev_ssl->bev, r);
+ bufferevent_decrement_read_buckets_(&bev_ssl->bev, r);
bev_ssl->counts.n_written = num_w;
bev_ssl->counts.n_read = num_r;
}
@@ -569,7 +569,7 @@ do_read(struct bufferevent_openssl *bev_ssl, int n_to_read)
int r, n, i, n_used = 0, blocked = 0, atmost;
struct evbuffer_iovec space[2];
- atmost = _bufferevent_get_read_max(&bev_ssl->bev);
+ atmost = bufferevent_get_read_max_(&bev_ssl->bev);
if (n_to_read > atmost)
n_to_read = atmost;
@@ -620,7 +620,7 @@ do_read(struct bufferevent_openssl *bev_ssl, int n_to_read)
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (evbuffer_get_length(input) >= bev->wm_read.low)
- _bufferevent_run_readcb(bev);
+ bufferevent_run_readcb_(bev);
}
return blocked ? 0 : 1;
@@ -637,7 +637,7 @@ do_write(struct bufferevent_openssl *bev_ssl, int atmost)
if (bev_ssl->last_write > 0)
atmost = bev_ssl->last_write;
else
- atmost = _bufferevent_get_write_max(&bev_ssl->bev);
+ atmost = bufferevent_get_write_max_(&bev_ssl->bev);
n = evbuffer_peek(output, atmost, NULL, space, 8);
if (n < 0)
@@ -698,7 +698,7 @@ do_write(struct bufferevent_openssl *bev_ssl, int atmost)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
if (evbuffer_get_length(output) <= bev->wm_write.low)
- _bufferevent_run_writecb(bev);
+ bufferevent_run_writecb_(bev);
}
return blocked ? 0 : 1;
}
@@ -742,7 +742,7 @@ bytes_to_read(struct bufferevent_openssl *bev)
}
/* Respect the rate limit */
- limit = _bufferevent_get_read_max(&bev->bev);
+ limit = bufferevent_get_read_max_(&bev->bev);
if (result > limit) {
result = limit;
}
@@ -892,33 +892,33 @@ be_openssl_eventcb(struct bufferevent *bev_base, short what, void *ctx)
eat it. */
}
if (event)
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, event);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, event);
}
static void
be_openssl_readeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
} else
consider_reading(bev_ssl);
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static void
be_openssl_writeeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
}
consider_writing(bev_ssl);
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
@@ -945,9 +945,9 @@ set_open_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_writeeventcb, bev_ssl);
if (rpending)
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (wpending)
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
if (fd >= 0) {
bev_ssl->fd_is_set = 1;
}
@@ -978,7 +978,7 @@ do_handshake(struct bufferevent_openssl *bev_ssl)
set_open_callbacks(bev_ssl, -1); /* XXXX handle failure */
/* Call do_read and do_write as needed */
bufferevent_enable(&bev_ssl->bev.bev, bev_ssl->bev.bev.enabled);
- _bufferevent_run_eventcb(&bev_ssl->bev.bev,
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev,
BEV_EVENT_CONNECTED);
return 1;
} else {
@@ -1016,12 +1016,12 @@ be_openssl_handshakeeventcb(evutil_socket_t fd, short what, void *ptr)
{
struct bufferevent_openssl *bev_ssl = ptr;
- _bufferevent_incref_and_lock(&bev_ssl->bev.bev);
+ bufferevent_incref_and_lock_(&bev_ssl->bev.bev);
if (what & EV_TIMEOUT) {
- _bufferevent_run_eventcb(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
+ bufferevent_run_eventcb_(&bev_ssl->bev.bev, BEV_EVENT_TIMEOUT);
} else
do_handshake(bev_ssl);/* XXX handle failure */
- _bufferevent_decref_and_unlock(&bev_ssl->bev.bev);
+ bufferevent_decref_and_unlock_(&bev_ssl->bev.bev);
}
static int
@@ -1047,8 +1047,8 @@ set_handshake_callbacks(struct bufferevent_openssl *bev_ssl, evutil_socket_t fd)
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, be_openssl_handshakeeventcb, bev_ssl);
if (fd >= 0) {
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
bev_ssl->fd_is_set = 1;
}
return (r1<0 || r2<0) ? -1 : 0;
@@ -1081,7 +1081,7 @@ be_openssl_outbuf_cb(struct evbuffer *buf,
if (cbinfo->n_added && bev_ssl->state == BUFFEREVENT_SSL_OPEN) {
if (cbinfo->orig_size == 0)
- r = _bufferevent_add_event(&bev_ssl->bev.bev.ev_write,
+ r = bufferevent_add_event_(&bev_ssl->bev.bev.ev_write,
&bev_ssl->bev.bev.timeout_write);
consider_writing(bev_ssl);
}
@@ -1145,7 +1145,7 @@ be_openssl_destruct(struct bufferevent *bev)
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying) {
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
} else {
event_del(&bev->ev_read);
event_del(&bev->ev_write);
@@ -1186,13 +1186,13 @@ be_openssl_adj_timeouts(struct bufferevent *bev)
struct bufferevent_openssl *bev_ssl = upcast(bev);
if (bev_ssl->underlying)
- return _bufferevent_generic_adj_timeouts(bev);
+ return bufferevent_generic_adj_timeouts_(bev);
else {
int r1=0, r2=0;
if (event_pending(&bev->ev_read, EV_READ, NULL))
- r1 = _bufferevent_add_event(&bev->ev_read, &bev->timeout_read);
+ r1 = bufferevent_add_event_(&bev->ev_read, &bev->timeout_read);
if (event_pending(&bev->ev_write, EV_WRITE, NULL))
- r2 = _bufferevent_add_event(&bev->ev_write, &bev->timeout_write);
+ r2 = bufferevent_add_event_(&bev->ev_write, &bev->timeout_write);
return (r1<0 || r2<0) ? -1 : 0;
}
}
@@ -1290,7 +1290,7 @@ bufferevent_openssl_new_impl(struct event_base *base,
bufferevent_enable_locking(&bev_ssl->bev.bev, NULL);
if (underlying) {
- _bufferevent_init_generic_timeout_cbs(&bev_ssl->bev.bev);
+ bufferevent_init_generic_timeout_cbs_(&bev_ssl->bev.bev);
bufferevent_incref(underlying);
}
View
20 bufferevent_pair.c
@@ -67,10 +67,10 @@ static inline void
incref_and_lock(struct bufferevent *b)
{
struct bufferevent_pair *bevp;
- _bufferevent_incref_and_lock(b);
+ bufferevent_incref_and_lock_(b);
bevp = upcast(b);
if (bevp->partner)
- _bufferevent_incref_and_lock(downcast(bevp->partner));
+ bufferevent_incref_and_lock_(downcast(bevp->partner));
}
static inline void
@@ -78,8 +78,8 @@ decref_and_unlock(struct bufferevent *b)
{
struct bufferevent_pair *bevp = upcast(b);
if (bevp->partner)
- _bufferevent_decref_and_unlock(downcast(bevp->partner));
- _bufferevent_decref_and_unlock(b);
+ bufferevent_decref_and_unlock_(downcast(bevp->partner));
+ bufferevent_decref_and_unlock_(b);
}
/* XXX Handle close */
@@ -104,7 +104,7 @@ bufferevent_pair_elt_new(struct event_base *base,
return NULL;
}
- _bufferevent_init_generic_timeout_cbs(&bufev->bev.bev);
+ bufferevent_init_generic_timeout_cbs_(&bufev->bev.bev);
return bufev;
}
@@ -186,10 +186,10 @@ be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
dst_size = evbuffer_get_length(dst->input);
if (dst_size >= dst->wm_read.low) {
- _bufferevent_run_readcb(dst);
+ bufferevent_run_readcb_(dst);
}
if (src_size <= src->wm_write.low) {
- _bufferevent_run_writecb(src);
+ bufferevent_run_writecb_(src);
}
done:
evbuffer_freeze(src->output, 1);
@@ -275,7 +275,7 @@ be_pair_destruct(struct bufferevent *bev)
bev_p->partner = NULL;
}
- _bufferevent_del_generic_timeout_cbs(bev);
+ bufferevent_del_generic_timeout_cbs_(bev);
}
static int
@@ -300,7 +300,7 @@ be_pair_flush(struct bufferevent *bev, short iotype,
be_pair_transfer(bev, partner, 1);
if (mode == BEV_FINISHED) {
- _bufferevent_run_eventcb(partner, iotype|BEV_EVENT_EOF);
+ bufferevent_run_eventcb_(partner, iotype|BEV_EVENT_EOF);
}
decref_and_unlock(bev);
return 0;
@@ -327,7 +327,7 @@ const struct bufferevent_ops bufferevent_ops_pair = {
be_pair_enable,
be_pair_disable,
be_pair_destruct,
- _bufferevent_generic_adj_timeouts,
+ bufferevent_generic_adj_timeouts_,
be_pair_flush,
NULL, /* ctrl */
};
View
80 bufferevent_ratelim.c
@@ -185,17 +185,17 @@ ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
-static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
-static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
-static void _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g);
-static void _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g);
+static int bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g);
+static void bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g);
/** Helper: figure out the maximum amount we should write if is_write, or
the maximum amount we should read if is_read. Return that maximum, or
0 if our bucket is wholly exhausted.
*/
static inline ev_ssize_t
-_bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
+bufferevent_get_rlim_max_(struct bufferevent_private *bev, int is_write)
{
/* needs lock on bev. */
ev_ssize_t max_so_far = is_write?bev->max_single_write:bev->max_single_read;
@@ -258,19 +258,19 @@ _bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
}
ev_ssize_t
-_bufferevent_get_read_max(struct bufferevent_private *bev)
+bufferevent_get_read_max_(struct bufferevent_private *bev)
{
- return _bufferevent_get_rlim_max(bev, 0);
+ return bufferevent_get_rlim_max_(bev, 0);
}
ev_ssize_t
-_bufferevent_get_write_max(struct bufferevent_private *bev)
+bufferevent_get_write_max_(struct bufferevent_private *bev)
{
- return _bufferevent_get_rlim_max(bev, 1);
+ return bufferevent_get_rlim_max_(bev, 1);
}
int
-_bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
+bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
@@ -297,9 +297,9 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
bev->rate_limiting->group->rate_limit.read_limit -= bytes;
bev->rate_limiting->group->total_read += bytes;
if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
- _bev_group_suspend_reading(bev->rate_limiting->group);
+ bev_group_suspend_reading_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->read_suspended) {
- _bev_group_unsuspend_reading(bev->rate_limiting->group);
+ bev_group_unsuspend_reading_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
@@ -308,7 +308,7 @@ _bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t
}
int
-_bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
+bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, ev_ssize_t bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
@@ -335,9 +335,9 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
bev->rate_limiting->group->rate_limit.write_limit -= bytes;
bev->rate_limiting->group->total_written += bytes;
if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
- _bev_group_suspend_writing(bev->rate_limiting->group);
+ bev_group_suspend_writing_(bev->rate_limiting->group);
} else if (bev->rate_limiting->group->write_suspended) {
- _bev_group_unsuspend_writing(bev->rate_limiting->group);
+ bev_group_unsuspend_writing_(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
@@ -347,7 +347,7 @@ _bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t
/** Stop reading on every bufferevent in <b>g</b> */
static int
-_bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
+bev_group_suspend_reading_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
@@ -372,7 +372,7 @@ _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
/** Stop writing on every bufferevent in <b>g</b> */
static int
-_bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
+bev_group_suspend_writing_(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
@@ -391,7 +391,7 @@ _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
/** Timer callback invoked on a single bufferevent with one or more exhausted
buckets when they are ready to refill. */
static void
-_bev_refill_callback(evutil_socket_t fd, short what, void *arg)
+bev_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
unsigned tick;
struct timeval now;
@@ -440,7 +440,7 @@ _bev_refill_callback(evutil_socket_t fd, short what, void *arg)
/** Helper: grab a random element from a bufferevent group. */
static struct bufferevent_private *
-_bev_group_random_element(struct bufferevent_rate_limit_group *group)
+bev_group_random_element_(struct bufferevent_rate_limit_group *group)
{
int which;
struct bufferevent_private *bev;
@@ -452,7 +452,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
EVUTIL_ASSERT(! LIST_EMPTY(&group->members));
- which = _evutil_weakrand() % group->n_members;
+ which = evutil_weakrand_() % group->n_members;
bev = LIST_FIRST(&group->members);
while (which--)
@@ -470,7 +470,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
*/
#define FOREACH_RANDOM_ORDER(block) \
do { \
- first = _bev_group_random_element(g); \
+ first = bev_group_random_element_(g); \
for (bev = first; bev != LIST_END(&g->members); \
bev = LIST_NEXT(bev, rate_limiting->next_in_group)) { \
block ; \
@@ -482,7 +482,7 @@ _bev_group_random_element(struct bufferevent_rate_limit_group *group)
} while (0)
static void
-_bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
+bev_group_unsuspend_reading_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
@@ -501,7 +501,7 @@ _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
}
static void
-_bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
+bev_group_unsuspend_writing_(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
@@ -523,7 +523,7 @@ _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
and unsuspend group members as needed.
*/
static void
-_bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
+bev_group_refill_callback_(evutil_socket_t fd, short what, void *arg)
{
struct bufferevent_rate_limit_group *g = arg;
unsigned tick;
@@ -538,11 +538,11 @@ _bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
if (g->pending_unsuspend_read ||
(g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
- _bev_group_unsuspend_reading(g);
+ bev_group_unsuspend_reading_(g);
}
if (g->pending_unsuspend_write ||
(g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
- _bev_group_unsuspend_writing(g);
+ bev_group_unsuspend_writing_(g);
}
/* XXXX Rather than waiting to the next tick to unsuspend stuff
@@ -607,7 +607,7 @@ bufferevent_set_rate_limit(struct bufferevent *bev,
event_del(&rlim->refill_bucket_event);
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
- _bev_refill_callback, bevp);
+ bev_refill_callback_, bevp);
if (rlim->limit.read_limit > 0) {
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
@@ -652,7 +652,7 @@ bufferevent_rate_limit_group_new(struct event_base *base,
ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);
event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
- _bev_group_refill_callback, g);
+ bev_group_refill_callback_, g);
/*XXXX handle event_add failure */
event_add(&g->master_refill_event, &cfg->tick_timeout);
@@ -743,7 +743,7 @@ bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
return -1;
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
- _bev_refill_callback, bevp);
+ bev_refill_callback_, bevp);
bevp->rate_limiting = rlim;
}
@@ -811,7 +811,7 @@ bufferevent_remove_from_rate_limit_group_internal(struct bufferevent *bev,
* === */
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_read_max() is more likely what you want*/
+ * bufferevent_get_read_max_() is more likely what you want*/
ev_ssize_t
bufferevent_get_read_limit(struct bufferevent *bev)
{
@@ -830,7 +830,7 @@ bufferevent_get_read_limit(struct bufferevent *bev)
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_write_max() is more likely what you want*/
+ * bufferevent_get_write_max_() is more likely what you want*/
ev_ssize_t
bufferevent_get_write_limit(struct bufferevent *bev)
{
@@ -903,7 +903,7 @@ bufferevent_get_max_to_read(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
- r = _bufferevent_get_read_max(BEV_UPCAST(bev));
+ r = bufferevent_get_read_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
@@ -913,14 +913,14 @@ bufferevent_get_max_to_write(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
- r = _bufferevent_get_write_max(BEV_UPCAST(bev));
+ r = bufferevent_get_write_max_(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_read_max() is more likely what you want*/
+ * bufferevent_get_read_max_() is more likely what you want*/
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
struct bufferevent_rate_limit_group *grp)
@@ -933,7 +933,7 @@ bufferevent_rate_limit_group_get_read_limit(
}
/* Mostly you don't want to use this function from inside libevent;
- * _bufferevent_get_write_max() is more likely what you want. */
+ * bufferevent_get_write_max_() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
struct bufferevent_rate_limit_group *grp)
@@ -1012,9 +1012,9 @@ bufferevent_rate_limit_group_decrement_read(
new_limit = (grp->rate_limit.read_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
- _bev_group_suspend_reading(grp);
+ bev_group_suspend_reading_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
- _bev_group_unsuspend_reading(grp);
+ bev_group_unsuspend_reading_(grp);
}
UNLOCK_GROUP(grp);
@@ -1032,9 +1032,9 @@ bufferevent_rate_limit_group_decrement_write(
new_limit = (grp->rate_limit.write_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
- _bev_group_suspend_writing(grp);
+ bev_group_suspend_writing_(grp);
} else if (old_limit <= 0 && new_limit > 0) {
- _bev_group_unsuspend_writing(grp);
+ bev_group_unsuspend_writing_(grp);
}
UNLOCK_GROUP(grp);
@@ -1059,7 +1059,7 @@ bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *g
}
int
-_bufferevent_ratelim_init(struct bufferevent_private *bev)
+bufferevent_ratelim_init_(struct bufferevent_private *bev)
{
bev->rate_limiting = NULL;
bev->max_single_read = MAX_SINGLE_READ_DEFAULT;
View
44 bufferevent_sock.c
@@ -97,7 +97,7 @@ const struct bufferevent_ops bufferevent_ops_socket = {
};
#define be_socket_add(ev, t) \
- _bufferevent_add_event((ev), (t))
+ bufferevent_add_event_((ev), (t))
static void
bufferevent_socket_outbuf_cb(struct evbuffer *buf,
@@ -131,7 +131,7 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
short what = BEV_EVENT_READING;
ev_ssize_t howmuch = -1, readmax=-1;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
@@ -152,7 +152,7 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
goto done;
}
}
- readmax = _bufferevent_get_read_max(bufev_p);
+ readmax = bufferevent_get_read_max_(bufev_p);
if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
* uglifies this code. XXXX */
howmuch = readmax;
@@ -177,11 +177,11 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
if (res <= 0)
goto error;
- _bufferevent_decrement_read_buckets(bufev_p, res);
+ bufferevent_decrement_read_buckets_(bufev_p, res);
/* Invoke the user callback - must always be called last */
if (evbuffer_get_length(input) >= bufev->wm_read.low)
- _bufferevent_run_readcb(bufev);
+ bufferevent_run_readcb_(bufev);
goto done;
@@ -190,10 +190,10 @@ bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
error:
bufferevent_disable(bufev, EV_READ);
- _bufferevent_run_eventcb(bufev, what);
+ bufferevent_run_eventcb_(bufev, what);
done:
- _bufferevent_decref_and_unlock(bufev);
+ bufferevent_decref_and_unlock_(bufev);
}
static void
@@ -207,7 +207,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
int connected = 0;
ev_ssize_t atmost = -1;
- _bufferevent_incref_and_lock(bufev);
+ bufferevent_incref_and_lock_(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
@@ -229,7 +229,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (c < 0) {
event_del(&bufev->ev_write);
event_del(&bufev->ev_read);
- _bufferevent_run_eventcb(bufev, BEV_EVENT_ERROR);
+ bufferevent_run_eventcb_(bufev, BEV_EVENT_ERROR);
goto done;
} else {
connected = 1;
@@ -237,12 +237,12 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (BEV_IS_ASYNC(bufev)) {
event_del(&bufev->ev_write);
bufferevent_async_set_connected(bufev);
- _bufferevent_run_eventcb(bufev,
+ bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
goto done;
}
#endif
- _bufferevent_run_eventcb(bufev,
+ bufferevent_run_eventcb_(bufev,
BEV_EVENT_CONNECTED);
if (!(bufev->enabled & EV_WRITE) ||
bufev_p->write_suspended) {
@@ -252,7 +252,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
}
}
- atmost = _bufferevent_get_write_max(bufev_p);
+ atmost = bufferevent_get_write_max_(bufev_p);
if (bufev_p->write_suspended)
goto done;
@@ -276,7 +276,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
if (res <= 0)
goto error;
- _bufferevent_decrement_write_buckets(bufev_p, res);
+ bufferevent_decrement_write_buckets_(bufev_p, res);
}
if (evbuffer_get_length(bufev->output) == 0) {
@@ -289,7 +289,7 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
*/
if ((res || !connected) &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
- _bufferevent_run_writecb(bufev);
+ bufferevent_run_writecb_(bufev);
}
goto done;
@@ -302,10 +302,10 @@ bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
error: