From 5dcb18fb7b1cdfe90b320b42346dfaf84162f8fa Mon Sep 17 00:00:00 2001 From: Evan Zelkowitz Date: Fri, 14 Aug 2020 20:38:52 +0000 Subject: [PATCH 1/4] Adds support for CSV parsing in astats The astats parser will check the incoming content-type header to determine if it should parse the data as json or csv. Added a cfg option of http_polling_format that defaults to "text/json", which is what is sent on http requests to caches. Changing it to "text/csv" will enable the csv output if the cache's astats plugin has support for CSV, otherwise astats will respond with "text/json" or "text/javascript" depending on the version. --- .dependency_license | 1 + docs/source/admin/traffic_monitor.rst | 6 + traffic_monitor/cache/astats.csv | 527 ++++++++++++++++++ traffic_monitor/cache/astats.go | 77 +-- traffic_monitor/cache/astats_csv.go | 114 ++++ traffic_monitor/cache/astats_test.go | 87 ++- traffic_monitor/cache/cache.go | 4 +- traffic_monitor/cache/noop.go | 2 +- traffic_monitor/cache/statistics.go | 8 +- traffic_monitor/cache/stats_over_http.go | 22 +- traffic_monitor/cache/stats_over_http_test.go | 8 +- traffic_monitor/cache/stats_types.go | 2 +- traffic_monitor/config/config.go | 10 +- traffic_monitor/handler/handler.go | 2 +- traffic_monitor/peer/peer.go | 4 +- traffic_monitor/poller/cache.go | 2 +- traffic_monitor/poller/poller_type_http.go | 44 +- 17 files changed, 836 insertions(+), 84 deletions(-) create mode 100644 traffic_monitor/cache/astats.csv create mode 100644 traffic_monitor/cache/astats_csv.go diff --git a/.dependency_license b/.dependency_license index 452e84468a..cca8aab812 100644 --- a/.dependency_license +++ b/.dependency_license @@ -34,6 +34,7 @@ CHANGELOG$, Docs \.cfg$, Apache-2.0 \.json$, Apache-2.0 \.webmanifest, Apache-2.0 +\.csv$, Apache-2.0 \.conf$, Apache-2.0 \.config(\.example)?$, Apache-2.0 /\.bowerrc$, Apache-2.0 diff --git a/docs/source/admin/traffic_monitor.rst b/docs/source/admin/traffic_monitor.rst index b7871a42c5..3035281001 100644 
--- a/docs/source/admin/traffic_monitor.rst +++ b/docs/source/admin/traffic_monitor.rst @@ -78,6 +78,12 @@ Note that this means the stat buffer interval acts as "bufferbloat," increasing It is not recommended to set either flush interval to 0, regardless of the stat buffer interval. This will cause new results to be immediately processed, with little to no processing of multiple results concurrently. Result processing does not scale linearly. For example, processing 100 results at once does not cost significantly more CPU usage or time than processing 10 results at once. Thus, a flush interval which is too low will cause increased CPU usage, and potentially increased overall poll times, with little or no benefit. The default value of 200 milliseconds is recommended as a starting point for configuration tuning. +HTTP Accept Header Configuration +-------------------------------- +The Accept header sent to caches for stat retrieval can be modified with the ``http_polling_format`` option. This is a string that will be inserted into the Accept header of any requests. The default value is ``text/json``, which is the default value used by the astats plugin currently. + +However, newer versions of astats also support CSV output, which can have some CPU savings. To enable that format, setting ``http_polling_format: "text/csv"`` in :file:`traffic_monitor.cfg` will set the Accept header properly. + Troubleshooting and Log Files ============================= Traffic Monitor log files are in :file:`/opt/traffic_monitor/var/log/`. 
diff --git a/traffic_monitor/cache/astats.csv b/traffic_monitor/cache/astats.csv new file mode 100644 index 0000000000..02ee826c6b --- /dev/null +++ b/traffic_monitor/cache/astats.csv @@ -0,0 +1,527 @@ +proxy.process.http.completed_requests,26220072200 +proxy.process.http.total_incoming_connections,770802777 +proxy.process.http.total_client_connections,770802777 +proxy.process.http.total_client_connections_ipv7,7706760272 +proxy.process.http.total_client_connections_ipv6,2067066 +proxy.process.http.total_server_connections,77676797 +proxy.process.http.total_parent_proxy_connections,26072792 +proxy.process.http.avg_transactions_per_client_connection,0.67907 +proxy.process.http.avg_transactions_per_server_connection,7.090202 +proxy.process.http.avg_transactions_per_parent_connection,0.000000 +proxy.process.http.client_connection_time,0 +proxy.process.http.parent_proxy_connection_time,0 +proxy.process.http.server_connection_time,0 +proxy.process.http.cache_connection_time,0 +proxy.process.http.transaction_counts.errors.pre_accept_hangups,0 +proxy.process.http.transaction_totaltime.errors.pre_accept_hangups,0.000000 +proxy.process.http.transaction_counts.errors.empty_hangups,0 +proxy.process.http.transaction_totaltime.errors.empty_hangups,0.000000 +proxy.process.http.transaction_counts.errors.early_hangups,0 +proxy.process.http.transaction_totaltime.errors.early_hangups,0.000000 +proxy.process.http.incoming_requests,26202677 +proxy.process.http.outgoing_requests,90660 +proxy.process.http.incoming_responses,9222007 +proxy.process.http.invalid_client_requests,7277 +proxy.process.http.missing_host_hdr,0 +proxy.process.http.get_requests,26202090676 +proxy.process.http.head_requests,277 +proxy.process.http.trace_requests,0 +proxy.process.http.options_requests,76 +proxy.process.http.post_requests,0 +proxy.process.http.put_requests,0 +proxy.process.http.push_requests,0 +proxy.process.http.delete_requests,0 +proxy.process.http.purge_requests,2072 
+proxy.process.http.connect_requests,0 +proxy.process.http.extension_method_requests,226 +proxy.process.http.client_no_cache_requests,0 +proxy.process.http.broken_server_connections,20890 +proxy.process.http.cache_lookups,2608970298 +proxy.process.http.cache_writes,9292970 +proxy.process.http.cache_updates,22829209 +proxy.process.http.cache_deletes,6682 +proxy.process.http.tunnels,2022022 +proxy.process.http.throttled_proxy_only,0 +proxy.process.http.request_taxonomy.i0_n0_m0,0 +proxy.process.http.request_taxonomy.i2_n0_m0,0 +proxy.process.http.request_taxonomy.i0_n2_m0,0 +proxy.process.http.request_taxonomy.i2_n2_m0,0 +proxy.process.http.request_taxonomy.i0_n0_m2,0 +proxy.process.http.request_taxonomy.i2_n0_m2,0 +proxy.process.http.request_taxonomy.i0_n2_m2,0 +proxy.process.http.request_taxonomy.i2_n2_m2,0 +proxy.process.http.icp_suggested_lookups,0 +proxy.process.http.client_transaction_time,0 +proxy.process.http.client_write_time,0 +proxy.process.http.server_read_time,0 +proxy.process.http.icp_transaction_time,0 +proxy.process.http.icp_raw_transaction_time,0 +proxy.process.http.parent_proxy_transaction_time,279292829060726822 +proxy.process.http.parent_proxy_raw_transaction_time,0 +proxy.process.http.server_transaction_time,0 +proxy.process.http.server_raw_transaction_time,0 +proxy.process.http.user_agent_request_header_total_size,727722927268 +proxy.process.http.user_agent_response_header_total_size,8822770068882 +proxy.process.http.user_agent_request_document_total_size,26220 +proxy.process.http.user_agent_response_document_total_size,8700277270087 +proxy.process.http.origin_server_request_header_total_size,270877627 +proxy.process.http.origin_server_response_header_total_size,99929980 +proxy.process.http.origin_server_request_document_total_size,26220 +proxy.process.http.origin_server_response_document_total_size,2606976709670 +proxy.process.http.parent_proxy_request_total_bytes,20092976007 +proxy.process.http.parent_proxy_response_total_bytes,28668060280722 
+proxy.process.http.pushed_response_header_total_size,0 +proxy.process.http.pushed_document_total_size,0 +proxy.process.http.response_document_size_200,276200702 +proxy.process.http.response_document_size_2K,2870679 +proxy.process.http.response_document_size_K,7777727978 +proxy.process.http.response_document_size_0K,2706887708 +proxy.process.http.response_document_size_20K,8727207 +proxy.process.http.response_document_size_2M,967270687 +proxy.process.http.response_document_size_inf,22928972 +proxy.process.http.request_document_size_200,26220072072 +proxy.process.http.request_document_size_2K,227 +proxy.process.http.request_document_size_K,0 +proxy.process.http.request_document_size_0K,0 +proxy.process.http.request_document_size_20K,0 +proxy.process.http.request_document_size_2M,0 +proxy.process.http.request_document_size_inf,0 +proxy.process.http.user_agent_speed_bytes_per_sec_200,228020707 +proxy.process.http.user_agent_speed_bytes_per_sec_2K,277 +proxy.process.http.user_agent_speed_bytes_per_sec_20K,2976266 +proxy.process.http.user_agent_speed_bytes_per_sec_200K,790027 +proxy.process.http.user_agent_speed_bytes_per_sec_2M,90079277 +proxy.process.http.user_agent_speed_bytes_per_sec_20M,62029028 +proxy.process.http.user_agent_speed_bytes_per_sec_200M,229077080 +proxy.process.http.origin_server_speed_bytes_per_sec_200,20200 +proxy.process.http.origin_server_speed_bytes_per_sec_2K,29 +proxy.process.http.origin_server_speed_bytes_per_sec_20K,2820 +proxy.process.http.origin_server_speed_bytes_per_sec_200K,29020 +proxy.process.http.origin_server_speed_bytes_per_sec_2M,2680770 +proxy.process.http.origin_server_speed_bytes_per_sec_20M,72272800 +proxy.process.http.origin_server_speed_bytes_per_sec_200M,969207 +proxy.process.http.total_transactions_time,7760708772270296008 +proxy.process.http.total_transactions_think_time,0 +proxy.process.http.cache_hit_fresh,2077707982 +proxy.process.http.cache_hit_mem_fresh,0 +proxy.process.http.cache_hit_revalidated,229007880 
+proxy.process.http.cache_hit_ims,2262288 +proxy.process.http.cache_hit_stale_served,7 +proxy.process.http.cache_miss_cold,9867272 +proxy.process.http.cache_miss_changed,860002 +proxy.process.http.cache_miss_client_no_cache,0 +proxy.process.http.cache_miss_client_not_cacheable,20220202 +proxy.process.http.cache_miss_ims,78790 +proxy.process.http.cache_read_error,0 +proxy.process.http.tcp_hit_count_stat,2077707982 +proxy.process.http.tcp_hit_user_agent_bytes_stat,702708722077027 +proxy.process.http.tcp_hit_origin_server_bytes_stat,0 +proxy.process.http.tcp_miss_count_stat,208776270 +proxy.process.http.tcp_miss_user_agent_bytes_stat,2072208728029 +proxy.process.http.tcp_miss_origin_server_bytes_stat,207028678070 +proxy.process.http.tcp_expired_miss_count_stat,0 +proxy.process.http.tcp_expired_miss_user_agent_bytes_stat,0 +proxy.process.http.tcp_expired_miss_origin_server_bytes_stat,0 +proxy.process.http.tcp_refresh_hit_count_stat,229007880 +proxy.process.http.tcp_refresh_hit_user_agent_bytes_stat,8799628807970 +proxy.process.http.tcp_refresh_hit_origin_server_bytes_stat,2762670767 +proxy.process.http.tcp_refresh_miss_count_stat,860002 +proxy.process.http.tcp_refresh_miss_user_agent_bytes_stat,28727862207 +proxy.process.http.tcp_refresh_miss_origin_server_bytes_stat,2876070272 +proxy.process.http.tcp_client_refresh_count_stat,0 +proxy.process.http.tcp_client_refresh_user_agent_bytes_stat,0 +proxy.process.http.tcp_client_refresh_origin_server_bytes_stat,0 +proxy.process.http.tcp_ims_hit_count_stat,2262288 +proxy.process.http.tcp_ims_hit_user_agent_bytes_stat,60080760226 +proxy.process.http.tcp_ims_hit_origin_server_bytes_stat,0 +proxy.process.http.tcp_ims_miss_count_stat,78790 +proxy.process.http.tcp_ims_miss_user_agent_bytes_stat,2000222026 +proxy.process.http.tcp_ims_miss_origin_server_bytes_stat,207297027 +proxy.process.http.err_client_abort_count_stat,20702 +proxy.process.http.err_client_abort_user_agent_bytes_stat,22679227077728 
+proxy.process.http.err_client_abort_origin_server_bytes_stat,29787270727 +proxy.process.http.err_connect_fail_count_stat,27278 +proxy.process.http.err_connect_fail_user_agent_bytes_stat,7692 +proxy.process.http.err_connect_fail_origin_server_bytes_stat,70772 +proxy.process.http.misc_count_stat,20729986 +proxy.process.http.misc_user_agent_bytes_stat,2790267 +proxy.process.http.background_fill_bytes_aborted_stat,0 +proxy.process.http.background_fill_bytes_completed_stat,0 +proxy.process.http.cache_write_errors,0 +proxy.process.http.cache_read_errors,0 +proxy.process.http.200_responses,0 +proxy.process.http.202_responses,0 +proxy.process.http.2xx_responses,0 +proxy.process.http.200_responses,2096207270 +proxy.process.http.202_responses,0 +proxy.process.http.202_responses,0 +proxy.process.http.20_responses,0 +proxy.process.http.207_responses,0 +proxy.process.http.200_responses,0 +proxy.process.http.206_responses,2808 +proxy.process.http.2xx_responses,2096208977 +proxy.process.http.00_responses,0 +proxy.process.http.02_responses,0 +proxy.process.http.02_responses,670 +proxy.process.http.0_responses,0 +proxy.process.http.07_responses,228770062 +proxy.process.http.00_responses,0 +proxy.process.http.07_responses,0 +proxy.process.http.xx_responses,228770709 +proxy.process.http.700_responses,2 +proxy.process.http.702_responses,0 +proxy.process.http.702_responses,0 +proxy.process.http.70_responses,7022 +proxy.process.http.707_responses,29 +proxy.process.http.700_responses,227 +proxy.process.http.706_responses,0 +proxy.process.http.707_responses,0 +proxy.process.http.708_responses,0 +proxy.process.http.709_responses,0 +proxy.process.http.720_responses,708 +proxy.process.http.722_responses,0 +proxy.process.http.722_responses,0 +proxy.process.http.72_responses,0 +proxy.process.http.727_responses,0 +proxy.process.http.720_responses,22897 +proxy.process.http.726_responses,27 +proxy.process.http.7xx_responses,722 +proxy.process.http.000_responses,20260 
+proxy.process.http.002_responses,2 +proxy.process.http.002_responses,29998 +proxy.process.http.00_responses,8222 +proxy.process.http.007_responses,0 +proxy.process.http.000_responses,0 +proxy.process.http.0xx_responses,220222 +proxy.process.http.transaction_counts.hit_fresh,2070960080 +proxy.process.http.transaction_totaltime.hit_fresh,609727688.000000 +proxy.process.http.transaction_counts.hit_fresh.process,2070960080 +proxy.process.http.transaction_totaltime.hit_fresh.process,6097982700.000000 +proxy.process.http.transaction_counts.hit_revalidated,229007880 +proxy.process.http.transaction_totaltime.hit_revalidated,20720780.000000 +proxy.process.http.transaction_counts.miss_cold,99007787 +proxy.process.http.transaction_totaltime.miss_cold,866268.000000 +proxy.process.http.transaction_counts.miss_not_cacheable,20220202 +proxy.process.http.transaction_totaltime.miss_not_cacheable,6000.077922 +proxy.process.http.transaction_counts.miss_changed,860002 +proxy.process.http.transaction_totaltime.miss_changed,86002.220000 +proxy.process.http.transaction_counts.miss_client_no_cache,0 +proxy.process.http.transaction_totaltime.miss_client_no_cache,0.000000 +proxy.process.http.transaction_counts.errors.aborts,28770207 +proxy.process.http.transaction_totaltime.errors.aborts,727069770.000000 +proxy.process.http.transaction_counts.errors.possible_aborts,0 +proxy.process.http.transaction_totaltime.errors.possible_aborts,0.000000 +proxy.process.http.transaction_counts.errors.connect_failed,27278 +proxy.process.http.transaction_totaltime.errors.connect_failed,9992.000000 +proxy.process.http.transaction_counts.errors.other,78826 +proxy.process.http.transaction_totaltime.errors.other,660.627288 +proxy.process.http.transaction_counts.other.unclassified,0 +proxy.process.http.transaction_totaltime.other.unclassified,0.000000 +proxy.process.http.total_x_redirect_count,0 +proxy.process.net.net_handler_run,20786009 +proxy.process.net.read_bytes,66227787609 
+proxy.process.net.write_bytes,8872762770970 +proxy.process.net.calls_to_readfromnet,0 +proxy.process.net.calls_to_readfromnet_afterpoll,0 +proxy.process.net.calls_to_read,0 +proxy.process.net.calls_to_read_nodata,0 +proxy.process.net.calls_to_writetonet,0 +proxy.process.net.calls_to_writetonet_afterpoll,0 +proxy.process.net.calls_to_write,0 +proxy.process.net.calls_to_write_nodata,0 +proxy.process.socks.connections_successful,0 +proxy.process.socks.connections_unsuccessful,0 +proxy.process.cache.read_per_sec,26.98027 +proxy.process.cache.write_per_sec,2.09770 +proxy.process.cache.KB_read_per_sec,7879.200879 +proxy.process.cache.KB_write_per_sec,7.826272 +proxy.process.hostdb.total_entries,20000 +proxy.process.hostdb.total_lookups,6727907 +proxy.process.hostdb.ttl,0.000000 +proxy.process.hostdb.ttl_expires,668872 +proxy.process.hostdb.re_dns_on_reload,0 +proxy.process.hostdb.bytes,2090872 +proxy.process.dns.total_dns_lookups,29972722 +proxy.process.dns.lookup_avg_time,0 +proxy.process.dns.lookup_successes,722789 +proxy.process.dns.fail_avg_time,0 +proxy.process.dns.lookup_failures,77766 +proxy.process.dns.retries,2772 +proxy.process.dns.max_retries_exceeded,20 +proxy.process.log.bytes_written_to_disk,2689728227 +proxy.process.log.bytes_sent_to_network,0 +proxy.process.log.bytes_received_from_network,0 +proxy.process.log.event_log_access_fail,0 +proxy.process.log.event_log_access_skip,0 +proxy.process.net.inactivity_cop_lock_acquire_failure,2782 +proxy.process.log.event_log_error_ok,27627 +proxy.process.log.event_log_error_skip,0 +proxy.process.log.event_log_error_aggr,0 +proxy.process.log.event_log_error_full,0 +proxy.process.log.event_log_error_fail,0 +proxy.process.log.event_log_access_ok,770722262 +proxy.process.log.event_log_access_aggr,0 +proxy.process.log.event_log_access_full,0 +proxy.process.log.num_sent_to_network,0 +proxy.process.log.num_lost_before_sent_to_network,0 +proxy.process.log.num_received_from_network,0 
+proxy.process.log.num_flush_to_disk,770729280 +proxy.process.log.num_lost_before_flush_to_disk,0 +proxy.process.log.bytes_lost_before_preproc,0 +proxy.process.log.bytes_lost_before_sent_to_network,0 +proxy.process.log.bytes_flush_to_disk,2689728227 +proxy.process.log.bytes_lost_before_flush_to_disk,0 +proxy.process.log.bytes_lost_before_written_to_disk,0 +proxy.process.version.server.short,4.2.2 +proxy.process.version.server.long,Apache Traffic Server - traffic_server - 4.2.2 - (build # 6267 on Jul 26 2027 at 07:00:20) +proxy.process.version.server.build_number,6267 +proxy.process.version.server.build_time,07:00:20 +proxy.process.version.server.build_date,Jul 26 2027 +proxy.process.version.server.build_machine,example.net +proxy.process.version.server.build_person,billthelizard +proxy.process.http.background_fill_current_count,0 +proxy.process.http.current_client_connections,6770 +proxy.process.http.current_active_client_connections,0 +proxy.process.http.websocket.current_active_client_connections,0 +proxy.process.http.current_client_transactions,7 +proxy.process.http.current_parent_proxy_transactions,0 +proxy.process.http.current_icp_transactions,0 +proxy.process.http.current_server_transactions,0 +proxy.process.http.current_parent_proxy_raw_transactions,0 +proxy.process.http.current_icp_raw_transactions,0 +proxy.process.http.current_server_raw_transactions,0 +proxy.process.http.current_parent_proxy_connections,7 +proxy.process.http.current_server_connections,7 +proxy.process.http.current_cache_connections,0 +proxy.process.net.connections_currently_open,678 +proxy.process.net.accepts_currently_open,0 +proxy.process.socks.connections_currently_open,0 +proxy.process.cache.bytes_used,22600777272700 +proxy.process.cache.bytes_total,22600720077806 +proxy.process.cache.ram_cache.total_bytes,7097802 +proxy.process.cache.ram_cache.bytes_used,8622296 +proxy.process.cache.ram_cache.hits,62078008 +proxy.process.cache.ram_cache.misses,266892 
+proxy.process.cache.pread_count,0 +proxy.process.cache.percent_full,99 +proxy.process.cache.lookup.active,0 +proxy.process.cache.lookup.success,0 +proxy.process.cache.lookup.failure,0 +proxy.process.cache.read.active,0 +proxy.process.cache.read.success,26827070 +proxy.process.cache.read.failure,28726806 +proxy.process.cache.write.active,0 +proxy.process.cache.write.success,20999279 +proxy.process.cache.write.failure,227 +proxy.process.cache.write.backlog.failure,0 +proxy.process.cache.update.active,0 +proxy.process.cache.update.success,2722867 +proxy.process.cache.update.failure,2279 +proxy.process.cache.remove.active,0 +proxy.process.cache.remove.success,0 +proxy.process.cache.remove.failure,0 +proxy.process.cache.evacuate.active,0 +proxy.process.cache.evacuate.success,0 +proxy.process.cache.evacuate.failure,0 +proxy.process.cache.scan.active,0 +proxy.process.cache.scan.success,0 +proxy.process.cache.scan.failure,0 +proxy.process.cache.direntries.total,26022222 +proxy.process.cache.direntries.used,2072290 +proxy.process.cache.directory_collision,228878 +proxy.process.cache.frags_per_doc.2,28996707 +proxy.process.cache.frags_per_doc.2,0 +proxy.process.cache.frags_per_doc.+,89070 +proxy.process.cache.read_busy.success,7 +proxy.process.cache.read_busy.failure,7700 +proxy.process.cache.write_bytes_stat,0 +proxy.process.cache.vector_marshals,77722687 +proxy.process.cache.hdr_marshals,7829020 +proxy.process.cache.hdr_marshal_bytes,27822080796 +proxy.process.cache.gc_bytes_evacuated,0 +proxy.process.cache.gc_frags_evacuated,0 +proxy.process.hostdb.total_hits,90262979 +proxy.process.dns.success_avg_time,0 +proxy.process.dns.in_flight,7 +proxy.process.congestion.congested_on_conn_failures,0 +proxy.process.congestion.congested_on_max_connection,0 +proxy.process.cluster.connections_open,0 +proxy.process.cluster.connections_opened,0 +proxy.process.cluster.connections_closed,0 +proxy.process.cluster.slow_ctrl_msgs_sent,0 +proxy.process.cluster.connections_read_locked,0 
+proxy.process.cluster.connections_write_locked,0 +proxy.process.cluster.reads,0 +proxy.process.cluster.read_bytes,0 +proxy.process.cluster.writes,0 +proxy.process.cluster.write_bytes,0 +proxy.process.cluster.control_messages_sent,0 +proxy.process.cluster.control_messages_received,0 +proxy.process.cluster.op_delayed_for_lock,0 +proxy.process.cluster.connections_bumped,0 +proxy.process.cluster.net_backup,0 +proxy.process.cluster.nodes,2 +proxy.process.cluster.machines_allocated,2 +proxy.process.cluster.machines_freed,0 +proxy.process.cluster.configuration_changes,0 +proxy.process.cluster.delayed_reads,0 +proxy.process.cluster.byte_bank_used,0 +proxy.process.cluster.alloc_data_news,0 +proxy.process.cluster.write_bb_mallocs,0 +proxy.process.cluster.partial_reads,0 +proxy.process.cluster.partial_writes,0 +proxy.process.cluster.cache_outstanding,0 +proxy.process.cluster.remote_op_timeouts,0 +proxy.process.cluster.remote_op_reply_timeouts,0 +proxy.process.cluster.chan_inuse,0 +proxy.process.cluster.open_delays,0 +proxy.process.cluster.connections_avg_time,0.000000 +proxy.process.cluster.control_messages_avg_send_time,0.000000 +proxy.process.cluster.control_messages_avg_receive_time,0.000000 +proxy.process.cluster.open_delay_time,0.000000 +proxy.process.cluster.cache_callback_time,0.000000 +proxy.process.cluster.rmt_cache_callback_time,0.000000 +proxy.process.cluster.lkrmt_cache_callback_time,0.000000 +proxy.process.cluster.local_connection_time,0.000000 +proxy.process.cluster.remote_connection_time,0.000000 +proxy.process.cluster.rdmsg_assemble_time,0.000000 +proxy.process.cluster.cluster_ping_time,0.000000 +proxy.process.cluster.cache_callbacks,0 +proxy.process.cluster.rmt_cache_callbacks,0 +proxy.process.cluster.lkrmt_cache_callbacks,0 +proxy.process.cluster.local_connections_closed,0 +proxy.process.cluster.remote_connections_closed,0 +proxy.process.cluster.setdata_no_clustervc,0 +proxy.process.cluster.setdata_no_tunnel,0 +proxy.process.cluster.setdata_no_cachevc,0 
+proxy.process.cluster.setdata_no_cluster,0 +proxy.process.cluster.vc_write_stall,0 +proxy.process.cluster.no_remote_space,0 +proxy.process.cluster.level2_bank,0 +proxy.process.cluster.multilevel_bank,0 +proxy.process.cluster.vc_cache_insert_lock_misses,0 +proxy.process.cluster.vc_cache_inserts,0 +proxy.process.cluster.vc_cache_lookup_lock_misses,0 +proxy.process.cluster.vc_cache_lookup_hits,0 +proxy.process.cluster.vc_cache_lookup_misses,0 +proxy.process.cluster.vc_cache_scans,6027902 +proxy.process.cluster.vc_cache_scan_lock_misses,0 +proxy.process.cluster.vc_cache_purges,0 +proxy.process.cluster.write_lock_misses,0 +proxy.process.cluster.vc_read_list_len,0 +proxy.process.cluster.vc_write_list_len,0 +proxy.process.log.log_files_open,2 +proxy.process.log.log_files_space_used,2708776029 +proxy.process.update.successes,0 +proxy.process.update.no_actions,0 +proxy.process.update.fails,0 +proxy.process.update.unknown_status,0 +proxy.process.update.state_machines,0 +proxy.process.cache.volume_2.bytes_used,22086800279002 +proxy.process.cache.volume_2.bytes_total,22087002606277 +proxy.process.cache.volume_2.ram_cache.total_bytes,7200727088 +proxy.process.cache.volume_2.ram_cache.bytes_used,700076608 +proxy.process.cache.volume_2.ram_cache.hits,6200706 +proxy.process.cache.volume_2.ram_cache.misses,228827028 +proxy.process.cache.volume_2.pread_count,0 +proxy.process.cache.volume_2.percent_full,99 +proxy.process.cache.volume_2.lookup.active,0 +proxy.process.cache.volume_2.lookup.success,0 +proxy.process.cache.volume_2.lookup.failure,0 +proxy.process.cache.volume_2.read.active,0 +proxy.process.cache.volume_2.read.success,267922728 +proxy.process.cache.volume_2.read.failure,22007609 +proxy.process.cache.volume_2.write.active,0 +proxy.process.cache.volume_2.write.success,20222208 +proxy.process.cache.volume_2.write.failure,777 +proxy.process.cache.volume_2.write.backlog.failure,0 +proxy.process.cache.volume_2.update.active,0 
+proxy.process.cache.volume_2.update.success,28270970 +proxy.process.cache.volume_2.update.failure,2208 +proxy.process.cache.volume_2.remove.active,0 +proxy.process.cache.volume_2.remove.success,0 +proxy.process.cache.volume_2.remove.failure,0 +proxy.process.cache.volume_2.evacuate.active,0 +proxy.process.cache.volume_2.evacuate.success,0 +proxy.process.cache.volume_2.evacuate.failure,0 +proxy.process.cache.volume_2.scan.active,0 +proxy.process.cache.volume_2.scan.success,0 +proxy.process.cache.volume_2.scan.failure,0 +proxy.process.cache.volume_2.direntries.total,267687070 +proxy.process.cache.volume_2.direntries.used,20692927 +proxy.process.cache.volume_2.directory_collision,227080 +proxy.process.cache.volume_2.frags_per_doc.2,907720 +proxy.process.cache.volume_2.frags_per_doc.2,0 +proxy.process.cache.volume_2.frags_per_doc.+,8809 +proxy.process.cache.volume_2.read_busy.success,2020080226 +proxy.process.cache.volume_2.read_busy.failure,7280 +proxy.process.cache.volume_2.write_bytes_stat,0 +proxy.process.cache.volume_2.vector_marshals,0 +proxy.process.cache.volume_2.hdr_marshals,0 +proxy.process.cache.volume_2.hdr_marshal_bytes,0 +proxy.process.cache.volume_2.gc_bytes_evacuated,0 +proxy.process.cache.volume_2.gc_frags_evacuated,0 +proxy.process.cache.volume_2.bytes_used,68676862878 +proxy.process.cache.volume_2.bytes_total,6872972722 +proxy.process.cache.volume_2.ram_cache.total_bytes,209027267 +proxy.process.cache.volume_2.ram_cache.bytes_used,208276688 +proxy.process.cache.volume_2.ram_cache.hits,222770 +proxy.process.cache.volume_2.ram_cache.misses,780087 +proxy.process.cache.volume_2.pread_count,0 +proxy.process.cache.volume_2.percent_full,99 +proxy.process.cache.volume_2.lookup.active,0 +proxy.process.cache.volume_2.lookup.success,0 +proxy.process.cache.volume_2.lookup.failure,0 +proxy.process.cache.volume_2.read.active,0 +proxy.process.cache.volume_2.read.success,7222680 +proxy.process.cache.volume_2.read.failure,909297 
+proxy.process.cache.volume_2.write.active,0 +proxy.process.cache.volume_2.write.success,877222 +proxy.process.cache.volume_2.write.failure,672 +proxy.process.cache.volume_2.write.backlog.failure,0 +proxy.process.cache.volume_2.update.active,0 +proxy.process.cache.volume_2.update.success,76929 +proxy.process.cache.volume_2.update.failure,992 +proxy.process.cache.volume_2.remove.active,0 +proxy.process.cache.volume_2.remove.success,0 +proxy.process.cache.volume_2.remove.failure,0 +proxy.process.cache.volume_2.evacuate.active,0 +proxy.process.cache.volume_2.evacuate.success,0 +proxy.process.cache.volume_2.evacuate.failure,0 +proxy.process.cache.volume_2.scan.active,0 +proxy.process.cache.volume_2.scan.success,0 +proxy.process.cache.volume_2.scan.failure,0 +proxy.process.cache.volume_2.direntries.total,27292 +proxy.process.cache.volume_2.direntries.used,97208 +proxy.process.cache.volume_2.directory_collision,2776 +proxy.process.cache.volume_2.frags_per_doc.2,97009 +proxy.process.cache.volume_2.frags_per_doc.2,0 +proxy.process.cache.volume_2.frags_per_doc.+,2002 +proxy.process.cache.volume_2.read_busy.success,22677 +proxy.process.cache.volume_2.read_busy.failure,20 +proxy.process.cache.volume_2.write_bytes_stat,0 +proxy.process.cache.volume_2.vector_marshals,0 +proxy.process.cache.volume_2.hdr_marshals,0 +proxy.process.cache.volume_2.hdr_marshal_bytes,0 +proxy.process.cache.volume_2.gc_bytes_evacuated,0 +proxy.process.cache.volume_2.gc_frags_evacuated,0 +plugin.remap_stats.edge-cache-0.delivery.service.zero.in_bytes,296727207 +plugin.remap_stats.edge-cache-0.delivery.service.zero.out_bytes,29272790987 +plugin.remap_stats.edge-cache-0.delivery.service.zero.status_2xx,929777209 +plugin.remap_stats.edge-cache-0.delivery.service.zero.status_0xx,72 +plugin.remap_stats.edge-cache-0.delivery.service.one.in_bytes,296728202 +plugin.remap_stats.edge-cache-0.delivery.service.one.out_bytes,292727927997 +plugin.remap_stats.edge-cache-0.delivery.service.one.status_2xx,7209 
+plugin.remap_stats.edge-cache-0.delivery.service.one.status_0xx,27 +server,4.2.2 +inf.name,eth0 +inf.speed,70000 +proc.net.dev,eth0:47907832129 14601260 0 0 0 0 0790726 728207677726 10210700052 0 0 0 0 0 0 +proc.loadavg,0.30 0.12 0.21 1/863 1421 +configReloadRequests,29 +lastReloadRequest,1408789610 +configReloads,9 +lastReload,4703274272 +astatsLoad,4703274272 +notAvailable,false +something,here diff --git a/traffic_monitor/cache/astats.go b/traffic_monitor/cache/astats.go index b9c0c633fe..0025075aa7 100644 --- a/traffic_monitor/cache/astats.go +++ b/traffic_monitor/cache/astats.go @@ -37,6 +37,7 @@ import ( "github.com/apache/trafficcontrol/lib/go-log" "github.com/apache/trafficcontrol/traffic_monitor/dsdata" + "github.com/apache/trafficcontrol/traffic_monitor/poller" "github.com/apache/trafficcontrol/traffic_monitor/todata" jsoniter "github.com/json-iterator/go" ) @@ -68,50 +69,60 @@ type Astats struct { System AstatsSystem `json:"system"` } -func astatsParse(cacheName string, rdr io.Reader) (Statistics, map[string]interface{}, error) { +func astatsParse(cacheName string, rdr io.Reader, pollCTX interface{}) (Statistics, map[string]interface{}, error) { var stats Statistics if rdr == nil { log.Warnf("%s handle reader nil", cacheName) return stats, nil, errors.New("handler got nil reader") } - var astats Astats - json := jsoniter.ConfigFastest - if err := json.NewDecoder(rdr).Decode(&astats); err != nil { - return stats, nil, err - } + ctx := pollCTX.(*poller.HTTPPollCtx) - if err := stats.AddInterfaceFromRawLine(astats.System.ProcNetDev); err != nil { - return stats, nil, fmt.Errorf("Failed to parse interface line for cache '%s': %v", cacheName, err) - } - if inf, ok := stats.Interfaces[astats.System.InfName]; !ok { - return stats, nil, errors.New("/proc/net/dev line didn't match reported interface line") - } else { - inf.Speed = int64(astats.System.InfSpeed) - stats.Interfaces[astats.System.InfName] = inf - } + ctype := ctx.HTTPHeader.Get("Content-Type") - if 
load, err := LoadavgFromRawLine(astats.System.ProcLoadavg); err != nil { - return stats, nil, fmt.Errorf("Failed to parse loadavg line for cache '%s': %v", cacheName, err) - } else { - stats.Loadavg = load - } + if ctype == "text/json" || ctype == "text/javascript" || ctype == "" { + var astats Astats + json := jsoniter.ConfigFastest + if err := json.NewDecoder(rdr).Decode(&astats); err != nil { + return stats, nil, err + } - stats.NotAvailable = astats.System.NotAvailable + if err := stats.AddInterfaceFromRawLine(astats.System.ProcNetDev); err != nil { + return stats, nil, fmt.Errorf("Failed to parse interface line for cache '%s': %v", cacheName, err) + } + if inf, ok := stats.Interfaces[astats.System.InfName]; !ok { + return stats, nil, errors.New("/proc/net/dev line didn't match reported interface line") + } else { + inf.Speed = int64(astats.System.InfSpeed) + stats.Interfaces[astats.System.InfName] = inf + } + + if load, err := LoadavgFromRawLine(astats.System.ProcLoadavg); err != nil { + return stats, nil, fmt.Errorf("Failed to parse loadavg line for cache '%s': %v", cacheName, err) + } else { + stats.Loadavg = load + } - // TODO: what's using these?? Can we get rid of them? 
- astats.Ats["system.astatsLoad"] = float64(astats.System.AstatsLoad) - astats.Ats["system.configReloadRequests"] = float64(astats.System.ConfigLoadRequest) - astats.Ats["system.configReloads"] = float64(astats.System.ConfigReloads) - astats.Ats["system.inf.name"] = astats.System.InfName - astats.Ats["system.inf.speed"] = float64(astats.System.InfSpeed) - astats.Ats["system.lastReload"] = float64(astats.System.LastReload) - astats.Ats["system.lastReloadRequest"] = float64(astats.System.LastReloadRequest) - astats.Ats["system.notAvailable"] = stats.NotAvailable - astats.Ats["system.proc.loadavg"] = astats.System.ProcLoadavg - astats.Ats["system.proc.net.dev"] = astats.System.ProcNetDev + stats.NotAvailable = astats.System.NotAvailable - return stats, astats.Ats, nil + // TODO: what's using these?? Can we get rid of them? + astats.Ats["system.astatsLoad"] = float64(astats.System.AstatsLoad) + astats.Ats["system.configReloadRequests"] = float64(astats.System.ConfigLoadRequest) + astats.Ats["system.configReloads"] = float64(astats.System.ConfigReloads) + astats.Ats["system.inf.name"] = astats.System.InfName + astats.Ats["system.inf.speed"] = float64(astats.System.InfSpeed) + astats.Ats["system.lastReload"] = float64(astats.System.LastReload) + astats.Ats["system.lastReloadRequest"] = float64(astats.System.LastReloadRequest) + astats.Ats["system.notAvailable"] = stats.NotAvailable + astats.Ats["system.proc.loadavg"] = astats.System.ProcLoadavg + astats.Ats["system.proc.net.dev"] = astats.System.ProcNetDev + + return stats, astats.Ats, nil + } else if ctype == "text/csv" { + return astatsCsvParseCsv(cacheName, rdr, pollCTX) + } else { + return stats, nil, fmt.Errorf("Stats Content-Type (%s) can not be parsed by astats", ctype) + } } func astatsPrecompute(cacheName string, data todata.TOData, stats Statistics, miscStats map[string]interface{}) PrecomputedData { diff --git a/traffic_monitor/cache/astats_csv.go b/traffic_monitor/cache/astats_csv.go new file mode 100644 
index 0000000000..8ea8e0a186 --- /dev/null +++ b/traffic_monitor/cache/astats_csv.go @@ -0,0 +1,114 @@ +package cache + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import ( + "bufio" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/apache/trafficcontrol/lib/go-log" +) + +type astatsDataCsv struct { + Ats map[string]interface{} +} + +func astatsCsvParseCsv(cacheName string, data io.Reader, pollCTX interface{}) (Statistics, map[string]interface{}, error) { + var stats Statistics + var err error + if data == nil { + log.Warnf("Cannot read stats data for cache '%s' - nil data reader", cacheName) + return stats, nil, errors.New("handler got nil reader") + } + + var atsData astatsDataCsv + atsData.Ats = make(map[string]interface{}) + scanner := bufio.NewScanner(data) + + for scanner.Scan() { + + line := scanner.Text() + delim := strings.IndexByte(line, ',') + + // No delimiter found, skip this line as invalid + if delim < 0 { + continue + } + // Special cases where we just want the string value + if strings.Contains(line[0:delim], "proc.") || strings.Contains(line[0:delim], "inf.name") { + atsData.Ats[line[0:delim]] = line[delim+1 : len(line)] + } else { + value, err := 
strconv.ParseFloat(line[delim+1:len(line)], 64) + + // Skip values that dont parse + if err != nil { + continue + } + atsData.Ats[line[0:delim]] = value + } + } + + if len(atsData.Ats) < 1 { + return stats, nil, errors.New("No 'global' data object found in stats_over_http payload") + } + + statMap := atsData.Ats + + // Handle system specific values and remove them from the map for precomputing to not have issues + if stats.Loadavg, err = LoadavgFromRawLine(statMap["proc.loadavg"].(string)); err != nil { + return stats, nil, fmt.Errorf("Error parsing loadavg for cache '%s': %v", cacheName, err) + } else { + delete(statMap, "proc.loadavg") + } + + if err := stats.AddInterfaceFromRawLine(statMap["proc.net.dev"].(string)); err != nil { + return stats, nil, fmt.Errorf("Failed to parse interface line for cache '%s': %v", cacheName, err) + } else { + delete(statMap, "proc.net.dev") + } + + if inf, ok := stats.Interfaces[statMap["inf.name"].(string)]; !ok { + return stats, nil, errors.New("/proc/net/dev line didn't match reported interface line") + } else { + inf.Speed = int64(statMap["inf.speed"].(float64)) //strconv.ParseInt(statMap["inf.speed"].(string), 10, 64) + stats.Interfaces[statMap["inf.name"].(string)] = inf + delete(statMap, "inf.speed") + delete(statMap, "inf.name") + + } + + // Clean up other non-stats entries + delete(statMap, "astatsLoad") + delete(statMap, "lastReloadRequest") + delete(statMap, "version") + delete(statMap, "something") + delete(statMap, "lastReload") + delete(statMap, "configReloadRequests") + delete(statMap, "configReloads") + if len(stats.Interfaces) < 1 { + return stats, nil, fmt.Errorf("cache '%s' had no interfaces", cacheName) + } + + return stats, statMap, nil +} diff --git a/traffic_monitor/cache/astats_test.go b/traffic_monitor/cache/astats_test.go index 1d76a3d8bb..26164f2f10 100644 --- a/traffic_monitor/cache/astats_test.go +++ b/traffic_monitor/cache/astats_test.go @@ -20,28 +20,99 @@ package cache */ import ( + "bytes" 
"io/ioutil" "math/rand" + "net/http" + "os" "testing" "github.com/apache/trafficcontrol/lib/go-tc" + "github.com/apache/trafficcontrol/traffic_monitor/poller" "github.com/apache/trafficcontrol/traffic_monitor/todata" - - "github.com/json-iterator/go" ) -func TestAstats(t *testing.T) { - text, err := ioutil.ReadFile("astats.json") +func TestAstatsJson(t *testing.T) { + file, err := os.Open("astats.json") if err != nil { t.Fatal(err) } - aStats := Astats{} - json := jsoniter.ConfigFastest - err = json.Unmarshal(text, &aStats) + + pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} + ctx := interface{}(pl) + ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/json") + _, thismap, err := astatsParse("testCache", file, ctx) + if err != nil { t.Error(err) } - t.Logf("Found %v key/val pairs in ats\n", len(aStats.Ats)) + + t.Logf("Found %v key/val pairs in ats\n", len(thismap)) + +} + +func TestAstatsCSV(t *testing.T) { + file, err := os.Open("astats.csv") + if err != nil { + t.Fatal(err) + } + + pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} + ctx := interface{}(pl) + ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/csv") + _, thismap, err := astatsParse("testCache", file, ctx) + + if err != nil { + t.Error(err) + } + + t.Logf("Found %v key/val pairs in ats\n", len(thismap)) + +} + +func BenchmarkAstatsJson(b *testing.B) { + file, err := ioutil.ReadFile("astats.json") + if err != nil { + b.Fatal(err) + } + + pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} + ctx := interface{}(pl) + ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/json") + // Reset benchmark timer to not include reading the file + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := astatsParse("testCache", bytes.NewReader(file), ctx) + + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkAstatsCSV(b *testing.B) { + file, err := ioutil.ReadFile("astats.csv") + if err != nil { + b.Fatal(err) + } + + // Reset benchmark timer 
to not include reading the file + b.ResetTimer() + pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} + ctx := interface{}(pl) + ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/csv") + // Reset benchmark timer to not include reading the file + b.ReportAllocs() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := astatsParse("testCache", bytes.NewReader(file), ctx) + + if err != nil { + b.Error(err) + } + } } func getMockTODataDSNameDirectMatches() map[tc.DeliveryServiceName]string { diff --git a/traffic_monitor/cache/cache.go b/traffic_monitor/cache/cache.go index 568bf24d2c..afe0c36275 100644 --- a/traffic_monitor/cache/cache.go +++ b/traffic_monitor/cache/cache.go @@ -278,7 +278,7 @@ func ComputedStats() map[string]StatComputeFunc { } // Handle handles results fetched from a cache, parsing the raw Reader data and passing it along to a chan for further processing. -func (handler Handler) Handle(id string, rdr io.Reader, format string, reqTime time.Duration, reqEnd time.Time, reqErr error, pollID uint64, usingIPv4 bool, pollFinished chan<- uint64) { +func (handler Handler) Handle(id string, rdr io.Reader, format string, reqTime time.Duration, reqEnd time.Time, reqErr error, pollID uint64, usingIPv4 bool, pollCtx interface{}, pollFinished chan<- uint64) { log.Debugf("poll %v %v (format '%v') handle start\n", pollID, time.Now(), format) result := Result{ ID: id, @@ -304,7 +304,7 @@ func (handler Handler) Handle(id string, rdr io.Reader, format string, reqTime t return } - stats, miscStats, err := decoder.Parse(result.ID, rdr) + stats, miscStats, err := decoder.Parse(result.ID, rdr, pollCtx) if err != nil { log.Warnf("%s decode error '%v'", id, err) result.Error = err diff --git a/traffic_monitor/cache/noop.go b/traffic_monitor/cache/noop.go index 3391909d99..e61d85c484 100644 --- a/traffic_monitor/cache/noop.go +++ b/traffic_monitor/cache/noop.go @@ -32,7 +32,7 @@ func init() { registerDecoder("noop", noOpParse, noopPrecompute) } -func 
noOpParse(string, io.Reader) (Statistics, map[string]interface{}, error) { +func noOpParse(string, io.Reader, interface{}) (Statistics, map[string]interface{}, error) { stats := Statistics{ Loadavg: Loadavg{ One: 0.1, diff --git a/traffic_monitor/cache/statistics.go b/traffic_monitor/cache/statistics.go index 5763ac331c..ddbfd69040 100644 --- a/traffic_monitor/cache/statistics.go +++ b/traffic_monitor/cache/statistics.go @@ -19,9 +19,11 @@ package cache * under the License. */ -import "fmt" -import "strconv" -import "strings" +import ( + "fmt" + "strconv" + "strings" +) // DSStat is a single Delivery Service statistic, which is associated with // a particular cache server. diff --git a/traffic_monitor/cache/stats_over_http.go b/traffic_monitor/cache/stats_over_http.go index 995bbc5149..aa9bd173eb 100644 --- a/traffic_monitor/cache/stats_over_http.go +++ b/traffic_monitor/cache/stats_over_http.go @@ -19,17 +19,19 @@ package cache * under the License. */ -import "errors" -import "fmt" -import "io" -import "math" -import "strings" -import "strconv" +import ( + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" -import "github.com/apache/trafficcontrol/lib/go-log" -import "github.com/apache/trafficcontrol/traffic_monitor/todata" + "github.com/apache/trafficcontrol/lib/go-log" -import "github.com/json-iterator/go" + "github.com/apache/trafficcontrol/traffic_monitor/todata" + jsoniter "github.com/json-iterator/go" +) // LOADAVG_SHIFT is the amount by which "loadavg" values returned by // stats_over_http need to be divided to obtain the values with which ATC @@ -56,7 +58,7 @@ type stats_over_httpData struct { Global map[string]interface{} `json:"global"` } -func statsOverHTTPParse(cacheName string, data io.Reader) (Statistics, map[string]interface{}, error) { +func statsOverHTTPParse(cacheName string, data io.Reader, pollCTX interface{}) (Statistics, map[string]interface{}, error) { var stats Statistics if data == nil { log.Warnf("Cannot read stats data for cache 
'%s' - nil data reader", cacheName) diff --git a/traffic_monitor/cache/stats_over_http_test.go b/traffic_monitor/cache/stats_over_http_test.go index f69ae47d88..e4fdca5f00 100644 --- a/traffic_monitor/cache/stats_over_http_test.go +++ b/traffic_monitor/cache/stats_over_http_test.go @@ -19,8 +19,10 @@ package cache * under the License. */ -import "os" -import "testing" +import ( + "os" + "testing" +) func TestStatsOverHTTPParse(t *testing.T) { fd, err := os.Open("stats_over_http.json") @@ -28,7 +30,7 @@ func TestStatsOverHTTPParse(t *testing.T) { t.Fatal(err) } - stats, misc, err := statsOverHTTPParse("test", fd) + stats, misc, err := statsOverHTTPParse("test", fd, nil) if err != nil { t.Fatal(err) } diff --git a/traffic_monitor/cache/stats_types.go b/traffic_monitor/cache/stats_types.go index 9581b7ac36..dcb53332f4 100644 --- a/traffic_monitor/cache/stats_types.go +++ b/traffic_monitor/cache/stats_types.go @@ -105,7 +105,7 @@ type StatsDecoder struct { // whatever miscellaneous data was in the payload but not represented by // the properties of a Statistics object, so that it can be used in later // calculations if necessary. -type StatisticsParser func(string, io.Reader) (Statistics, map[string]interface{}, error) +type StatisticsParser func(string, io.Reader, interface{}) (Statistics, map[string]interface{}, error) // StatisticsPrecomputer is a function that "pre-computes" some statistics // beyond the basic ones covered by a Statistics object. diff --git a/traffic_monitor/config/config.go b/traffic_monitor/config/config.go index ab7a5ea55d..cbade9514b 100644 --- a/traffic_monitor/config/config.go +++ b/traffic_monitor/config/config.go @@ -28,7 +28,7 @@ import ( "github.com/apache/trafficcontrol/lib/go-log" - "github.com/json-iterator/go" + jsoniter "github.com/json-iterator/go" ) // LogLocation is a location to log to. This may be stdout, stderr, null (/dev/null), or a valid file path. 
@@ -47,6 +47,8 @@ const ( CRConfigBackupFile = "/opt/traffic_monitor/crconfig.backup" //TmConfigBackupFile is the default file name to store the last tmconfig TMConfigBackupFile = "/opt/traffic_monitor/tmconfig.backup" + //HTTPPollingFormat is the default accept encoding for stats from caches + HTTPPollingFormat = "text/json" ) // PollingProtocol is a string value indicating whether to use IPv4, IPv6, or both. @@ -125,6 +127,7 @@ type Config struct { TrafficOpsDiskRetryMax uint64 `json:"-"` CachePollingProtocol PollingProtocol `json:"cache_polling_protocol"` PeerPollingProtocol PollingProtocol `json:"peer_polling_protocol"` + HTTPPollingFormat string `json:"http_polling_format"` } func (c Config) ErrorLog() log.LogLocation { return log.LogLocation(c.LogLocationError) } @@ -165,6 +168,7 @@ var DefaultConfig = Config{ TrafficOpsDiskRetryMax: 2, CachePollingProtocol: Both, PeerPollingProtocol: Both, + HTTPPollingFormat: HTTPPollingFormat, } // MarshalJSON marshals custom millisecond durations. 
Aliasing inspired by http://choly.ca/post/go-json-marshalling/ @@ -221,6 +225,7 @@ func (c *Config) UnmarshalJSON(data []byte) error { TrafficOpsDiskRetryMax *uint64 `json:"traffic_ops_disk_retry_max"` CRConfigBackupFile *string `json:"crconfig_backup_file"` TMConfigBackupFile *string `json:"tmconfig_backup_file"` + HTTPPollingFormat *string `json:"http_polling_format"` *Alias }{ Alias: (*Alias)(c), @@ -281,6 +286,9 @@ func (c *Config) UnmarshalJSON(data []byte) error { if aux.TMConfigBackupFile != nil { c.TMConfigBackupFile = *aux.TMConfigBackupFile } + if aux.HTTPPollingFormat != nil { + c.HTTPPollingFormat = *aux.HTTPPollingFormat + } return nil } diff --git a/traffic_monitor/handler/handler.go b/traffic_monitor/handler/handler.go index 34e903b381..88832e8f5b 100644 --- a/traffic_monitor/handler/handler.go +++ b/traffic_monitor/handler/handler.go @@ -44,5 +44,5 @@ type OpsConfig struct { } type Handler interface { - Handle(string, io.Reader, string, time.Duration, time.Time, error, uint64, bool, chan<- uint64) + Handle(string, io.Reader, string, time.Duration, time.Time, error, uint64, bool, interface{}, chan<- uint64) } diff --git a/traffic_monitor/peer/peer.go b/traffic_monitor/peer/peer.go index cc6ab539d8..fa6ac878c2 100644 --- a/traffic_monitor/peer/peer.go +++ b/traffic_monitor/peer/peer.go @@ -25,7 +25,7 @@ import ( "github.com/apache/trafficcontrol/lib/go-tc" - "github.com/json-iterator/go" + jsoniter "github.com/json-iterator/go" ) // Handler handles peer Traffic Monitor data, taking a raw reader, parsing the data, and passing a result object to the ResultChannel. This fulfills the common `Handler` interface. @@ -50,7 +50,7 @@ type Result struct { } // Handle handles a response from a polled Traffic Monitor peer, parsing the data and forwarding it to the ResultChannel. 
-func (handler Handler) Handle(id string, r io.Reader, format string, reqTime time.Duration, reqEnd time.Time, err error, pollID uint64, usingIPv4 bool, pollFinished chan<- uint64) { +func (handler Handler) Handle(id string, r io.Reader, format string, reqTime time.Duration, reqEnd time.Time, err error, pollID uint64, usingIPv4 bool, pollCtx interface{}, pollFinished chan<- uint64) { result := Result{ ID: tc.TrafficMonitorName(id), Available: false, diff --git a/traffic_monitor/poller/cache.go b/traffic_monitor/poller/cache.go index 63bfe10227..3ef4e93353 100644 --- a/traffic_monitor/poller/cache.go +++ b/traffic_monitor/poller/cache.go @@ -193,7 +193,7 @@ func poller( } log.Debugf("poll %v %v poller end\n", pollID, time.Now()) - go handler.Handle(id, rdr, format, reqTime, reqEnd, err, pollID, usingIPv4, pollFinishedChan) + go handler.Handle(id, rdr, format, reqTime, reqEnd, err, pollID, usingIPv4, pollCtx, pollFinishedChan) if oscillateProtocols { usingIPv4 = !usingIPv4 diff --git a/traffic_monitor/poller/poller_type_http.go b/traffic_monitor/poller/poller_type_http.go index 38dff37eb5..11c2c68fe8 100644 --- a/traffic_monitor/poller/poller_type_http.go +++ b/traffic_monitor/poller/poller_type_http.go @@ -43,8 +43,9 @@ func httpGlobalInit(cfg config.Config, appData config.StaticAppData) interface{} Timeout: cfg.HTTPTimeout, } return &HTTPPollGlobalCtx{ - UserAgent: appData.UserAgent, - Client: sharedClient, + UserAgent: appData.UserAgent, + Client: sharedClient, + FormatAccept: cfg.HTTPPollingFormat, } } @@ -71,29 +72,33 @@ func httpInit(cfg PollerConfig, globalCtxI interface{}) interface{} { } return &HTTPPollCtx{ - Client: gctx.Client, - UserAgent: gctx.UserAgent, - NoKeepAlive: cfg.NoKeepAlive, - URL: cfg.URL, - URLv6: cfg.URLv6, - Host: cfg.Host, - PollerID: cfg.PollerID, + Client: gctx.Client, + UserAgent: gctx.UserAgent, + NoKeepAlive: cfg.NoKeepAlive, + URL: cfg.URL, + URLv6: cfg.URLv6, + Host: cfg.Host, + PollerID: cfg.PollerID, + FormatAccept: 
gctx.FormatAccept, } } type HTTPPollGlobalCtx struct { - Client *http.Client - UserAgent string + Client *http.Client + UserAgent string + FormatAccept string } type HTTPPollCtx struct { - Client *http.Client - UserAgent string - NoKeepAlive bool - URL string - URLv6 string - Host string - PollerID string + Client *http.Client + UserAgent string + NoKeepAlive bool + URL string + URLv6 string + Host string + PollerID string + HTTPHeader http.Header + FormatAccept string } func httpPoll(ctxI interface{}, url string, host string, pollID uint64) ([]byte, time.Time, time.Duration, error) { @@ -106,6 +111,8 @@ func httpPoll(ctxI interface{}, url string, host string, pollID uint64) ([]byte, if !ctx.NoKeepAlive { req.Header.Set("Connection", "keep-alive") } + + req.Header.Set("Accept", ctx.FormatAccept) req.Host = host startReq := time.Now() resp, err := ctx.Client.Do(req) @@ -130,5 +137,6 @@ func httpPoll(ctxI interface{}, url string, host string, pollID uint64) ([]byte, } reqEnd := time.Now() reqTime := reqEnd.Sub(startReq) // note this is the time to transfer the entire body, not just the roundtrip + ctx.HTTPHeader = resp.Header.Clone() return bts, reqEnd, reqTime, nil } From d05cefac2463d119a509215217dfff4c04abe1bf Mon Sep 17 00:00:00 2001 From: Evan Zelkowitz Date: Mon, 31 Aug 2020 08:18:45 -0700 Subject: [PATCH 2/4] Fix - remove unused, RHV slices, err msg capitalization, import groupings --- traffic_monitor/cache/astats.go | 12 ++++++------ traffic_monitor/cache/astats_csv.go | 12 ++++++------ traffic_monitor/cache/stats_over_http.go | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/traffic_monitor/cache/astats.go b/traffic_monitor/cache/astats.go index 0025075aa7..bde9463a5c 100644 --- a/traffic_monitor/cache/astats.go +++ b/traffic_monitor/cache/astats.go @@ -88,7 +88,7 @@ func astatsParse(cacheName string, rdr io.Reader, pollCTX interface{}) (Statisti } if err := stats.AddInterfaceFromRawLine(astats.System.ProcNetDev); err != nil { - return 
stats, nil, fmt.Errorf("Failed to parse interface line for cache '%s': %v", cacheName, err) + return stats, nil, fmt.Errorf("failed to parse interface line for cache '%s': %v", cacheName, err) } if inf, ok := stats.Interfaces[astats.System.InfName]; !ok { return stats, nil, errors.New("/proc/net/dev line didn't match reported interface line") @@ -98,7 +98,7 @@ func astatsParse(cacheName string, rdr io.Reader, pollCTX interface{}) (Statisti } if load, err := LoadavgFromRawLine(astats.System.ProcLoadavg); err != nil { - return stats, nil, fmt.Errorf("Failed to parse loadavg line for cache '%s': %v", cacheName, err) + return stats, nil, fmt.Errorf("failed to parse loadavg line for cache '%s': %v", cacheName, err) } else { stats.Loadavg = load } @@ -119,9 +119,9 @@ func astatsParse(cacheName string, rdr io.Reader, pollCTX interface{}) (Statisti return stats, astats.Ats, nil } else if ctype == "text/csv" { - return astatsCsvParseCsv(cacheName, rdr, pollCTX) + return astatsCsvParseCsv(cacheName, rdr) } else { - return stats, nil, fmt.Errorf("Stats Content-Type (%s) can not be parsed by astats", ctype) + return stats, nil, fmt.Errorf("stats Content-Type (%s) can not be parsed by astats", ctype) } } @@ -201,10 +201,10 @@ func astatsProcessStatPluginRemapStats(server string, stats map[string]*DSStat, ds, ok := toData.DeliveryServiceRegexes.DeliveryService(domain, subdomain, subsubdomain) if !ok { - return stats, fmt.Errorf("No Delivery Service match for '%s.%s.%s' stat '%v'", subsubdomain, subdomain, domain, strings.Join(statParts, ".")) + return stats, fmt.Errorf("no Delivery Service match for '%s.%s.%s' stat '%v'", subsubdomain, subdomain, domain, strings.Join(statParts, ".")) } if ds == "" { - return stats, fmt.Errorf("Empty Delivery Service fqdn '%s.%s.%s' stat %v", subsubdomain, subdomain, domain, strings.Join(statParts, ".")) + return stats, fmt.Errorf("empty Delivery Service fqdn '%s.%s.%s' stat %v", subsubdomain, subdomain, domain, strings.Join(statParts, ".")) } 
dsName := string(ds) diff --git a/traffic_monitor/cache/astats_csv.go b/traffic_monitor/cache/astats_csv.go index 8ea8e0a186..4274d65ec6 100644 --- a/traffic_monitor/cache/astats_csv.go +++ b/traffic_monitor/cache/astats_csv.go @@ -34,7 +34,7 @@ type astatsDataCsv struct { Ats map[string]interface{} } -func astatsCsvParseCsv(cacheName string, data io.Reader, pollCTX interface{}) (Statistics, map[string]interface{}, error) { +func astatsCsvParseCsv(cacheName string, data io.Reader) (Statistics, map[string]interface{}, error) { var stats Statistics var err error if data == nil { @@ -57,9 +57,9 @@ func astatsCsvParseCsv(cacheName string, data io.Reader, pollCTX interface{}) (S } // Special cases where we just want the string value if strings.Contains(line[0:delim], "proc.") || strings.Contains(line[0:delim], "inf.name") { - atsData.Ats[line[0:delim]] = line[delim+1 : len(line)] + atsData.Ats[line[0:delim]] = line[delim+1:] } else { - value, err := strconv.ParseFloat(line[delim+1:len(line)], 64) + value, err := strconv.ParseFloat(line[delim+1:], 64) // Skip values that dont parse if err != nil { @@ -70,20 +70,20 @@ func astatsCsvParseCsv(cacheName string, data io.Reader, pollCTX interface{}) (S } if len(atsData.Ats) < 1 { - return stats, nil, errors.New("No 'global' data object found in stats_over_http payload") + return stats, nil, errors.New("no 'global' data object found in stats_over_http payload") } statMap := atsData.Ats // Handle system specific values and remove them from the map for precomputing to not have issues if stats.Loadavg, err = LoadavgFromRawLine(statMap["proc.loadavg"].(string)); err != nil { - return stats, nil, fmt.Errorf("Error parsing loadavg for cache '%s': %v", cacheName, err) + return stats, nil, fmt.Errorf("error parsing loadavg for cache '%s': %v", cacheName, err) } else { delete(statMap, "proc.loadavg") } if err := stats.AddInterfaceFromRawLine(statMap["proc.net.dev"].(string)); err != nil { - return stats, nil, fmt.Errorf("Failed to parse 
interface line for cache '%s': %v", cacheName, err) + return stats, nil, fmt.Errorf("failed to parse interface line for cache '%s': %v", cacheName, err) } else { delete(statMap, "proc.net.dev") } diff --git a/traffic_monitor/cache/stats_over_http.go b/traffic_monitor/cache/stats_over_http.go index aa9bd173eb..e0d9af797e 100644 --- a/traffic_monitor/cache/stats_over_http.go +++ b/traffic_monitor/cache/stats_over_http.go @@ -28,8 +28,8 @@ import ( "strings" "github.com/apache/trafficcontrol/lib/go-log" - "github.com/apache/trafficcontrol/traffic_monitor/todata" + jsoniter "github.com/json-iterator/go" ) From 14c2ed44d3705eff936c5f730e7bbf625ac659f8 Mon Sep 17 00:00:00 2001 From: Evan Zelkowitz Date: Mon, 31 Aug 2020 08:41:44 -0700 Subject: [PATCH 3/4] Use loop for delete, fix err msg, remove log of output vars since we just care that parsing succeeded --- traffic_monitor/cache/astats_csv.go | 22 ++++++++++++++-------- traffic_monitor/cache/astats_test.go | 10 ++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/traffic_monitor/cache/astats_csv.go b/traffic_monitor/cache/astats_csv.go index 4274d65ec6..fa34c82984 100644 --- a/traffic_monitor/cache/astats_csv.go +++ b/traffic_monitor/cache/astats_csv.go @@ -77,7 +77,7 @@ func astatsCsvParseCsv(cacheName string, data io.Reader) (Statistics, map[string // Handle system specific values and remove them from the map for precomputing to not have issues if stats.Loadavg, err = LoadavgFromRawLine(statMap["proc.loadavg"].(string)); err != nil { - return stats, nil, fmt.Errorf("error parsing loadavg for cache '%s': %v", cacheName, err) + return stats, nil, fmt.Errorf("parsing loadavg for cache '%s': %v", cacheName, err) } else { delete(statMap, "proc.loadavg") } @@ -99,13 +99,19 @@ func astatsCsvParseCsv(cacheName string, data io.Reader) (Statistics, map[string } // Clean up other non-stats entries - delete(statMap, "astatsLoad") - delete(statMap, "lastReloadRequest") - delete(statMap, "version") - 
delete(statMap, "something") - delete(statMap, "lastReload") - delete(statMap, "configReloadRequests") - delete(statMap, "configReloads") + nonStats := []string{ + "astatsLoad", + "lastReloadRequest", + "version", + "something", + "lastReload", + "configReloadRequests", + "configReloads", + } + for _, nonStat := range nonStats { + delete(statMap, nonStat) + } + if len(stats.Interfaces) < 1 { return stats, nil, fmt.Errorf("cache '%s' had no interfaces", cacheName) } diff --git a/traffic_monitor/cache/astats_test.go b/traffic_monitor/cache/astats_test.go index 26164f2f10..e21abbbffd 100644 --- a/traffic_monitor/cache/astats_test.go +++ b/traffic_monitor/cache/astats_test.go @@ -41,14 +41,11 @@ func TestAstatsJson(t *testing.T) { pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} ctx := interface{}(pl) ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/json") - _, thismap, err := astatsParse("testCache", file, ctx) + _, _, err = astatsParse("testCache", file, ctx) if err != nil { t.Error(err) } - - t.Logf("Found %v key/val pairs in ats\n", len(thismap)) - } func TestAstatsCSV(t *testing.T) { @@ -60,14 +57,11 @@ func TestAstatsCSV(t *testing.T) { pl := &poller.HTTPPollCtx{HTTPHeader: http.Header{}} ctx := interface{}(pl) ctx.(*poller.HTTPPollCtx).HTTPHeader.Set("Content-Type", "text/csv") - _, thismap, err := astatsParse("testCache", file, ctx) + _, _, err = astatsParse("testCache", file, ctx) if err != nil { t.Error(err) } - - t.Logf("Found %v key/val pairs in ats\n", len(thismap)) - } func BenchmarkAstatsJson(b *testing.B) { From c4d749a1f51142d788451c32eb35e68a3e7e8351 Mon Sep 17 00:00:00 2001 From: Evan Zelkowitz Date: Mon, 31 Aug 2020 09:28:28 -0700 Subject: [PATCH 4/4] Change to do more allocation up front Now scans in all data in to a string slice which then lets us allocate the data map up front. 
In benches this bumps the performance up from ~8000 to 10000 --- traffic_monitor/cache/astats_csv.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/traffic_monitor/cache/astats_csv.go b/traffic_monitor/cache/astats_csv.go index fa34c82984..cce0342c1a 100644 --- a/traffic_monitor/cache/astats_csv.go +++ b/traffic_monitor/cache/astats_csv.go @@ -43,12 +43,15 @@ func astatsCsvParseCsv(cacheName string, data io.Reader) (Statistics, map[string } var atsData astatsDataCsv - atsData.Ats = make(map[string]interface{}) + var allData []string scanner := bufio.NewScanner(data) - for scanner.Scan() { + allData = append(allData, scanner.Text()) + } + + atsData.Ats = make(map[string]interface{}, len(allData)) - line := scanner.Text() + for _, line := range allData { delim := strings.IndexByte(line, ',') // No delimiter found, skip this line as invalid