@@ -785,3 +785,76 @@ function kill_port {
     kill -9 $PID
   fi
 }
+
+# Kills the process listening on a port if the name matches the first argument.
+# usage:
+#   kill_listener_port program_name port
+function kill_listener_port {
+  CMDLINE="$1"
+  PORT="$2"
+  kill -9 $(lsof -t -i TCP:${PORT} -s TCP:LISTEN -a -c "/^${CMDLINE}$/") || true
+}
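+#
+# Hypothetical usage sketch (the program name and port are illustrative, not
+# part of this change):
+#
+#   kill_listener_port nginx 8050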
+
+# Performs timed reads on the output from a command passed via $1. The stream
+# will be interpreted as a chunked http encoding. Each chunk will be allowed
+# at most threshold_sec ($2) seconds to be read or the function will fail. When
+# the stream is fully read, the function will compare the total number of http
+# chunks read with expect_chunk_count ($3) and fail if fewer chunks than
+# expected were read.
+# Usage:
+#   check_flushing "curl -N --raw --silent --proxy $SECONDARY_HOSTNAME $URL" 5 1
+# This will check that the curl command produced at least one chunk, with each
+# read completing within five seconds or less.
+function check_flushing() {
+  local command="$1"
+  local threshold_sec="$2"
+  local expect_chunk_count="$3"
+  local output=""
+  local start=$(date +%s%N)
+  local chunk_count=0
+
+  echo "Command: $command"
+
+  if [ "${USE_VALGRIND:-}" = true ]; then
+    # We can't say much about correctness of timings under valgrind, so relax
+    # the test for that.
+    threshold_sec=$(echo "scale=2; $threshold_sec * 10" | bc)
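+    # (bash's read -t accepts the resulting fractional timeout in bash 4+.)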
+  fi
+
+  while true; do
+    start=$(date +%s%N)
+    # Read the http chunk size from the stream. This is also the read which
+    # checks timings.
+    check read -t $threshold_sec line
+    echo "Chunk number [$chunk_count] has size: $line"
+    line=$(echo $line | tr -d '\n' | tr -d '\r')
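+    # Chunk sizes in chunked transfer coding are hexadecimal; bash's
+    # $((16#...)) base syntax converts them to decimal.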
+    # If we read 0 that means we have finished reading the stream.
+    if [ $((16#$line)) -eq "0" ]; then
+      check [ $expect_chunk_count -le $chunk_count ]
+      return
+    fi
+    let chunk_count=chunk_count+1
+    # Read the actual data from the stream, using the amount indicated in
+    # the previous read. This read should be fast.
+    # Note that we need to clear IFS for read since otherwise it can get
+    # confused by whitespace-only chunks.
+    IFS= check read -N $((16#$line)) line
+    echo "Chunk data: $line"
+    # Read the trailing \r\n - should be fast.
+    check read -N 2 line
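+  # Process substitution (rather than piping into the loop) keeps the loop in
+  # the current shell, so the check helpers can fail the surrounding test.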
+  done < <($command)
+  check 0
+}
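+#
+# A self-contained sketch of the input format (the emitter function below is
+# hypothetical, for illustration only): two chunks in chunked transfer coding,
+# "hello" (size 5) and "ok" (size 2), then the terminating 0-size chunk.
+#
+#   function emit_two_chunks {
+#     printf '5\r\nhello\r\n2\r\nok\r\n0\r\n\r\n'
+#   }
+#   check_flushing "emit_two_chunks" 1 2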
+
+# Given the output of a page with ?PageSpeedFilters=+debug, print the section of
+# the page where it lists what filters are enabled.
+function extract_filters_from_debug_html() {
+  local debug_output="$1"
+
+  # Pull out the non-blank lines between "Filters:" and "Options:". First
+  # convert newlines to % so sed can operate on the whole file, then put them
+  # back again.
+  check_from -q "$debug_output" grep -q "^Filters:$"
+  check_from -q "$debug_output" grep -q "^Options:$"
+  echo "$debug_output" | tr '\n' '%' | sed 's~.*%Filters:%~~' \
+    | sed "s~%Options:.*~~" | tr '%' '\n'
+}
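+#
+# A minimal, self-contained sketch (the sample filter ids and names are
+# illustrative, not from this change):
+#
+#   DEBUG_OUT=$'junk\nFilters:\nce\tCache Extend\nhw\tFlushes html\nOptions:\nmore junk'
+#   extract_filters_from_debug_html "$DEBUG_OUT"
+#   # prints the two filter lines between "Filters:" and "Options:".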