From bc598bbdf8c1634578aa205b66cdf8b87c1b3acf Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 06:32:06 +0200 Subject: [PATCH 1/4] move HTTP to Communication --- examples/build-examples-log.txt | 6 +- .../streams-audiokit-multioutput-server.ino | 1 + .../player-url-i2s/player-url-i2s.ino | 1 + .../player-url_icy-audiokit.ino | 1 + .../player-url_icy-i2s/player-url_icy-i2s.ino | 1 + .../AudioSourceIcyUrl.h | 2 +- .../player-url_subclass-i2s.ino | 1 + .../streams-eth_url_mp3_helix-i2s.ino | 1 + .../streams-http_post/streams-http_post.ino | 1 + .../streams-url-file/streams-url-file.ino | 1 + .../streams-url-measuring.ino | 1 + .../streams-url_aac-audiokit.ino | 1 + .../streams-url_aac-i2s.ino | 1 + .../streams-url_flac-i2s.ino | 1 + .../streams-url_flac_foxen-i2s.ino | 1 + .../streams-url_mp3-analog.ino | 1 + .../streams-url_mp3-audiokit.ino | 1 + .../streams-url_mp3-metadata.ino | 1 + .../streams-url_mp3-metadata2.ino | 1 + .../streams-url_mp3-pwm.ino | 1 + .../streams-url_mp3_helix-i2s.ino | 1 + .../streams-url_mp3_helix-i2s_32bit.ino | 1 + .../streams-url_mp3_mad-i2s.ino | 1 + .../streams-url_post/streams-url_post.ino | 1 + .../streams-url_raw-i2s.ino | 1 + .../streams-url_raw-serial.ino | 1 + .../streams-url_vorbis_i2s.ino | 1 + .../player-sd-webserverex_mp3.ino | 2 +- .../streams-audiokit-webserver_aac.ino | 1 + .../streams-audiokit-webserver_mp3.ino | 1 + .../streams-audiokit-webserver_wav.ino | 1 + .../streams-effect-webserver_wav.ino | 1 + .../streams-flite-webserver_wav.ino | 1 + .../streams-generator-webserver_aac.ino | 1 + .../streams-generator-webserver_mp3.ino | 1 + .../streams-generator-webserver_ogg.ino | 1 + .../streams-generator-webserver_wav.ino | 1 + .../streams-generator-webserverex_wav.ino | 2 +- .../streams-generator-webserverex_wav1.ino | 2 +- .../streams-i2s-webserver_wav.ino | 1 + .../streams-sam-webserver_wav.ino | 1 + .../streams-tts-webserver_wav.ino | 1 + .../communication-ip-send.ino | 3 +- 
.../serial/mp3-custom/send-mp3/send-mp3.ino | 1 + .../serial/mp3-xon-xoff/send-mp3/send-mp3.ino | 1 + .../serial/mp3/send-mp3/send-mp3.ino | 1 + .../streams-sdfat_mp3-metadata.ino | 1 + .../streams-azure_tts-i2s.ino | 1 + .../streams-google-audiokit.ino | 1 + .../streams-url_wav-i2s.ino | 1 + .../streams-url_mp3-vs1053.ino | 1 + .../test-container-avi/test-container-avi.ino | 1 + .../test-codec-aac-fdk-dec.ino | 1 + .../test-memory-helix/test-memory-helix.ino | 1 + .../test-streaming-adapter.ino | 1 + .../tests/etc/test-ads1015/test-ads1015.ino | 39 --- examples/tests/performance/wifi/wifi.ino | 1 + .../tests/player/test-player/test-player.ino | 1 + src/AudioTools/AudioCodecs/AudioEncoded.h | 3 +- src/AudioTools/AudioCodecs/CodecCopy.h | 5 + src/AudioTools/AudioCodecs/CodecMTS.h | 2 + src/AudioTools/AudioCodecs/ContainerAVI.h | 2 + src/AudioTools/AudioCodecs/HeaderParserMP3.h | 4 +- src/AudioTools/AudioCodecs/M4ACommonDemuxer.h | 2 + src/AudioTools/AudioCodecs/MultiDecoder.h | 2 +- src/AudioTools/AudioLibs/AudioServerEx.h | 184 +------------ src/AudioTools/AudioLibs/Desktop/File.h | 5 + .../AudioLibs/Desktop/JupyterAudio.h | 4 + src/AudioTools/AudioLibs/HLSStream.h | 4 +- src/AudioTools/AudioLibs/HLSStreamESP32.h | 4 +- src/AudioTools/AudioLibs/SPDIFOutput.h | 2 + src/AudioTools/Communication/AudioHttp.h | 15 ++ src/AudioTools/Communication/AudioServerEx.h | 183 +++++++++++++ .../HTTP}/AbstractURLStream.h | 0 .../HTTP}/AudioClient.h | 0 .../Communication/HTTP/AudioEncodedServerT.h | 222 +++++++++++++++ src/AudioTools/Communication/HTTP/AudioHttp.h | 25 ++ .../Communication/HTTP/AudioServer.h | 4 + .../Communication/HTTP/AudioServerEthernet.h | 45 ++++ .../HTTP/AudioServerT.h} | 253 ++---------------- .../Communication/HTTP/AudioServerWiFi.h | 43 +++ .../HTTP}/HttpChunkReader.h | 0 .../HTTP}/HttpHeader.h | 0 .../HTTP}/HttpLineReader.h | 0 .../HTTP}/HttpRequest.h | 0 .../HTTP}/HttpTypes.h | 0 src/AudioTools/Communication/HTTP/ICYStream.h | 16 ++ 
.../HTTP}/ICYStreamT.h | 2 +- src/AudioTools/Communication/HTTP/README.md | 4 + .../HTTP}/URLStream.h | 53 +--- .../HTTP}/URLStreamBufferedT.h | 2 +- .../HTTP}/URLStreamESP32.h | 15 +- .../AudioHttp => Communication/HTTP}/Url.h | 0 .../Communication/HTTP/WiFiInclude.h | 20 ++ .../Concurrency/RP2040/BufferRP2040.h | 2 + .../Concurrency/RP2040/MutexRP2040.h | 2 + src/AudioTools/Concurrency/RTOS/BufferRTOS.h | 2 + src/AudioTools/Concurrency/RTOS/MutexRTOS.h | 2 + .../RTOS/SynchronizedNBufferRTOS.h | 5 + src/AudioTools/CoreAudio.h | 1 - src/AudioTools/CoreAudio/AudioHttp.h | 3 - .../CoreAudio/AudioHttp/AudioHttp.h | 13 - src/AudioTools/CoreAudio/AudioHttp/README.md | 2 - .../CoreAudio/AudioMetaData/MetaData.h | 5 +- .../CoreAudio/AudioMetaData/MetaDataICY.h | 2 +- .../CoreAudio/AudioPWM/PWMDriverAVR.h | 2 + src/AudioTools/CoreAudio/AudioPlayer.h | 1 - src/AudioTools/CoreAudio/AudioStreams.h | 5 + .../CoreAudio/AudioTimer/AudioTimerDesktop.h | 7 +- src/AudioTools/CoreAudio/AudioTypes.h | 5 +- src/AudioTools/Disk/AudioSourceURL.h | 2 +- src/AudioTools/PlatformConfig/avr.h | 5 +- src/AudioTools/PlatformConfig/giga.h | 1 - src/AudioTools/PlatformConfig/portenta.h | 1 - src/AudioTools/PlatformConfig/samd.h | 2 +- src/AudioTools/PlatformConfig/stm32.h | 7 +- src/AudioTools/PlatformConfig/unor4.h | 2 +- tests-cmake/codec/CMakeLists.txt | 2 +- tests-cmake/codec/aac-faad/aac-faad.cpp | 2 - .../codec/aac-fdk-encode/aac-fdk-encode.cpp | 1 - tests-cmake/codec/aac-fdk/aac-fdk.cpp | 2 - tests-cmake/codec/aac-helix/aac-helix.cpp | 2 - tests-cmake/codec/mp3-helix/mp3-helix.cpp | 2 - tests-cmake/codec/mp3-lame/mp3-lame.cpp | 2 - tests-cmake/codec/mp3-mad/mp3-mad.cpp | 2 - .../codec/mp3-metadata/mp3-metadata.cpp | 3 - tests-cmake/effects/effects.cpp | 2 - tests-cmake/url-test/url-test.cpp | 3 +- 128 files changed, 749 insertions(+), 585 deletions(-) delete mode 100644 examples/tests/etc/test-ads1015/test-ads1015.ino create mode 100644 src/AudioTools/Communication/AudioHttp.h create mode 
100644 src/AudioTools/Communication/AudioServerEx.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/AbstractURLStream.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/AudioClient.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/AudioEncodedServerT.h create mode 100644 src/AudioTools/Communication/HTTP/AudioHttp.h create mode 100644 src/AudioTools/Communication/HTTP/AudioServer.h create mode 100644 src/AudioTools/Communication/HTTP/AudioServerEthernet.h rename src/AudioTools/{CoreAudio/AudioHttp/AudioServer.h => Communication/HTTP/AudioServerT.h} (55%) create mode 100644 src/AudioTools/Communication/HTTP/AudioServerWiFi.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpChunkReader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpHeader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpLineReader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpRequest.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpTypes.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/ICYStream.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/ICYStreamT.h (99%) create mode 100644 src/AudioTools/Communication/HTTP/README.md rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStream.h (91%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStreamBufferedT.h (99%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStreamESP32.h (95%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/Url.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/WiFiInclude.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp/README.md diff --git a/examples/build-examples-log.txt 
b/examples/build-examples-log.txt index 8f87fcd7ae..59ec0a0a2e 100644 --- a/examples/build-examples-log.txt +++ b/examples/build-examples-log.txt @@ -58,6 +58,7 @@ ../examples/examples-audiokit/streams-audiokit-fft -> rc=0 ../examples/examples-audiokit/streams-audiokit-fft-led -> rc=0 ../examples/examples-audiokit/streams-audiokit-filter-audiokit -> rc=0 +../examples/examples-audiokit/streams-audiokit-goertzel -> rc=0 ../examples/examples-audiokit/streams-audiokit-multioutput -> rc=0 ../examples/examples-audiokit/streams-audiokit-multioutput-server -> rc=0 ../examples/examples-audiokit/streams-audiokit-ram-audiokit -> rc=0 @@ -183,8 +184,10 @@ ../examples/examples-communication/vban/streams-generator-vban -> rc=0 ../examples/examples-communication/vban/streams-vban-audiokit -> rc=0 ../examples/examples-communication/rtsp/communication-audiokit-rtsp -> rc=0 -../examples/examples-communication/rtsp/communication-codec-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-codec-rtsp -> rc=1 ../examples/examples-communication/rtsp/communication-generator-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-player_mp3-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm -> rc=0 ../examples/examples-communication/rtsp/communication-rtsp-audiokit -> rc=0 ../examples/examples-communication/rtsp/communication-rtsp-i2s -> rc=0 ../examples/examples-communication/serial/mp3 -> rc=1 @@ -310,7 +313,6 @@ ../examples/tests/effects/pitch-shift-180 -> rc=0 ../examples/tests/effects/pitch-shift-simple -> rc=0 ../examples/tests/etc/callback-write -> rc=0 -../examples/tests/etc/test-ads1015 -> rc=0 ../examples/tests/etc/test-audiolibs -> rc=0 ../examples/tests/etc/test-mulit-compilation-units -> rc=0 ../examples/tests/etc/test-pins -> rc=0 diff --git a/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino 
b/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino index 653992abfc..95e0fa210e 100644 --- a/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino +++ b/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" const int buffer_count = 10; const int buffer_size = 1024; diff --git a/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino b/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino index ee1f014ef9..d4dcb4267f 100644 --- a/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino +++ b/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino b/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino index 2f510092bf..7b6e6450ce 100644 --- a/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino +++ b/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino @@ -10,6 +10,7 @@ #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { "http://stream.srg-ssr.ch/m/rsj/mp3_128", diff --git a/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino 
b/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino index 53b193d7e8..82aeb05ba5 100644 --- a/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino +++ b/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h b/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h index acb5fffaf5..7b88d088d0 100644 --- a/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h +++ b/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h @@ -1,6 +1,6 @@ #pragma once #include "AudioTools.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" +#include "AudioTools/Communication/HTTP/URLStream.h" #include "AudioTools/Disk/AudioSourceURL.h" namespace audio_tools { diff --git a/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino b/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino index 1c354c6074..a2b5bb3996 100644 --- a/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino +++ b/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioSourceIcyUrl.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino 
b/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino index 68857587c0..46e9ff8f5a 100644 --- a/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino +++ b/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" #include #include diff --git a/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino b/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino index 54186a978b..11ec3a7c78 100644 --- a/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino +++ b/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino @@ -8,6 +8,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" const char *ssid = "your SSID"; const char *password = "your PASSWORD"; diff --git a/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino b/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino index b4b8a1d2d8..fa3b92c376 100644 --- a/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino +++ b/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino @@ -11,6 +11,7 @@ */ #include "SD.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #define PIN_AUDIO_KIT_SD_CARD_CS 13 #define PIN_AUDIO_KIT_SD_CARD_MISO 2 diff --git a/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino b/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino index 0e72aa05f3..bfc11eb00b 100644 --- 
a/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino +++ b/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata MeasuringStream out(50, &Serial); // final output of decoded stream diff --git a/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino b/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino index 3ba8b078e4..942814f38d 100644 --- a/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino +++ b/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACHelix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata diff --git a/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino b/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino index 2d409b9ca7..e3118b234f 100644 --- a/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACHelix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino b/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino index 
cd153ab24e..6e026661ce 100644 --- a/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecFLAC.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git a/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino b/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino index 0bb8b9faa7..e7e7f9a726 100644 --- a/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecFLACFoxen.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git a/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino b/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino index bad4e6ffef..631734e146 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino b/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino index c3a80676f2..69a5d59a25 
100644 --- a/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata diff --git a/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino b/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino index 7d6e67c08f..c53d0e8235 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" ICYStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino b/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino index 399da89a92..583ea5c0ee 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" // -> EncodedAudioStream -> I2SStream // URLStream -> MultiOutput -| diff --git a/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino b/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino index 
15b96d25b5..71989a9496 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino index 18cfef0a60..f2e82420fa 100644 --- a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino index 705d8bb202..d0b71811aa 100644 --- a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino b/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino index bf3d81445d..b4c64d764b 100644 --- 
a/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3MAD.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino b/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino index 721f6a506e..07d9e2d5f3 100644 --- a/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino +++ b/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino @@ -7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" AudioInfo info(44100, 2, 16); SineWaveGenerator sineWave(32000); diff --git a/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino b/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino index b000e4ea17..ed0bfcc1a1 100644 --- a/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino @@ -8,6 +8,7 @@ #include "WiFi.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream music; // Music Stream I2SStream i2s;// I2S as Stream diff --git a/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino b/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino index bab43889fe..df53e4ddf1 100644 --- a/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino +++ b/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino @@ -8,6 +8,7 @@ #include "WiFi.h" #include 
"AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" diff --git a/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino b/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino index 5e25d3d127..ab835255cb 100644 --- a/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino +++ b/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecVorbis.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git a/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino b/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino index eebf181aca..662397f256 100644 --- a/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino +++ b/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino @@ -9,7 +9,7 @@ #include "AudioTools.h" #include "AudioTools/Disk/AudioSourceSD.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" #include "AudioTools/AudioCodecs/CodecCopy.h" #define PIN_AUDIO_KIT_SD_CARD_CS 13 diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino index f5cf208039..2163e3eb4a 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include 
"AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino index 24025fb94a..a14092f9cd 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/AudioCodecs/CodecMP3LAME.h" +#include "AudioTools/Communication/AudioHttp.h" // Set static IP address and stuff (optional) IPAddress IPA_address(192, 168, 0, 222); diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino index 0bc0a7a35d..7ab77b37bc 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino @@ -9,6 +9,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" AudioEncoderServer server(new WAVEncoder(),"ssid","password"); AudioBoardStream kit(AudioKitEs8388V1); diff --git a/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino b/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino index 98d0863888..567c1d9cd4 100644 --- 
a/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino b/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino index f4f03cd596..d589e760b8 100644 --- a/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino @@ -8,6 +8,7 @@ #include "flite_arduino.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino b/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino index 730acf2324..adcb117fc7 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino @@ -9,6 +9,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI diff --git a/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino b/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino index 6ec100a8d6..03620425a2 100644 --- 
a/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3LAME.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino b/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino index 1ef880b9bf..c7e1adcc66 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino @@ -16,6 +16,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecOpusOgg.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino b/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino index 8a921dd8e8..fc5a24679d 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino b/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino index 
a2dafe1c4f..dc13cc57d8 100644 --- a/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino +++ b/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" // WIFI const char *ssid = "SSID"; diff --git a/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino b/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino index a9c374db5c..3a0b289492 100644 --- a/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino +++ b/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" // WIFI const char *ssid = "SSID"; diff --git a/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino b/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino index 7e96b79117..b82b938c1b 100644 --- a/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino @@ -8,6 +8,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" //AudioEncodedServer server(new WAVEncoder(),"ssid","password"); AudioWAVServer server("ssid","password"); // the same a above diff --git a/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino 
b/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino index 325f55e4b0..12413b90cd 100644 --- a/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino @@ -6,6 +6,7 @@ * */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #include "sam_arduino.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino b/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino index 24ce8af2d0..b852055eaa 100644 --- a/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino @@ -7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #include "TTS.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino b/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino index 43d1c4c28e..1a3b91ae59 100644 --- a/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino +++ b/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino @@ -33,8 +33,9 @@ void connectWifi() { Serial.println(WiFi. 
localIP()); // Performance Hack + //esp_wifi_set_ps(WIFI_PS_NONE); + WiFi.setSleep(WIFI_PS_NONE); client.setNoDelay(true); - esp_wifi_set_ps(WIFI_PS_NONE); } void connectIP() { diff --git a/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino index 66a096ce00..da0d8a2c5f 100644 --- a/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino @@ -8,6 +8,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata StreamCopy copier(Serial1, url); // copy url to decoder diff --git a/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino index 709249ae1c..a76f572cb3 100644 --- a/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino @@ -7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata StreamCopy copier(Serial1, url); // copy url to decoder diff --git a/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino index ae0223f8fd..e2fc39e0b2 100644 --- a/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata HardwareSerial MP3Serial(1); // define a Serial for UART1 diff 
--git a/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino b/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino index 953146039b..549bc3917b 100644 --- a/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino +++ b/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino @@ -15,6 +15,7 @@ #include #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" // -> EncodedAudioStream -> I2SStream diff --git a/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino b/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino index e265410f69..ebc0cc3bc1 100644 --- a/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino +++ b/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino @@ -6,6 +6,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" String speechKey = "...."; // deploy a Speech Service in Azure and get both the key and the region. 
info here: https://azure.microsoft.com/en-us/products/cognitive-services/text-to-speech/ String spechregion = "...."; diff --git a/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino b/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino index 591c3003a7..c70c1b08b4 100644 --- a/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino +++ b/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino b/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino index 62e439e0ec..f28ae528a3 100644 --- a/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino +++ b/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // UrlStream -copy-> EncodedAudioStream -> I2S diff --git a/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino b/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino index 6a113c9da6..2e61e38ce4 100644 --- a/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino +++ b/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/VS1053Stream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata VS1053Stream vs1053; // final output diff --git a/examples/sandbox/test-container-avi/test-container-avi.ino b/examples/sandbox/test-container-avi/test-container-avi.ino index 76d37ac0a1..9c29ac16de 100644 --- 
a/examples/sandbox/test-container-avi/test-container-avi.ino +++ b/examples/sandbox/test-container-avi/test-container-avi.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/ContainerAVI.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // input AudioBoardStream out(AudioKitEs8388V1); diff --git a/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino b/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino index 6f509af461..c96330c26b 100644 --- a/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino +++ b/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino @@ -3,6 +3,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" SET_LOOP_TASK_STACK_SIZE(50 * 1024); diff --git a/examples/tests/codecs/test-memory-helix/test-memory-helix.ino b/examples/tests/codecs/test-memory-helix/test-memory-helix.ino index c30a6ae3d9..09d493c821 100644 --- a/examples/tests/codecs/test-memory-helix/test-memory-helix.ino +++ b/examples/tests/codecs/test-memory-helix/test-memory-helix.ino @@ -1,6 +1,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" /** * @brief Sketch to test the memory usage with libhelix with an ESP32 diff --git a/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino b/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino index 3e3136f376..52c5cbffdd 100644 --- a/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino +++ b/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino @@ -1,6 +1,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" 
+#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream diff --git a/examples/tests/etc/test-ads1015/test-ads1015.ino b/examples/tests/etc/test-ads1015/test-ads1015.ino deleted file mode 100644 index 0b877bbc46..0000000000 --- a/examples/tests/etc/test-ads1015/test-ads1015.ino +++ /dev/null @@ -1,39 +0,0 @@ - -#include -#include "ADS1X15.h" // https://github.com/pschatzmann/ADS1X15.git - -ADS1115 ads1015(0x48); // ads1015 device - - -void list(bool print){ - int count; - unsigned long end = millis()+1000*10; - while(end>millis()) { - int16_t value = ads1015.getValue();; - if (print) Serial.println(value); - count++; - } - Serial.print("Samples per second: "); - Serial.println(count/10); -} - -void setup(){ - Serial.begin(119200); - - // setup gain for ads1015 - Wire.setClock(400000); - ads1015.begin(); - if(!ads1015.isConnected()) Serial.println("ads1015 NOT CONNECTED!"); - ads1015.setGain(4); // 6.144 volt - ads1015.setDataRate(4); // 0 = slow 4 = medium 7 = fast (7 = fails ) - ads1015.setMode(0); - ads1015.requestADC_Differential_0_1(); - - list(false); - //list(true); -} - - -void loop(){ - -} \ No newline at end of file diff --git a/examples/tests/performance/wifi/wifi.ino b/examples/tests/performance/wifi/wifi.ino index 3e1d207761..50315e4a66 100644 --- a/examples/tests/performance/wifi/wifi.ino +++ b/examples/tests/performance/wifi/wifi.ino @@ -1,4 +1,5 @@ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("SSID","PASSWORD"); // or replace with ICYStream to get metadata MeasuringStream out(50, &Serial); // final output of decoded stream diff --git a/examples/tests/player/test-player/test-player.ino b/examples/tests/player/test-player/test-player.ino index 68f1c19422..8450bfad62 100644 --- a/examples/tests/player/test-player/test-player.ino +++ 
b/examples/tests/player/test-player/test-player.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/Disk/AudioSourceSDFAT.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/src/AudioTools/AudioCodecs/AudioEncoded.h b/src/AudioTools/AudioCodecs/AudioEncoded.h index 025ea2d819..f951085e62 100644 --- a/src/AudioTools/AudioCodecs/AudioEncoded.h +++ b/src/AudioTools/AudioCodecs/AudioEncoded.h @@ -257,7 +257,8 @@ class EncodedAudioOutput : public ModifyingOutput { int frame_size = DEFAULT_BUFFER_SIZE; }; -// legacy name +/// @brief Legacy alias for EncodedAudioOutput +/// @ingroup codecs using EncodedAudioPrint = EncodedAudioOutput; /** diff --git a/src/AudioTools/AudioCodecs/CodecCopy.h b/src/AudioTools/AudioCodecs/CodecCopy.h index 4c7b89ab96..55ab76d27b 100644 --- a/src/AudioTools/AudioCodecs/CodecCopy.h +++ b/src/AudioTools/AudioCodecs/CodecCopy.h @@ -101,7 +101,12 @@ class CopyEncoder : public AudioEncoder { const char *mime_type = "audio/pcm"; }; +/// @brief Alias for CopyEncoder to handle PCM audio encoding (no actual encoding) +/// @ingroup codecs using PCMEncoder = CopyEncoder; + +/// @brief Alias for CopyDecoder to handle PCM audio decoding (no actual decoding) +/// @ingroup codecs using PCMDecoder = CopyDecoder; } // namespace audio_tools diff --git a/src/AudioTools/AudioCodecs/CodecMTS.h b/src/AudioTools/AudioCodecs/CodecMTS.h index 8b6fd3f417..1add2564f1 100644 --- a/src/AudioTools/AudioCodecs/CodecMTS.h +++ b/src/AudioTools/AudioCodecs/CodecMTS.h @@ -485,6 +485,8 @@ class MTSDecoder : public AudioDecoder { } }; +/// @brief Legacy alias for MPEG Transport Stream decoder +/// @ingroup codecs using MPEG_TSDecoder = MTSDecoder; } // namespace audio_tools diff --git a/src/AudioTools/AudioCodecs/ContainerAVI.h b/src/AudioTools/AudioCodecs/ContainerAVI.h index c7f443cffa..e9d84a1b9b 100644 --- a/src/AudioTools/AudioCodecs/ContainerAVI.h +++ 
b/src/AudioTools/AudioCodecs/ContainerAVI.h @@ -58,6 +58,8 @@ class ParseBuffer { size_t available_byte_count = 0; }; +/// @brief Four-character code identifier for AVI format +/// @ingroup codecs using FOURCC = char[4]; struct AVIMainHeader { diff --git a/src/AudioTools/AudioCodecs/HeaderParserMP3.h b/src/AudioTools/AudioCodecs/HeaderParserMP3.h index 292c7a8b0b..4d209db680 100644 --- a/src/AudioTools/AudioCodecs/HeaderParserMP3.h +++ b/src/AudioTools/AudioCodecs/HeaderParserMP3.h @@ -219,7 +219,7 @@ class HeaderParserMP3 { return false; } - memset(&header, 0, sizeof(header)); + header = FrameHeader{}; int valid_frames_found = 0; int consecutive_frames = 0; const int MIN_FRAMES_TO_VALIDATE = 3; // Require at least 3 consecutive valid frames @@ -484,7 +484,7 @@ class HeaderParserMP3 { void reset() { buffer.reset(); frame_header_valid = false; - memset(&header, 0, sizeof(header)); + header = FrameHeader{}; } /// Finds the mp3/aac sync word diff --git a/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h b/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h index 840a9989a1..857b4cdef2 100644 --- a/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h +++ b/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h @@ -14,6 +14,8 @@ namespace audio_tools { /// we expect that the sample size is usually aound 1 - 2k, so uint16_t /// should be more then sufficient! Microcontolles only have a limited /// amount of RAM, so this makes a big difference! 
+/// @brief Sample size type optimized for microcontrollers +/// @ingroup codecs using stsz_sample_size_t = uint16_t; /** diff --git a/src/AudioTools/AudioCodecs/MultiDecoder.h b/src/AudioTools/AudioCodecs/MultiDecoder.h index 70dee46538..7f6f731f91 100644 --- a/src/AudioTools/AudioCodecs/MultiDecoder.h +++ b/src/AudioTools/AudioCodecs/MultiDecoder.h @@ -3,7 +3,7 @@ #include "AudioTools/AudioCodecs/AudioCodecsBase.h" #include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MimeDetector.h" #include "AudioTools/AudioCodecs/StreamingDecoder.h" diff --git a/src/AudioTools/AudioLibs/AudioServerEx.h b/src/AudioTools/AudioLibs/AudioServerEx.h index a5f3fa5a56..36019b52b9 100644 --- a/src/AudioTools/AudioLibs/AudioServerEx.h +++ b/src/AudioTools/AudioLibs/AudioServerEx.h @@ -1,183 +1,3 @@ #pragma once - -#include "AudioToolsConfig.h" -#include "AudioTools/CoreAudio/AudioOutput.h" -#include "AudioTools/AudioCodecs/CodecWAV.h" -#include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "HttpServer.h" -#include "HttpExtensions.h" - -namespace audio_tools { - -/** - * @brief Config information for AudioServerEx - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -struct AudioServerExConfig : public AudioInfo { - const char* mime = nullptr; - const char* ssid = nullptr; - const char* password = nullptr; - const char* path = "/"; - // optional input; if not used use write methods to push data - Stream *input=nullptr; - int port = 80; -}; - -/** - * @brief A powerfull Web server which is based on - * https://github.com/pschatzmann/TinyHttp. - * It supports multiple concurrent clients. You can e.g. use it to write mp3 data and make - * it available in multiple clients. 
- * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -class AudioServerEx : public AudioOutput { - public: - // Default Constructor - AudioServerEx() = default; - - /// To be compatible with legacy API - AudioServerEx(const char *ssid, const char* pwd){ - info.ssid = ssid; - info.password = pwd; - } - - virtual AudioServerExConfig defaultConfig() { - AudioServerExConfig cfg; - return cfg; - } - - virtual bool begin(AudioServerExConfig cfg) { - info = cfg; - return begin(); - } - - virtual bool begin(Stream &in, const char* contentType) { - info.input = ∈ - info.mime = contentType; - return begin(); - } - - virtual bool begin() { - end(); // we (re) start with a clean state - - if (info.input==nullptr){ - p_stream = new ExtensionStream(info.path,tinyhttp::T_GET, info.mime ); - } else { - p_stream = new ExtensionStream(info.path, info.mime, *info.input); - } - p_stream->setReplyHeader(*getReplyHeader()); - p_server = new tinyhttp::HttpServer(wifi); - - // handling of WAV - p_server->addExtension(*p_stream); - return p_server->begin(info.port, info.ssid, info.password); - } - - virtual void end() { - if (p_stream!=nullptr) { - delete p_stream; - p_stream = nullptr; - } - if (p_server!=nullptr) { - delete p_server; - p_server = nullptr; - } - } - - /// Web server supports write so that we can e.g. use is as destination for the audio player. 
- size_t write(const uint8_t* data, size_t len) override { - if (p_stream==nullptr) return 0; - return p_stream->write((uint8_t*)data, len); - } - - int availableForWrite() override { - if (p_stream==nullptr) return 0; - return p_stream->availableForWrite(); - } - - /// Needs to be called if the data was provided as input Stream in the AudioServerExConfig - virtual void copy() { - if (p_server!=nullptr){ - p_server->copy(); - } - } - - protected: - AudioServerExConfig info; - WiFiServer wifi; - HttpServer *p_server; - ExtensionStream *p_stream=nullptr; - - virtual tinyhttp::StrView* getReplyHeader() { - return nullptr; - } - -}; - -/** - * @brief A powerfull WAV Web server which is based on - * https://github.com/pschatzmann/TinyHttp. - * It supports multiple concurrent clients - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - * - */ -class AudioWAVServerEx : public AudioServerEx { - public: - // Default Constructor - AudioWAVServerEx() = default; - - /// To be compatible with legacy API - AudioWAVServerEx(const char *ssid, const char* pwd):AudioServerEx(ssid, pwd){} - - AudioServerExConfig defaultConfig() override { - AudioServerExConfig cfg; - cfg.mime = "audio/wav"; - return cfg; - } - - /// Legacy API support - bool begin(Stream &in, int sample_rate, int channels, int bits_per_sample=16) { - info.input = ∈ - info.sample_rate = sample_rate; - info.channels = channels; - info. 
bits_per_sample = bits_per_sample; - info.mime = "audio/wav"; - return AudioServerEx::begin(); - } - - bool begin(AudioServerExConfig cfg) override{ - return AudioServerEx::begin(cfg); - } - - protected: - // Dynamic memory - tinyhttp::Str header; - - // wav files start with a 44 bytes header - virtual tinyhttp::StrView* getReplyHeader() { - header.allocate(44); - MemoryOutput mp{(uint8_t*)header.c_str(), 44}; - WAVHeader enc; - WAVAudioInfo wi; - wi.format = AudioFormat::PCM; - wi.sample_rate = info.sample_rate; - wi.bits_per_sample = info.bits_per_sample; - wi.channels = info.channels; - enc.setAudioInfo(wi); - // fill header with data - enc.writeHeader(&mp); - // make sure that the length is 44 - assert(header.length() == 44); - - return &header; - } -}; - -} \ No newline at end of file +#warning("obsolete: use AudioTools/Communication/AudioServerEx.h") +#include "AudioTools/Communication/AudioServerEx.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/Desktop/File.h b/src/AudioTools/AudioLibs/Desktop/File.h index f1fc1f468f..800ec28469 100644 --- a/src/AudioTools/AudioLibs/Desktop/File.h +++ b/src/AudioTools/AudioLibs/Desktop/File.h @@ -7,7 +7,12 @@ namespace audio_tools { +/// @brief Desktop file system compatibility alias +/// @ingroup io using File = VFSFile; + +/// @brief Desktop file system compatibility alias +/// @ingroup io using FS = VFS; static FS SD; // global object for compatibility with Arduino code diff --git a/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h b/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h index 7d5810c285..0abc953aea 100644 --- a/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h +++ b/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h @@ -101,6 +101,8 @@ class ChartT { } }; +/// @brief Default chart type for Jupyter integration +/// @ingroup io using Chart = ChartT; /** @@ -193,6 +195,8 @@ class JupyterAudioT : public AudioStream { size_t buffer_count=0; }; +/// @brief Default Jupyter audio output with 16-bit 
samples +/// @ingroup io using JupyterAudio = JupyterAudioT; } // namespace audio_tools diff --git a/src/AudioTools/AudioLibs/HLSStream.h b/src/AudioTools/AudioLibs/HLSStream.h index 50aff75668..3d9e6126b7 100644 --- a/src/AudioTools/AudioLibs/HLSStream.h +++ b/src/AudioTools/AudioLibs/HLSStream.h @@ -1,7 +1,7 @@ #pragma once #include "AudioTools/AudioCodecs/AudioEncoded.h" #include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" +#include "AudioTools/Communication/HTTP/URLStream.h" #include "AudioTools/CoreAudio/StreamCopy.h" #include "AudioToolsConfig.h" @@ -776,6 +776,8 @@ class HLSStreamT : public AbstractURLStream { void addRequestHeader(const char *header, const char *value) override {} }; +/// @brief HLS Stream implementation using URLStream for HTTP requests +/// @ingroup http using HLSStream = HLSStreamT; } // namespace audio_tools diff --git a/src/AudioTools/AudioLibs/HLSStreamESP32.h b/src/AudioTools/AudioLibs/HLSStreamESP32.h index ea83435e59..a3072c28c1 100644 --- a/src/AudioTools/AudioLibs/HLSStreamESP32.h +++ b/src/AudioTools/AudioLibs/HLSStreamESP32.h @@ -1,9 +1,11 @@ #pragma once -#include "AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h" +#include "AudioTools/Communication/HTTP/URLStreamESP32.h" #include "HLSStream.h" namespace audio_tools { +/// @brief HLS Stream implementation using URLStreamESP32 for ESP32-specific HTTP requests +/// @ingroup http using HLSStreamESP32 = HLSStreamT; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/SPDIFOutput.h b/src/AudioTools/AudioLibs/SPDIFOutput.h index b0cd488bb1..b83adf4a96 100644 --- a/src/AudioTools/AudioLibs/SPDIFOutput.h +++ b/src/AudioTools/AudioLibs/SPDIFOutput.h @@ -282,6 +282,8 @@ class SPDIFOutput : public AudioStream { } }; +/// @brief Alias for SPDIFOutput for backward compatibility +/// @ingroup io using SPDIFStream = SPDIFOutput; } // namespace audio_tools \ No newline at end of file diff --git 
a/src/AudioTools/Communication/AudioHttp.h b/src/AudioTools/Communication/AudioHttp.h new file mode 100644 index 0000000000..7a817763da --- /dev/null +++ b/src/AudioTools/Communication/AudioHttp.h @@ -0,0 +1,15 @@ +#pragma once + +/** + * @file AudioHttp.h + * @brief Convenience header to include all networking functionality for AudioTools + * + * This header includes WiFi-based URLStream and AudioServer implementations. + * Include this file when you want to use networking features with the AudioTools library. + * + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +#include "AudioTools/Communication/HTTP/AudioHttp.h" + diff --git a/src/AudioTools/Communication/AudioServerEx.h b/src/AudioTools/Communication/AudioServerEx.h new file mode 100644 index 0000000000..a5f3fa5a56 --- /dev/null +++ b/src/AudioTools/Communication/AudioServerEx.h @@ -0,0 +1,183 @@ +#pragma once + +#include "AudioToolsConfig.h" +#include "AudioTools/CoreAudio/AudioOutput.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" +#include "AudioTools/CoreAudio/AudioBasic/StrView.h" +#include "HttpServer.h" +#include "HttpExtensions.h" + +namespace audio_tools { + +/** + * @brief Config information for AudioServerEx + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +struct AudioServerExConfig : public AudioInfo { + const char* mime = nullptr; + const char* ssid = nullptr; + const char* password = nullptr; + const char* path = "/"; + // optional input; if not used use write methods to push data + Stream *input=nullptr; + int port = 80; +}; + +/** + * @brief A powerfull Web server which is based on + * https://github.com/pschatzmann/TinyHttp. + * It supports multiple concurrent clients. You can e.g. use it to write mp3 data and make + * it available in multiple clients.
+ * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +class AudioServerEx : public AudioOutput { + public: + // Default Constructor + AudioServerEx() = default; + + /// To be compatible with legacy API + AudioServerEx(const char *ssid, const char* pwd){ + info.ssid = ssid; + info.password = pwd; + } + + virtual AudioServerExConfig defaultConfig() { + AudioServerExConfig cfg; + return cfg; + } + + virtual bool begin(AudioServerExConfig cfg) { + info = cfg; + return begin(); + } + + virtual bool begin(Stream &in, const char* contentType) { + info.input = ∈ + info.mime = contentType; + return begin(); + } + + virtual bool begin() { + end(); // we (re) start with a clean state + + if (info.input==nullptr){ + p_stream = new ExtensionStream(info.path,tinyhttp::T_GET, info.mime ); + } else { + p_stream = new ExtensionStream(info.path, info.mime, *info.input); + } + p_stream->setReplyHeader(*getReplyHeader()); + p_server = new tinyhttp::HttpServer(wifi); + + // handling of WAV + p_server->addExtension(*p_stream); + return p_server->begin(info.port, info.ssid, info.password); + } + + virtual void end() { + if (p_stream!=nullptr) { + delete p_stream; + p_stream = nullptr; + } + if (p_server!=nullptr) { + delete p_server; + p_server = nullptr; + } + } + + /// Web server supports write so that we can e.g. use is as destination for the audio player. 
+ size_t write(const uint8_t* data, size_t len) override { + if (p_stream==nullptr) return 0; + return p_stream->write((uint8_t*)data, len); + } + + int availableForWrite() override { + if (p_stream==nullptr) return 0; + return p_stream->availableForWrite(); + } + + /// Needs to be called if the data was provided as input Stream in the AudioServerExConfig + virtual void copy() { + if (p_server!=nullptr){ + p_server->copy(); + } + } + + protected: + AudioServerExConfig info; + WiFiServer wifi; + HttpServer *p_server; + ExtensionStream *p_stream=nullptr; + + virtual tinyhttp::StrView* getReplyHeader() { + return nullptr; + } + +}; + +/** + * @brief A powerfull WAV Web server which is based on + * https://github.com/pschatzmann/TinyHttp. + * It supports multiple concurrent clients + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + * + */ +class AudioWAVServerEx : public AudioServerEx { + public: + // Default Constructor + AudioWAVServerEx() = default; + + /// To be compatible with legacy API + AudioWAVServerEx(const char *ssid, const char* pwd):AudioServerEx(ssid, pwd){} + + AudioServerExConfig defaultConfig() override { + AudioServerExConfig cfg; + cfg.mime = "audio/wav"; + return cfg; + } + + /// Legacy API support + bool begin(Stream &in, int sample_rate, int channels, int bits_per_sample=16) { + info.input = ∈ + info.sample_rate = sample_rate; + info.channels = channels; + info. 
bits_per_sample = bits_per_sample; + info.mime = "audio/wav"; + return AudioServerEx::begin(); + } + + bool begin(AudioServerExConfig cfg) override{ + return AudioServerEx::begin(cfg); + } + + protected: + // Dynamic memory + tinyhttp::Str header; + + // wav files start with a 44 bytes header + virtual tinyhttp::StrView* getReplyHeader() { + header.allocate(44); + MemoryOutput mp{(uint8_t*)header.c_str(), 44}; + WAVHeader enc; + WAVAudioInfo wi; + wi.format = AudioFormat::PCM; + wi.sample_rate = info.sample_rate; + wi.bits_per_sample = info.bits_per_sample; + wi.channels = info.channels; + enc.setAudioInfo(wi); + // fill header with data + enc.writeHeader(&mp); + // make sure that the length is 44 + assert(header.length() == 44); + + return &header; + } +}; + +} \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h b/src/AudioTools/Communication/HTTP/AbstractURLStream.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h rename to src/AudioTools/Communication/HTTP/AbstractURLStream.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioClient.h b/src/AudioTools/Communication/HTTP/AudioClient.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/AudioClient.h rename to src/AudioTools/Communication/HTTP/AudioClient.h diff --git a/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h b/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h new file mode 100644 index 0000000000..6961607c0d --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h @@ -0,0 +1,222 @@ +#pragma once + +#include "AudioServerT.h" + +namespace audio_tools { + +/** + * @brief A simple Arduino Webserver which streams the audio using the indicated + * encoder.. This class is based on the WiFiServer class. All you need to do is + * to provide the data with a callback method or from a Stream. 
+ * + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class AudioEncoderServerT : public AudioServerT { + public: + /** + * @brief Construct a new Audio Server object that supports an AudioEncoder + * We assume that the WiFi is already connected + */ + AudioEncoderServerT(AudioEncoder *encoder, int port = 80) : AudioServerT(port) { + this->encoder = encoder; + } + + /** + * @brief Construct a new Audio Server object + * + * @param network + * @param password + */ + AudioEncoderServerT(AudioEncoder *encoder, const char *network, + const char *password, int port = 80) + : AudioServerT(network, password, port) { + this->encoder = encoder; + } + + /** + * @brief Destructor release the memory + **/ + virtual ~AudioEncoderServerT() = default; + + /** + * @brief Start the server. You need to be connected to WiFI before calling + * this method + * + * @param in + * @param sample_rate + * @param channels + */ + bool begin(Stream &in, int sample_rate, int channels, + int bits_per_sample = 16, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + AudioServerT::setConverter(converter); + audio_info.sample_rate = sample_rate; + audio_info.channels = channels; + audio_info.bits_per_sample = bits_per_sample; + encoder->setAudioInfo(audio_info); + // encoded_stream.begin(&client_obj, encoder); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(audio_info); + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. 
You need to be connected to WiFI before calling + * this method + * + * @param in + * @param info + * @param converter + */ + bool begin(Stream &in, AudioInfo info, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + this->audio_info = info; + AudioServerT::setConverter(converter); + encoder->setAudioInfo(audio_info); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + if (!encoded_stream.begin(audio_info)) { + LOGE("encoder begin failed"); + // stop(); + } + + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. You need to be connected to WiFI before calling + * this method + * + * @param in + * @param converter + */ + bool begin(AudioStream &in, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + this->audio_info = in.audioInfo(); + AudioServerT::setConverter(converter); + encoder->setAudioInfo(audio_info); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(audio_info); + + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. 
The data must be provided by a callback method + * + * @param cb + * @param sample_rate + * @param channels + */ + bool begin(AudioServerDataCallback cb, int sample_rate, int channels, + int bits_per_sample = 16) { + TRACED(); + audio_info.sample_rate = sample_rate; + audio_info.channels = channels; + audio_info.bits_per_sample = bits_per_sample; + encoder->setAudioInfo(audio_info); + + return AudioServerT::begin(cb, encoder->mime()); + } + + // provides a pointer to the encoder + AudioEncoder *audioEncoder() { return encoder; } + + protected: + // Sound Generation - use EncodedAudioOutput with is more efficient then + // EncodedAudioStream + EncodedAudioOutput encoded_stream; + AudioInfo audio_info; + AudioEncoder *encoder = nullptr; + + // moved to be part of reply content to avoid timeout issues in Chrome + void sendReplyHeader() override {} + + void sendReplyContent() override { + TRACED(); + // restart encoder + if (encoder) { + encoder->end(); + encoder->begin(); + } + + if (this->callback != nullptr) { + // encoded_stream.begin(out_ptr(), encoder); + encoded_stream.setOutput(this->out_ptr()); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(); + + // provide data via Callback to encoded_stream + LOGI("sendReply - calling callback"); + // Send delayed header + AudioServerT::sendReplyHeader(); + this->callback(&encoded_stream); + this->client_obj.stop(); + } else if (this->in != nullptr) { + // provide data for stream: in -copy> encoded_stream -> out + LOGI("sendReply - Returning encoded stream..."); + // encoded_stream.begin(out_ptr(), encoder); + encoded_stream.setOutput(this->out_ptr()); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(); + + this->copier.begin(encoded_stream, *this->in); + if (!this->client_obj.connected()) { + LOGE("connection was closed"); + } + // Send delayed header + AudioServerT::sendReplyHeader(); + } + } +}; + +/** + * @brief A simple Arduino Webserver which streams the audio as WAV data. 
+ * This class is based on the AudioEncodedServer class. All you need to do is to + * provide the data with a callback method or from a Stream. + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class AudioWAVServerT : public AudioEncoderServerT { + public: + /** + * @brief Construct a new Audio WAV Server object + * We assume that the WiFi is already connected + */ + AudioWAVServerT(int port = 80) : AudioEncoderServerT(new WAVEncoder(), port) {} + + /** + * @brief Construct a new Audio WAV Server object + * + * @param network + * @param password + */ + AudioWAVServerT(const char *network, const char *password, int port = 80) + : AudioEncoderServerT(new WAVEncoder(), network, password, port) {} + + /// Destructor: release the allocated encoder + ~AudioWAVServerT() { + AudioEncoder *encoder = AudioEncoderServerT::audioEncoder(); + if (encoder != nullptr) { + delete encoder; + } + } + + // provides a pointer to the encoder + WAVEncoder &wavEncoder() { return *static_cast(AudioEncoderServerT::encoder); } +}; + + + +} // namespace audio_tools + diff --git a/src/AudioTools/Communication/HTTP/AudioHttp.h b/src/AudioTools/Communication/HTTP/AudioHttp.h new file mode 100644 index 0000000000..ea025757f6 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioHttp.h @@ -0,0 +1,25 @@ +#pragma once +/** + * @defgroup http Http + * @ingroup communications + * @brief Http client & server +**/ + +// Include abstract base classes and utilities +#include "AbstractURLStream.h" +#include "HttpRequest.h" +#include "HttpHeader.h" +#include "HttpTypes.h" +#include "ICYStreamT.h" +#include "URLStreamBufferedT.h" +#include "Url.h" + +// For backward compatibility, include stub implementations +#include "URLStream.h" +#include "ICYStream.h" +#include "AudioServer.h" + +#if ((defined(ESP32) && defined(USE_URL_ARDUINO)) || defined(ESP32_CMAKE)) +# include "URLStreamESP32.h" +#endif + diff --git a/src/AudioTools/Communication/HTTP/AudioServer.h 
b/src/AudioTools/Communication/HTTP/AudioServer.h new file mode 100644 index 0000000000..7fa6fb4fdc --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServer.h @@ -0,0 +1,4 @@ +#pragma once + +#include "AudioServerWiFi.h" +#include "AudioServerEthernet.h" \ No newline at end of file diff --git a/src/AudioTools/Communication/HTTP/AudioServerEthernet.h b/src/AudioTools/Communication/HTTP/AudioServerEthernet.h new file mode 100644 index 0000000000..b6c121b560 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServerEthernet.h @@ -0,0 +1,45 @@ +#pragma once + +#include "AudioToolsConfig.h" + +#ifdef USE_ETHERNET +# include +#endif + +#include "AudioServerT.h" +#include "AudioEncodedServerT.h" +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" + +namespace audio_tools { + +#ifdef USE_ETHERNET +/// @brief Ethernet audio server for streaming audio content over Ethernet +/// @ingroup http +using AudioServerEthernet = AudioServerT; + +/// @brief Ethernet audio server with encoder support for streaming encoded audio +/// @ingroup http +using AudioEncoderServerEthernet = AudioEncoderServerT; + +/// @brief Ethernet audio server specifically for streaming WAV audio +/// @ingroup http +using AudioWAVServerEthernet = AudioWAVServerT; + +#ifndef USE_WIFI +/// @brief Basic audio server (defaults to Ethernet when USE_WIFI is not defined) +/// @ingroup http +using AudioServer = AudioServerT; + +/// @brief Basic audio server with encoder support (defaults to Ethernet when USE_WIFI is not defined) +/// @ingroup http +using AudioEncoderServer = AudioEncoderServerEthernet; + +/// @brief Basic WAV audio server (defaults to Ethernet when USE_WIFI is not defined) +/// @ingroup http +using AudioWAVServer = AudioWAVServerEthernet; +#endif +#endif + + +} // namespace audio_tools diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioServer.h b/src/AudioTools/Communication/HTTP/AudioServerT.h similarity index 55% rename from 
src/AudioTools/CoreAudio/AudioHttp/AudioServer.h rename to src/AudioTools/Communication/HTTP/AudioServerT.h index 6fda68a5f1..8834df6da8 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/AudioServer.h +++ b/src/AudioTools/Communication/HTTP/AudioServerT.h @@ -1,24 +1,25 @@ #pragma once +#include "AudioTools/AudioCodecs/CodecWAV.h" +#include "AudioTools/CoreAudio/AudioStreams.h" +#include "AudioTools/CoreAudio/StreamCopy.h" #include "AudioToolsConfig.h" -#if defined(USE_AUDIO_SERVER) && (defined(USE_ETHERNET) || defined(USE_WIFI)) -#ifdef USE_WIFI -#ifdef ESP8266 -#include -#else -#include -#endif -#endif - -#ifdef USE_ETHERNET -#include -#endif +namespace audio_tools { -#include "AudioTools.h" -#include "AudioTools/AudioCodecs/CodecWAV.h" +/// Calback which writes the sound data to the stream +typedef void (*AudioServerDataCallback)(Print *out); -namespace audio_tools { +/** + * @brief A simple Arduino Webserver template which streams the result + * This template class can work with different Client and Server types. + * All you need to do is to provide the data with a callback method or + * from an Arduino Stream: in -copy> client + * + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ /// Calback which writes the sound data to the stream typedef void (*AudioServerDataCallback)(Print *out); @@ -302,226 +303,4 @@ class AudioServerT { } }; -#ifdef USE_WIFI -using AudioServer = AudioServerT; -using AudioServerWiFi = AudioServerT; -#endif - -#ifdef USE_ETHERNET -using AudioServer = AudioServerT; -using AudioServerEthernet = AudioServerT; -#endif - -/** - * @brief A simple Arduino Webserver which streams the audio using the indicated - * encoder.. This class is based on the WiFiServer class. All you need to do is - * to provide the data with a callback method or from a Stream. 
- * - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class AudioEncoderServer : public AudioServer { - public: - /** - * @brief Construct a new Audio Server object that supports an AudioEncoder - * We assume that the WiFi is already connected - */ - AudioEncoderServer(AudioEncoder *encoder, int port = 80) : AudioServer(port) { - this->encoder = encoder; - } - - /** - * @brief Construct a new Audio Server object - * - * @param network - * @param password - */ - AudioEncoderServer(AudioEncoder *encoder, const char *network, - const char *password, int port = 80) - : AudioServer(network, password, port) { - this->encoder = encoder; - } - - /** - * @brief Destructor release the memory - **/ - ~AudioEncoderServer() {} - - /** - * @brief Start the server. You need to be connected to WiFI before calling - * this method - * - * @param in - * @param sample_rate - * @param channels - */ - bool begin(Stream &in, int sample_rate, int channels, - int bits_per_sample = 16, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - setConverter(converter); - audio_info.sample_rate = sample_rate; - audio_info.channels = channels; - audio_info.bits_per_sample = bits_per_sample; - encoder->setAudioInfo(audio_info); - // encoded_stream.begin(&client_obj, encoder); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(audio_info); - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. 
You need to be connected to WiFI before calling - * this method - * - * @param in - * @param info - * @param converter - */ - bool begin(Stream &in, AudioInfo info, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - this->audio_info = info; - setConverter(converter); - encoder->setAudioInfo(audio_info); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - if (!encoded_stream.begin(audio_info)) { - LOGE("encoder begin failed"); - stop(); - } - - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. You need to be connected to WiFI before calling - * this method - * - * @param in - * @param converter - */ - bool begin(AudioStream &in, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - this->audio_info = in.audioInfo(); - setConverter(converter); - encoder->setAudioInfo(audio_info); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(audio_info); - - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. 
The data must be provided by a callback method - * - * @param cb - * @param sample_rate - * @param channels - */ - bool begin(AudioServerDataCallback cb, int sample_rate, int channels, - int bits_per_sample = 16) { - TRACED(); - audio_info.sample_rate = sample_rate; - audio_info.channels = channels; - audio_info.bits_per_sample = bits_per_sample; - encoder->setAudioInfo(audio_info); - - return AudioServer::begin(cb, encoder->mime()); - } - - // provides a pointer to the encoder - AudioEncoder *audioEncoder() { return encoder; } - - protected: - // Sound Generation - use EncodedAudioOutput with is more efficient then - // EncodedAudioStream - EncodedAudioOutput encoded_stream; - AudioInfo audio_info; - AudioEncoder *encoder = nullptr; - - // moved to be part of reply content to avoid timeout issues in Chrome - void sendReplyHeader() override {} - - void sendReplyContent() override { - TRACED(); - // restart encoder - if (encoder) { - encoder->end(); - encoder->begin(); - } - - if (callback != nullptr) { - // encoded_stream.begin(out_ptr(), encoder); - encoded_stream.setOutput(out_ptr()); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(); - - // provide data via Callback to encoded_stream - LOGI("sendReply - calling callback"); - // Send delayed header - AudioServer::sendReplyHeader(); - callback(&encoded_stream); - client_obj.stop(); - } else if (in != nullptr) { - // provide data for stream: in -copy> encoded_stream -> out - LOGI("sendReply - Returning encoded stream..."); - // encoded_stream.begin(out_ptr(), encoder); - encoded_stream.setOutput(out_ptr()); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(); - - copier.begin(encoded_stream, *in); - if (!client_obj.connected()) { - LOGE("connection was closed"); - } - // Send delayed header - AudioServer::sendReplyHeader(); - } - } -}; - -/** - * @brief A simple Arduino Webserver which streams the audio as WAV data. - * This class is based on the AudioEncodedServer class. 
All you need to do is to - * provide the data with a callback method or from a Stream. - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class AudioWAVServer : public AudioEncoderServer { - public: - /** - * @brief Construct a new Audio WAV Server object - * We assume that the WiFi is already connected - */ - AudioWAVServer(int port = 80) : AudioEncoderServer(new WAVEncoder(), port) {} - - /** - * @brief Construct a new Audio WAV Server object - * - * @param network - * @param password - */ - AudioWAVServer(const char *network, const char *password, int port = 80) - : AudioEncoderServer(new WAVEncoder(), network, password, port) {} - - /// Destructor: release the allocated encoder - ~AudioWAVServer() { - AudioEncoder *encoder = audioEncoder(); - if (encoder != nullptr) { - delete encoder; - } - } - - // provides a pointer to the encoder - WAVEncoder &wavEncoder() { return *static_cast(encoder); } -}; - } // namespace audio_tools - -#endif diff --git a/src/AudioTools/Communication/HTTP/AudioServerWiFi.h b/src/AudioTools/Communication/HTTP/AudioServerWiFi.h new file mode 100644 index 0000000000..778f328bad --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServerWiFi.h @@ -0,0 +1,43 @@ +#pragma once + +#include "AudioToolsConfig.h" + +#ifdef USE_WIFI +#include "WiFiInclude.h" +#endif + +#include "AudioEncodedServerT.h" +#include "AudioServerT.h" +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" + +namespace audio_tools { + +#ifdef USE_WIFI +/// @brief Basic WiFi audio server for streaming audio content +/// @ingroup http +using AudioServer = AudioServerT; + +/// @brief WiFi audio server for streaming audio content (explicit WiFi naming) +/// @ingroup http +using AudioServerWiFi = AudioServerT; + +/// @brief WiFi audio server with encoder support for streaming encoded audio +/// @ingroup http +using AudioEncoderServerWiFi = AudioEncoderServerT; + +/// @brief Basic audio server with encoder support (defaults to WiFi 
when +/// USE_WIFI is defined) +/// @ingroup http +using AudioEncoderServer = AudioEncoderServerT; + +/// @brief WiFi audio server specifically for streaming WAV audio +/// @ingroup http +using AudioWAVServerWiFi = AudioWAVServerT; + +/// @brief Basic WAV audio server (defaults to WiFi when USE_WIFI is defined) +/// @ingroup http +using AudioWAVServer = AudioWAVServerT; +#endif + +} // namespace audio_tools diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpChunkReader.h b/src/AudioTools/Communication/HTTP/HttpChunkReader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpChunkReader.h rename to src/AudioTools/Communication/HTTP/HttpChunkReader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpHeader.h b/src/AudioTools/Communication/HTTP/HttpHeader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpHeader.h rename to src/AudioTools/Communication/HTTP/HttpHeader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpLineReader.h b/src/AudioTools/Communication/HTTP/HttpLineReader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpLineReader.h rename to src/AudioTools/Communication/HTTP/HttpLineReader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpRequest.h b/src/AudioTools/Communication/HTTP/HttpRequest.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpRequest.h rename to src/AudioTools/Communication/HTTP/HttpRequest.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpTypes.h b/src/AudioTools/Communication/HTTP/HttpTypes.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpTypes.h rename to src/AudioTools/Communication/HTTP/HttpTypes.h diff --git a/src/AudioTools/Communication/HTTP/ICYStream.h b/src/AudioTools/Communication/HTTP/ICYStream.h new file mode 100644 index 0000000000..bbec6bc292 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/ICYStream.h @@ -0,0 +1,16 @@ +#pragma once +#include "AudioTools/Communication/HTTP/URLStream.h" 
+#include "AudioTools/Communication/HTTP/ICYStreamT.h" + +namespace audio_tools { + +/// Type alias for ICYStream +using ICYStream = ICYStreamT; + +#if defined(USE_CONCURRENCY) +/// Type alias for buffered ICYStream +using ICYStreamBuffered = URLStreamBufferedT; + +#endif + +} \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h b/src/AudioTools/Communication/HTTP/ICYStreamT.h similarity index 99% rename from src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h rename to src/AudioTools/Communication/HTTP/ICYStreamT.h index b2b346e935..19708a8e00 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h +++ b/src/AudioTools/Communication/HTTP/ICYStreamT.h @@ -1,5 +1,5 @@ #pragma once -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h" #include "AudioToolsConfig.h" diff --git a/src/AudioTools/Communication/HTTP/README.md b/src/AudioTools/Communication/HTTP/README.md new file mode 100644 index 0000000000..c96d798a51 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/README.md @@ -0,0 +1,4 @@ + +We provide our own HTTP protocal implementation which includes a simple webserver. +Initially this was part of the core functionality, but has been changed to __optional functionality__ +in order to optimize the sketch size. 
\ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStream.h b/src/AudioTools/Communication/HTTP/URLStream.h similarity index 91% rename from src/AudioTools/CoreAudio/AudioHttp/URLStream.h rename to src/AudioTools/Communication/HTTP/URLStream.h index fdd998ccd9..e748e1403f 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStream.h +++ b/src/AudioTools/Communication/HTTP/URLStream.h @@ -1,21 +1,15 @@ #pragma once #include "AudioToolsConfig.h" -#ifdef USE_URL_ARDUINO -#if defined(ESP32) -# include -# include -# include -# include +#if defined(USE_WIFI) +# include "WiFiInclude.h" #endif #include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" -#include "AudioTools/CoreAudio/AudioHttp/ICYStreamT.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h" - +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/HttpRequest.h" +#include "AudioTools/Communication/HTTP/URLStreamBufferedT.h" namespace audio_tools { @@ -25,7 +19,7 @@ namespace audio_tools { * In this chase you can check if setting the protocol to "HTTP/1.0" improves * the situation. 
* @author Phil Schatzmann - * @ingroup http + * @ingroup network * @copyright GPLv3 * */ @@ -55,18 +49,14 @@ class URLStream : public AbstractURLStream { ~URLStream() { TRACED(); end(); -#ifdef USE_WIFI_CLIENT_SECURE if (clientSecure != nullptr) { delete clientSecure; clientSecure = nullptr; } -#endif -#ifdef USE_WIFI if (clientInsecure != nullptr) { delete clientInsecure; clientInsecure = nullptr; } -#endif } /// (Re-)defines the client @@ -259,9 +249,7 @@ class URLStream : public AbstractURLStream { /// Define the Root PEM Certificate for SSL void setCACert(const char* cert) override{ - #ifdef USE_WIFI_CLIENT_SECURE if (clientSecure!=nullptr) clientSecure->setCACert(cert); - #endif } protected: @@ -281,12 +269,8 @@ class URLStream : public AbstractURLStream { const char* network = nullptr; const char* password = nullptr; Client* client = nullptr; // client defined via setClient -#ifdef USE_WIFI WiFiClient* clientInsecure = nullptr; // wifi client for http -#endif -#ifdef USE_WIFI_CLIENT_SECURE WiFiClientSecure* clientSecure = nullptr; // wifi client for https -#endif int clientTimeout = URL_CLIENT_TIMEOUT; // 60000; unsigned long handshakeTimeout = URL_HANDSHAKE_TIMEOUT; // 120000 bool is_power_save = false; @@ -300,7 +284,6 @@ class URLStream : public AbstractURLStream { // close it - if we have an active connection if (active) end(); -#ifdef USE_WIFI // optional: login if necessary if no external client is defined if (client == nullptr){ if (!login()){ @@ -308,7 +291,6 @@ class URLStream : public AbstractURLStream { return false; } } -#endif // request.reply().setAutoCreateLines(false); if (acceptMime != nullptr) { @@ -323,7 +305,7 @@ class URLStream : public AbstractURLStream { client.setTimeout(clientTimeout / 1000); request.setTimeout(clientTimeout); -#if defined(ESP32) && defined(USE_WIFI_CLIENT_SECURE) +#if defined(ESP32) // There is a bug in IDF 4! 
if (clientSecure != nullptr) { clientSecure->setHandshakeTimeout(handshakeTimeout); @@ -370,7 +352,6 @@ class URLStream : public AbstractURLStream { /// Determines the client Client& getClient(bool isSecure) { -#ifdef USE_WIFI_CLIENT_SECURE if (isSecure) { if (clientSecure == nullptr) { clientSecure = new WiFiClientSecure(); @@ -379,20 +360,11 @@ class URLStream : public AbstractURLStream { LOGI("WiFiClientSecure"); return *clientSecure; } -#endif -#ifdef USE_WIFI if (clientInsecure == nullptr) { clientInsecure = new WiFiClient(); LOGI("WiFiClient"); } return *clientInsecure; -#else - if (client == nullptr){ - LOGE("Client not set"); - stop(); - } - return *client; // to avoid compiler warning -#endif } inline void fillBuffer() { @@ -406,7 +378,6 @@ class URLStream : public AbstractURLStream { inline bool isEOS() { return read_pos >= read_size; } bool login() { -#ifdef USE_WIFI if (network != nullptr && password != nullptr && WiFi.status() != WL_CONNECTED) { TRACEI(); @@ -420,22 +391,14 @@ class URLStream : public AbstractURLStream { return WiFi.status() == WL_CONNECTED; } return WiFi.status() == WL_CONNECTED; -#else - return false; -#endif } }; -/// Type alias for ICYStream -using ICYStream = ICYStreamT; #if defined(USE_CONCURRENCY) /// Type alias for buffered URLStream using URLStreamBuffered = URLStreamBufferedT; -/// Type alias for buffered ICYStream -using ICYStreamBuffered = URLStreamBufferedT; + #endif } // namespace audio_tools - -#endif diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h b/src/AudioTools/Communication/HTTP/URLStreamBufferedT.h similarity index 99% rename from src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h rename to src/AudioTools/Communication/HTTP/URLStreamBufferedT.h index 1331a2b8af..8f47c56c6a 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h +++ b/src/AudioTools/Communication/HTTP/URLStreamBufferedT.h @@ -2,7 +2,7 @@ #include "AudioToolsConfig.h" #if defined(USE_CONCURRENCY) #include 
"AudioTools/AudioLibs/Concurrency.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/BaseStream.h" #ifndef URL_STREAM_CORE diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h b/src/AudioTools/Communication/HTTP/URLStreamESP32.h similarity index 95% rename from src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h rename to src/AudioTools/Communication/HTTP/URLStreamESP32.h index 66a7a2b4b4..e471e2907c 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h +++ b/src/AudioTools/Communication/HTTP/URLStreamESP32.h @@ -1,9 +1,9 @@ #pragma once -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" -#include "AudioTools/CoreAudio/AudioHttp/ICYStreamT.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/HttpRequest.h" +#include "AudioTools/Communication/HTTP/ICYStreamT.h" +#include "AudioTools/Communication/HTTP/URLStreamBufferedT.h" #include "esp_http_client.h" #include "esp_idf_version.h" #include "esp_system.h" @@ -414,17 +414,22 @@ class URLStreamESP32 : public AbstractURLStream { } }; -/// ICYStream +/// ICYStream for ESP32 platform using ICYStreamESP32 = ICYStreamT; #if defined(USE_CONCURRENCY) +/// Buffered URLStream for ESP32 platform using URLStreamBufferedESP32 = URLStreamBufferedT; +/// Buffered ICYStream for ESP32 platform using ICYStreamBufferedESP32 = URLStreamBufferedT; #endif /// Support URLStream w/o Arduino #if !defined(ARDUINO) +/// URLStream alias for ESP32 (non-Arduino environments) using URLStream = URLStreamESP32; +/// Buffered URLStream alias for ESP32 (non-Arduino environments) using URLStreamBuffered = URLStreamBufferedESP32; +/// Buffered ICYStream alias for ESP32 (non-Arduino environments) using ICYStreamBuffered = ICYStreamBufferedESP32; 
#endif diff --git a/src/AudioTools/CoreAudio/AudioHttp/Url.h b/src/AudioTools/Communication/HTTP/Url.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/Url.h rename to src/AudioTools/Communication/HTTP/Url.h diff --git a/src/AudioTools/Communication/HTTP/WiFiInclude.h b/src/AudioTools/Communication/HTTP/WiFiInclude.h new file mode 100644 index 0000000000..b2d7cf6285 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/WiFiInclude.h @@ -0,0 +1,20 @@ +#pragma once +#include "AudioToolsConfig.h" + +// Different platforms have different WiFi libraries + +#if defined(USE_WIFININA) +# include +#elif defined(USE_WIFIS3) +# include +#elif defined(ESP8266) +# include +#elif defined(ESP32) +# include +# include +# include +# include +#else +# include +#endif + diff --git a/src/AudioTools/Concurrency/RP2040/BufferRP2040.h b/src/AudioTools/Concurrency/RP2040/BufferRP2040.h index 1d19f3989d..f55dedb0ff 100644 --- a/src/AudioTools/Concurrency/RP2040/BufferRP2040.h +++ b/src/AudioTools/Concurrency/RP2040/BufferRP2040.h @@ -218,6 +218,8 @@ class BufferRP2040T : public BaseBuffer { }; +/// @brief RP2040 specific buffer for audio data +/// @ingroup buffers using BufferRP2040 = BufferRP2040T; } // namespace audio_tools diff --git a/src/AudioTools/Concurrency/RP2040/MutexRP2040.h b/src/AudioTools/Concurrency/RP2040/MutexRP2040.h index 53627db614..685a56e914 100644 --- a/src/AudioTools/Concurrency/RP2040/MutexRP2040.h +++ b/src/AudioTools/Concurrency/RP2040/MutexRP2040.h @@ -55,6 +55,8 @@ class MutexRP2040 : public MutexBase { mutex_t mtx; }; +/// @brief Default Mutex implementation using RP2040 Pico SDK +/// @ingroup concurrency using Mutex = MutexRP2040; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Concurrency/RTOS/BufferRTOS.h b/src/AudioTools/Concurrency/RTOS/BufferRTOS.h index fc41b920c7..1b3d7cadcc 100644 --- a/src/AudioTools/Concurrency/RTOS/BufferRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/BufferRTOS.h @@ -208,6 +208,8 
@@ class BufferRTOS : public BaseBuffer { }; // #endif // ESP_IDF_VERSION_MAJOR >= 4 +/// @brief Template alias for RTOS-based synchronized buffer +/// @ingroup concurrency template using SynchronizedBufferRTOS = BufferRTOS; diff --git a/src/AudioTools/Concurrency/RTOS/MutexRTOS.h b/src/AudioTools/Concurrency/RTOS/MutexRTOS.h index 67d72b4bd4..28419ddec0 100644 --- a/src/AudioTools/Concurrency/RTOS/MutexRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/MutexRTOS.h @@ -39,6 +39,8 @@ class MutexRTOS : public MutexBase { SemaphoreHandle_t xSemaphore = NULL; }; +/// @brief Default Mutex implementation using RTOS semaphores +/// @ingroup concurrency using Mutex = MutexRTOS; } \ No newline at end of file diff --git a/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h b/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h index d23cb4f72b..561f94a421 100644 --- a/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h @@ -118,7 +118,12 @@ class SynchronizedNBufferRTOST : public NBuffer { } }; +/// @brief RTOS synchronized buffer for managing multiple audio buffers +/// @ingroup buffers using SynchronizedNBufferRTOS = SynchronizedNBufferRTOST; + +/// @brief Default synchronized buffer alias +/// @ingroup buffers using SynchronizedNBuffer = SynchronizedNBufferRTOS; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/CoreAudio.h b/src/AudioTools/CoreAudio.h index 44b622ec3e..e638a952d6 100644 --- a/src/AudioTools/CoreAudio.h +++ b/src/AudioTools/CoreAudio.h @@ -23,6 +23,5 @@ #include "AudioTools/CoreAudio/AnalogAudioStream.h" #include "AudioTools/CoreAudio/AudioEffects.h" #include "AudioTools/CoreAudio/AudioMetaData.h" -#include "AudioTools/CoreAudio/AudioHttp.h" #include "AudioTools/CoreAudio/FrequencyDetector.h" #include "AudioTools/CoreAudio/GoerzelStream.h" diff --git a/src/AudioTools/CoreAudio/AudioHttp.h b/src/AudioTools/CoreAudio/AudioHttp.h deleted file mode 100644 
index 570f61d65d..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp.h +++ /dev/null @@ -1,3 +0,0 @@ -#pragma once - -#include "AudioTools/CoreAudio/AudioHttp/AudioHttp.h" diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h b/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h deleted file mode 100644 index 9d4a0c3e84..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once -/** - * @defgroup http Http - * @ingroup communications - * @brief Http client & server -**/ - -#include "URLStream.h" -#include "AudioServer.h" - -#if ((defined(ESP32) && defined(USE_URL_ARDUINO)) || defined(ESP32_CMAKE)) -# include "URLStreamESP32.h" -#endif \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/README.md b/src/AudioTools/CoreAudio/AudioHttp/README.md deleted file mode 100644 index e0cc926917..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp/README.md +++ /dev/null @@ -1,2 +0,0 @@ - -We provide our own HTTP protocal implementation which includes a simple webserver \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h b/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h index 1ce8fee0bf..f3bf6d97f3 100644 --- a/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h +++ b/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h @@ -3,7 +3,7 @@ #include "AudioToolsConfig.h" #include "AudioTools/CoreAudio/AudioTypes.h" #include "AudioTools/CoreAudio/AudioStreams.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaDataFilter.h" #include "MetaDataICY.h" #include "MetaDataID3.h" @@ -42,8 +42,6 @@ class MetaDataOutput : public AudioOutput { callback = fn; } -#ifdef USE_URL_ARDUINO - /// Starts the processing - iceMetaint is determined from the HttpRequest virtual void begin(AbstractURLStream &url) { TRACED(); @@ -52,7 +50,6 @@ class MetaDataOutput : public 
AudioOutput { icySetup.executeCallback(callback); begin(metaInt); } -#endif /// Starts the processing - if iceMetaint is defined we use icecast virtual void begin(int iceMetaint=0) { diff --git a/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h b/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h index f1e3811cc4..473832e73e 100644 --- a/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h +++ b/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h @@ -3,7 +3,7 @@ #include "AudioToolsConfig.h" #include "AbstractMetaData.h" #include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" namespace audio_tools { diff --git a/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h b/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h index 5f242cf926..b48188f339 100644 --- a/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h +++ b/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h @@ -8,6 +8,8 @@ namespace audio_tools { class PWMDriverAVR; +/// @brief Platform-specific PWM driver alias for AVR +/// @ingroup io using PWMDriver = PWMDriverAVR; static PWMDriverAVR *accessAudioPWM = nullptr; diff --git a/src/AudioTools/CoreAudio/AudioPlayer.h b/src/AudioTools/CoreAudio/AudioPlayer.h index 9aaab248a8..684967d190 100644 --- a/src/AudioTools/CoreAudio/AudioPlayer.h +++ b/src/AudioTools/CoreAudio/AudioPlayer.h @@ -2,7 +2,6 @@ #include "AudioTools/AudioCodecs/AudioCodecs.h" #include "AudioTools/CoreAudio/AudioBasic/Debouncer.h" -#include "AudioTools/CoreAudio/AudioHttp/AudioHttp.h" #include "AudioTools/CoreAudio/AudioLogger.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaData.h" #include "AudioTools/CoreAudio/AudioStreams.h" diff --git a/src/AudioTools/CoreAudio/AudioStreams.h b/src/AudioTools/CoreAudio/AudioStreams.h index e68b18e4fb..dac9d93dbd 100644 --- a/src/AudioTools/CoreAudio/AudioStreams.h +++ b/src/AudioTools/CoreAudio/AudioStreams.h @@ -1945,7 +1945,12 @@ 
class VolumeMeter : public ModifyingStream { }; // legacy names +/// @brief Legacy alias for VolumeMeter +/// @ingroup io using VolumePrint = VolumeMeter; + +/// @brief Legacy alias for VolumeMeter +/// @ingroup io using VolumeOutput = VolumeMeter; #ifdef USE_TIMER diff --git a/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h b/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h index d655a3705d..31fa22d197 100644 --- a/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h +++ b/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h @@ -1,12 +1,13 @@ #pragma once -#include -#include -#include #include "AudioTimerBase.h" #if defined(USE_TIMER) && defined(USE_CPP_TASK) +#include +#include +#include + namespace audio_tools { /** diff --git a/src/AudioTools/CoreAudio/AudioTypes.h b/src/AudioTools/CoreAudio/AudioTypes.h index 17db4c6a3d..6abb168c3a 100644 --- a/src/AudioTools/CoreAudio/AudioTypes.h +++ b/src/AudioTools/CoreAudio/AudioTypes.h @@ -18,6 +18,8 @@ namespace audio_tools { +/// @brief Type alias for sample rate values +/// @ingroup basic using sample_rate_t = uint32_t; /** @@ -520,7 +522,8 @@ inline void waitFor(HardwareSerial& out) { while (!out); } /// wait for flag to be active @ingroup basic inline void waitFor(bool& flag) { while (!flag); } -/// Pins @ingroup basic +/// @brief Type alias for a collection of pin numbers +/// @ingroup basic using Pins = Vector; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Disk/AudioSourceURL.h b/src/AudioTools/Disk/AudioSourceURL.h index e3df89d489..d0c02c7de8 100644 --- a/src/AudioTools/Disk/AudioSourceURL.h +++ b/src/AudioTools/Disk/AudioSourceURL.h @@ -3,7 +3,7 @@ #pragma once #include "AudioToolsConfig.h" #include "AudioSource.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" namespace audio_tools { diff --git a/src/AudioTools/PlatformConfig/avr.h b/src/AudioTools/PlatformConfig/avr.h index 
43dc86965b..fe0a87d5bd 100644 --- a/src/AudioTools/PlatformConfig/avr.h +++ b/src/AudioTools/PlatformConfig/avr.h @@ -4,9 +4,8 @@ #define USE_PWM #define USE_TIMER #define NO_INPLACE_INIT_SUPPORT -// Uncomment to activate network -//#include -//#define USE_URL_ARDUINO +#define USE_ETHERNET +#define USE_URL_ARDUINO #ifndef assert # define assert(T) #endif diff --git a/src/AudioTools/PlatformConfig/giga.h b/src/AudioTools/PlatformConfig/giga.h index b0c9ca9ccd..9a8830b748 100644 --- a/src/AudioTools/PlatformConfig/giga.h +++ b/src/AudioTools/PlatformConfig/giga.h @@ -2,7 +2,6 @@ #pragma once -#include #include #define IS_MBED diff --git a/src/AudioTools/PlatformConfig/portenta.h b/src/AudioTools/PlatformConfig/portenta.h index 921a2e5471..49ba6ce16f 100644 --- a/src/AudioTools/PlatformConfig/portenta.h +++ b/src/AudioTools/PlatformConfig/portenta.h @@ -2,7 +2,6 @@ #pragma once -#include #include #define IS_MBED diff --git a/src/AudioTools/PlatformConfig/samd.h b/src/AudioTools/PlatformConfig/samd.h index bca98a7f27..9f57346d93 100644 --- a/src/AudioTools/PlatformConfig/samd.h +++ b/src/AudioTools/PlatformConfig/samd.h @@ -15,7 +15,7 @@ #define PIN_CS 4 #ifdef ARDUINO_SAMD_MKRWIFI1010 -#include +#define USE_WIFI_NININA #define USE_URL_ARDUINO #define USE_AUDIO_SERVER #endif diff --git a/src/AudioTools/PlatformConfig/stm32.h b/src/AudioTools/PlatformConfig/stm32.h index a1e83ceab6..3706770967 100644 --- a/src/AudioTools/PlatformConfig/stm32.h +++ b/src/AudioTools/PlatformConfig/stm32.h @@ -25,8 +25,7 @@ #define SOFT_MUTE_VALUE 0 #define PIN_CS -1 -// Uncomment to activate networking -//#define USE_ETHERNET -//#define USE_URL_ARDUINO -//#define USE_AUDIO_SERVER +#define USE_ETHERNET +#define USE_URL_ARDUINO +#define USE_AUDIO_SERVER diff --git a/src/AudioTools/PlatformConfig/unor4.h b/src/AudioTools/PlatformConfig/unor4.h index 18f05a5085..380eb5f0cf 100644 --- a/src/AudioTools/PlatformConfig/unor4.h +++ b/src/AudioTools/PlatformConfig/unor4.h @@ -32,5 +32,5 @@ # define 
USE_WIFI # define USE_URL_ARDUINO # define USE_AUDIO_SERVER -# include "WiFiS3.h" +# define USE_WIFIS3 #endif diff --git a/tests-cmake/codec/CMakeLists.txt b/tests-cmake/codec/CMakeLists.txt index dfaf1b08b6..ac0bcd24ad 100644 --- a/tests-cmake/codec/CMakeLists.txt +++ b/tests-cmake/codec/CMakeLists.txt @@ -30,7 +30,7 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/hls ${CMAKE_CURRENT_BINARY_DIR}/hls add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/container-m4a ${CMAKE_CURRENT_BINARY_DIR}/container-m4a) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/m4a-extractor ${CMAKE_CURRENT_BINARY_DIR}/m4a-extractor) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp4-parser ${CMAKE_CURRENT_BINARY_DIR}/mp4-parser) -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp3-parser ${CMAKE_CURRENT_BINARY_DIR}/mp4-parser) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp3-parser ${CMAKE_CURRENT_BINARY_DIR}/mp3-parser) #add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/container-avi-movie ${CMAKE_CURRENT_BINARY_DIR}/container-avi-movie) diff --git a/tests-cmake/codec/aac-faad/aac-faad.cpp b/tests-cmake/codec/aac-faad/aac-faad.cpp index e2882f1472..4c98baa722 100644 --- a/tests-cmake/codec/aac-faad/aac-faad.cpp +++ b/tests-cmake/codec/aac-faad/aac-faad.cpp @@ -4,8 +4,6 @@ //#include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); //PortAudioStream out; // Output of sound on desktop CsvOutput out(Serial, 2); diff --git a/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp b/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp index c72d604247..ce8f104dc1 100644 --- a/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp +++ b/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp @@ -4,7 +4,6 @@ #include "AudioTools/AudioCodecs/CodecAACFDK.h" //#include // for rand -using namespace audio_tools; HexDumpOutput out(Serial); AACEncoderFDK aac(out); diff --git a/tests-cmake/codec/aac-fdk/aac-fdk.cpp 
b/tests-cmake/codec/aac-fdk/aac-fdk.cpp index c25125046d..f6ec92ee84 100644 --- a/tests-cmake/codec/aac-fdk/aac-fdk.cpp +++ b/tests-cmake/codec/aac-fdk/aac-fdk.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new AACDecoderFDK()); // aac data source diff --git a/tests-cmake/codec/aac-helix/aac-helix.cpp b/tests-cmake/codec/aac-helix/aac-helix.cpp index 68f31cb69b..37cfbdab7d 100644 --- a/tests-cmake/codec/aac-helix/aac-helix.cpp +++ b/tests-cmake/codec/aac-helix/aac-helix.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new AACDecoderHelix()); // aac data source diff --git a/tests-cmake/codec/mp3-helix/mp3-helix.cpp b/tests-cmake/codec/mp3-helix/mp3-helix.cpp index d6504f1034..3ede3b6b9b 100644 --- a/tests-cmake/codec/mp3-helix/mp3-helix.cpp +++ b/tests-cmake/codec/mp3-helix/mp3-helix.cpp @@ -5,8 +5,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "BabyElephantWalk60_mp3.h" -using namespace audio_tools; - MemoryStream mp3(BabyElephantWalk60_mp3, BabyElephantWalk60_mp3_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new MP3DecoderHelix()); // MP3 data source diff --git a/tests-cmake/codec/mp3-lame/mp3-lame.cpp b/tests-cmake/codec/mp3-lame/mp3-lame.cpp index d8d3e7483e..8e9f01609e 100644 --- a/tests-cmake/codec/mp3-lame/mp3-lame.cpp +++ b/tests-cmake/codec/mp3-lame/mp3-lame.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioCodecs/CodecMP3LAME.h" //#include // for rand -using namespace audio_tools; - 
HexDumpOutput out(Serial); MP3EncoderLAME mp3(out); AudioInfoLAME info; diff --git a/tests-cmake/codec/mp3-mad/mp3-mad.cpp b/tests-cmake/codec/mp3-mad/mp3-mad.cpp index 66628a7f86..3dd507e65d 100644 --- a/tests-cmake/codec/mp3-mad/mp3-mad.cpp +++ b/tests-cmake/codec/mp3-mad/mp3-mad.cpp @@ -5,8 +5,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "BabyElephantWalk60_mp3.h" -using namespace audio_tools; - MemoryStream mp3(BabyElephantWalk60_mp3, BabyElephantWalk60_mp3_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new MP3DecoderMAD()); // MP3 data source diff --git a/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp b/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp index 5cad3fa347..cbac6384df 100644 --- a/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp +++ b/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp @@ -3,14 +3,11 @@ #include "AudioTools.h" #include "sample-12s.h" -using namespace audio_tools; - MemoryStream mp3(sample_12s_mp3, sample_12s_mp3_len); MetaDataOutput out; StreamCopy copier(out, mp3); // copy in to out bool title_printed = false; - void printMetaData(MetaDataType type, const char* str, int len){ Serial.print("==> "); Serial.print(toStr(type)); diff --git a/tests-cmake/effects/effects.cpp b/tests-cmake/effects/effects.cpp index d12fa388d1..ede3f28a52 100644 --- a/tests-cmake/effects/effects.cpp +++ b/tests-cmake/effects/effects.cpp @@ -3,8 +3,6 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/PortAudioStream.h" -using namespace audio_tools; - PortAudioStream out; SineWaveGenerator sine; AudioEffects> effects(sine); diff --git a/tests-cmake/url-test/url-test.cpp b/tests-cmake/url-test/url-test.cpp index 4f82a1b610..282e5ba40d 100644 --- a/tests-cmake/url-test/url-test.cpp +++ b/tests-cmake/url-test/url-test.cpp @@ -1,6 +1,5 @@ #include "AudioTools.h" - -using namespace audio_tools; +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); 
NullStream null_out; // final output of decoded stream From 1b1c6ff46929b1ab0faaf47365abb26785f4aa9a Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 06:45:32 +0200 Subject: [PATCH 2/4] RTSP corrections --- .../Communication/RTSP/RTSPAudioSource.h | 7 - .../Communication/RTSP/RTSPAudioStreamer.h | 12 +- .../Communication/RTSP/RTSPClient.h | 891 ++++++++++++++++++ .../Communication/RTSP/RTSPClientEthernet.h | 29 + .../Communication/RTSP/RTSPClientWiFi.h | 28 + .../Communication/RTSP/RTSPFormat.h | 1 - .../Communication/RTSP/RTSPOutput.h | 8 - .../Communication/RTSP/RTSPPlatform.h | 4 +- .../Communication/RTSP/RTSPServer.h | 26 +- .../Communication/RTSP/RTSPSession.h | 56 +- 10 files changed, 1034 insertions(+), 28 deletions(-) create mode 100644 src/AudioTools/Communication/RTSP/RTSPClient.h create mode 100644 src/AudioTools/Communication/RTSP/RTSPClientEthernet.h create mode 100644 src/AudioTools/Communication/RTSP/RTSPClientWiFi.h diff --git a/src/AudioTools/Communication/RTSP/RTSPAudioSource.h b/src/AudioTools/Communication/RTSP/RTSPAudioSource.h index fa6ad3a434..56c2efb0f8 100644 --- a/src/AudioTools/Communication/RTSP/RTSPAudioSource.h +++ b/src/AudioTools/Communication/RTSP/RTSPAudioSource.h @@ -5,13 +5,6 @@ #include "RTSPFormat.h" #include "RTSPPlatform.h" -/** - * @defgroup rtsp RTSP Streaming - * @ingroup communications - * @file RTSPAudioSource.h - * @author Phil Schatzmann - * @copyright GPLv3 - */ namespace audio_tools { diff --git a/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h b/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h index b634a7c0eb..4833690d4d 100644 --- a/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h +++ b/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h @@ -43,8 +43,8 @@ namespace audio_tools { * * @note This base class does not include timer functionality * @note Use RTSPAudioStreamer for automatic timer-driven streaming - * @author Thomas Pfitzinger - * @version 0.2.0 + * @ingroup rtsp + * 
@author Phil Schatzmann */ template class RTSPAudioStreamerBase { @@ -617,8 +617,8 @@ class RTSPAudioStreamerBase { * * @note This is the recommended class for most use cases * @note Use RTSPAudioStreamerBase for custom streaming control - * @author Thomas Pfitzinger - * @version 0.2.0 + * @ingroup rtsp + * @author Phil Schatzmann */ template class RTSPAudioStreamer : public RTSPAudioStreamerBase { @@ -770,8 +770,8 @@ class RTSPAudioStreamer : public RTSPAudioStreamerBase { * @note Useful when hardware timers are limited or need different scheduling * @note Requires FreeRTOS support (ESP32, etc.) * @note Throttled mode provides more accurate timing but uses more CPU - * @author Thomas Pfitzinger - * @version 0.2.0 + * @ingroup rtsp + * @author Phil Schatzmann */ template class RTSPAudioStreamerUsingTask : public RTSPAudioStreamerBase { diff --git a/src/AudioTools/Communication/RTSP/RTSPClient.h b/src/AudioTools/Communication/RTSP/RTSPClient.h new file mode 100644 index 0000000000..3470495782 --- /dev/null +++ b/src/AudioTools/Communication/RTSP/RTSPClient.h @@ -0,0 +1,891 @@ +#pragma once +#include +#include + +#include "AudioTools/AudioCodecs/CodecNetworkFormat.h" +#include "AudioTools/AudioCodecs/MultiDecoder.h" +#include "AudioTools/CoreAudio//BaseStream.h" +#include "AudioTools/CoreAudio/AudioBasic/Collections/Vector.h" +#include "AudioTools/CoreAudio/Buffers.h" +#include "AudioTools/CoreAudio/ResampleStream.h" + +namespace audio_tools { + +/** + * @brief Efficient RTSP client for UDP/RTP audio with decoder pipeline. + * + * Establishes an RTSP session (OPTIONS, DESCRIBE, SETUP/UDP, PLAY), binds a + * local UDP RTP port and receives RTP audio packets. The payload of each RTP + * packet is forwarded to an internal MultiDecoder. For raw PCM over RTP + * (e.g. L16) a DecoderNetworkFormat is used to convert network byte order + * into host format before writing to the configured output. For compressed + * formats, register decoders with addDecoder(). 
+ * + * Usage: + * - Construct and set an output via setOutput() + * - Call begin(address, port) + * - In your loop, call copy() to push RTP payloads to decoders + * - Optionally control streaming via setActive(true/false) + * - Optionally define resampling factor to prevent buffer over/underflows + * + * Template parameters: + * - TcpClient: TCP client type (e.g: WiFiClient) + * - UdpSocket: UDP socket type (e.g: WiFiUDP) + * + * @ingroup rtsp + * @author Phil Schatzmann + */ +template +class RTSPClient : public AudioInfoSource, public AudioInfoSupport { + public: + RTSPClient() { + // convert network format to little endian + m_multi_decoder.addDecoder(m_decoder_net, "audio/L16"); + // convert to 16 bit + m_multi_decoder.addDecoder(m_decoder_l8, "audio/L8"); + // Start resampler; it will receive AudioInfo later via setAudioInfo + m_resampler.begin(); + m_multi_decoder.setOutput(m_resampler); + } + + /** + * @brief Construct with an AudioOutput as decoding sink. + */ + RTSPClient(AudioOutput& out) : RTSPClient() { setOutput(out); } + /** + * @brief Construct with an AudioStream as decoding sink. + */ + RTSPClient(AudioStream& out) : RTSPClient() { setOutput(out); } + /** + * @brief Construct with a generic Print sink. + */ + RTSPClient(Print& out) : RTSPClient() { setOutput(out); } + /** + * @brief Define decoding sink as AudioOutput. + */ + void setOutput(AudioOutput& out) { m_resampler.setOutput(out); } + /** + * @brief Define decoding sink as AudioStream. + */ + void setOutput(AudioStream& out) { m_resampler.setStream(out); } + /** + * @brief Define decoding sink as Print. + */ + void setOutput(Print& out) { m_resampler.setOutput(out); } + + /** + * @brief Set resampling factor to stabilize buffers and playback. + * 1.0 means no resampling. factor > 1.0 speeds up (upsamples), + * factor < 1.0 slows down (downsamples). Useful to compensate clock drift + * between sender and receiver to prevent buffer overflows/underflows. 
+ * Internally mapped to step size as step = 1.0 / factor. + * @note This can be used to prevent buffer overflows/underflows + */ + void setResampleFactor(float factor) { + if (factor <= 0.0f) factor = 1.0f; + float step = 1.0f / factor; + m_resampleStep = step; + m_resampler.setStepSize(step); + // Always route via resampler; factor 1.0 is pass-through + } + /** + * @brief Set idle backoff delay (ms) for zero-return cases. + * Used in available() and copy() to avoid busy loops. + */ + void setIdleDelay(uint32_t ms) { m_idleDelayMs = ms; } + + /** + * @brief Set number of TCP connect retries (default 2). + */ + void setConnectRetries(uint8_t retries) { m_connectRetries = retries; } + + /** + * @brief Set delay between connect retries in ms (default 500ms). + */ + void setConnectRetryDelayMs(uint32_t ms) { m_connectRetryDelayMs = ms; } + + /** + * @brief Set timeout (ms) for reading RTSP response headers. + * Increase if your server responds slowly. Default 3000ms. + */ + void setHeaderTimeoutMs(uint32_t ms) { m_headerTimeoutMs = ms; } + + /** + * @brief Set additional RTP payload offset in bytes. + * Some payloads embed a small header before the actual audio data + * (e.g., RFC2250 4-byte header for MP3). This offset is added after + * the RTP header and any CSRC entries. + */ + void setPayloadOffset(uint8_t bytes) { m_payloadOffset = bytes; } + /** + * @brief Start RTSP session and UDP RTP reception. + * @param addr RTSP server IP address + * @param port RTSP server port (typically 554) + * @param path Optional path appended to the RTSP URL (e.g. "stream1"). 
+ * If provided, the base URL becomes + * rtsp://:// + * @return true on success + */ + bool begin(IPAddress addr, uint16_t port, const char* path = nullptr) { + resetState(); + m_addr = addr; + m_port = port; + + if (m_tcp.connected()) m_tcp.stop(); + LOGI("RTSPClient: connecting to %u.%u.%u.%u:%u", m_addr[0], m_addr[1], + m_addr[2], m_addr[3], (unsigned)m_port); + // m_tcp.setTimeout(m_headerTimeoutMs / 1000); + bool connected = false; + for (uint8_t attempt = 0; attempt <= m_connectRetries; ++attempt) { + if (m_tcp.connect(m_addr, m_port)) { + connected = true; + break; + } + LOGW("RTSPClient: connect attempt %u failed", (unsigned)(attempt + 1)); + if (attempt < m_connectRetries) delay(m_connectRetryDelayMs); + } + if (!connected) { + LOGE("RTSPClient: TCP connect failed"); + return false; + } + m_tcp.setNoDelay(true); + + // Build base URL and track URL + buildUrls(path); + + // CSeq starts at 1 + m_cseq = 1; + + // OPTIONS + LOGI("OPTIONS"); + if (!sendSimpleRequest("OPTIONS", m_baseUrl, nullptr, 0, m_hdrBuf, + sizeof(m_hdrBuf), nullptr, 0)) { + // Some servers expect "OPTIONS *" instead of URL + if (!sendSimpleRequest("OPTIONS", "*", nullptr, 0, m_hdrBuf, + sizeof(m_hdrBuf), nullptr, 0)) { + return fail("OPTIONS failed"); + } + } + + // DESCRIBE + LOGI("DESCRIBE"); + const char* describeExtra = "Accept: application/sdp\r\n"; + if (!sendSimpleRequest("DESCRIBE", m_baseUrl, describeExtra, + strlen(describeExtra), m_hdrBuf, sizeof(m_hdrBuf), + m_bodyBuf, sizeof(m_bodyBuf))) + return fail("DESCRIBE failed"); + + // Parse SDP (rtpmap) to capture payload and encoding + parseSdp(m_bodyBuf); + // Parse Content-Base for absolute/relative control resolution + parseContentBaseFromHeaders(m_hdrBuf); + // Parse a=control and build the correct track URL for SETUP + parseControlFromSdp(m_bodyBuf); + buildTrackUrlFromBaseAndControl(); + LOGI("RTSPClient: SETUP url: %s", m_trackUrl); + + // Prepare UDP (client_port) + if (!openUdpPorts()) return fail("UDP bind failed"); + + 
// SETUP with client_port pair + char transportHdr[128]; + snprintf(transportHdr, sizeof(transportHdr), + "Transport: RTP/AVP;unicast;client_port=%u-%u\r\n", + (unsigned)m_clientRtpPort, (unsigned)(m_clientRtpPort + 1)); + if (!sendSimpleRequest("SETUP", m_trackUrl, transportHdr, + strlen(transportHdr), m_hdrBuf, sizeof(m_hdrBuf), + nullptr, 0)) { + // Fallback: some servers require explicit UDP in transport profile + snprintf(transportHdr, sizeof(transportHdr), + "Transport: RTP/AVP/UDP;unicast;client_port=%u-%u\r\n", + (unsigned)m_clientRtpPort, (unsigned)(m_clientRtpPort + 1)); + if (!sendSimpleRequest("SETUP", m_trackUrl, transportHdr, + strlen(transportHdr), m_hdrBuf, sizeof(m_hdrBuf), + nullptr, 0)) { + return fail("SETUP failed"); + } + } + + // Parse Session and server_port from last headers + parseSessionFromHeaders(m_hdrBuf); + parseServerPortsFromHeaders(m_hdrBuf); + if (m_sessionId[0] == '\0') return fail("Missing Session ID"); + + // Prime UDP path to server RTP port (helps some networks/servers) + primeUdpPath(); + + // PLAY + char sessionHdr[128]; + snprintf(sessionHdr, sizeof(sessionHdr), "Session: %s\r\n", m_sessionId); + if (!sendSimpleRequest("PLAY", m_baseUrl, sessionHdr, strlen(sessionHdr), + m_hdrBuf, sizeof(m_hdrBuf), nullptr, 0)) { + // Some servers start streaming RTP immediately but delay/omit PLAY + // response Treat PLAY as successful if RTP arrives shortly after sending + // PLAY + if (sniffUdpFor(1500)) { + LOGW("RTSPClient: proceeding without PLAY response (RTP detected)"); + } else { + return fail("PLAY failed"); + } + } + + m_started = true; + m_isPlaying = true; + m_lastKeepaliveMs = millis(); + return true; + } + + /// returns true when streaming is active and a decoder is configured and we + /// have data + operator bool() { return m_started && mime() != nullptr && available() > 0; } + + /** + * @brief Stop streaming and close RTSP/UDP sockets. 
+ */ + void end() { + if (m_started) { + // best-effort TEARDOWN + if (m_tcp.connected()) { + char sessionHdr[128]; + if (m_sessionId[0]) { + snprintf(sessionHdr, sizeof(sessionHdr), "Session: %s\r\n", + m_sessionId); + sendSimpleRequest("TEARDOWN", m_baseUrl, sessionHdr, + strlen(sessionHdr), m_hdrBuf, sizeof(m_hdrBuf), + nullptr, 0, /*quiet*/ true); + } + } + } + + if (m_udp) { + m_udp->stop(); + delete m_udp; + m_udp = nullptr; + } + if (m_tcp.connected()) m_tcp.stop(); + m_started = false; + m_isPlaying = false; + } + + /** + * @brief Returns buffered RTP payload bytes available for copy(). + */ + int available() { + if (!m_started) { + delay(m_idleDelayMs); + return 0; + } + // keepalive regardless of play state + maybeKeepalive(); + if (!m_isPlaying) { + delay(m_idleDelayMs); + return 0; + } + serviceUdp(); + int avail = (int)(m_pktSize > m_pktPos ? (m_pktSize - m_pktPos) : 0); + if (avail == 0) delay(m_idleDelayMs); + return avail; + } + + /** + * @brief Best-effort MIME derived from SDP (e.g. audio/L16, audio/aac). + * @return MIME string or nullptr if unknown. 
+ */ + const char* mime() const { + // Prefer static RTP payload type mapping when available + switch (m_payloadType) { + case 0: // PCMU + return "audio/PCMU"; + case 3: // GSM + return "audio/gsm"; + case 4: // G723 + return "audio/g723"; + case 5: // DVI4/8000 (IMA ADPCM) + case 6: // DVI4/16000 + case 16: // DVI4/11025 + case 17: // DVI4/22050 + return "audio/adpcm"; + case 8: // PCMA + return "audio/PCMA"; + case 9: // G722 + return "audio/g722"; + case 10: // L16 stereo + case 11: // L16 mono + return "audio/L16"; + case 14: // MPA (MPEG audio / MP3) + return "audio/mpeg"; + default: + break; // dynamic or unknown; fall back to SDP encoding string + } + // Fallback: infer from SDP encoding token + if (strcasecmp(m_encoding, "L16") == 0) return "audio/L16"; + if (strcasecmp(m_encoding, "L8") == 0) return "audio/L8"; + if (strcasecmp(m_encoding, "PCMU") == 0) return "audio/PCMU"; + if (strcasecmp(m_encoding, "PCMA") == 0) return "audio/PCMA"; + if (strcasecmp(m_encoding, "GSM") == 0) return "audio/gsm"; + if (strcasecmp(m_encoding, "MPA") == 0) return "audio/mpeg"; // MP3 + if (strcasecmp(m_encoding, "MPEG4-GENERIC") == 0) return "audio/aac"; + if (strcasecmp(m_encoding, "OPUS") == 0) return "audio/opus"; + if (strcasecmp(m_encoding, "DVI4") == 0) return "audio/adpcm"; // IMA ADPCM + return nullptr; + } + + /** + * @brief RTP payload type from SDP (0xFF if unknown). + */ + uint8_t payloadType() const { return m_payloadType; } + + /** + * @brief Pause or resume playback via RTSP PAUSE/PLAY. 
+ * @param active true to PLAY, false to PAUSE + * @return true if command succeeded + */ + bool setActive(bool active) { + if (!m_started || !m_tcp.connected() || m_sessionId[0] == '\0') + return false; + if (active == m_isPlaying) return true; // no-op + + char sessionHdr[128]; + snprintf(sessionHdr, sizeof(sessionHdr), "Session: %s\r\n", m_sessionId); + bool ok; + if (active) { + ok = sendSimpleRequest("PLAY", m_baseUrl, sessionHdr, strlen(sessionHdr), + m_hdrBuf, sizeof(m_hdrBuf), nullptr, 0); + if (ok) m_isPlaying = true; + } else { + ok = sendSimpleRequest("PAUSE", m_baseUrl, sessionHdr, strlen(sessionHdr), + m_hdrBuf, sizeof(m_hdrBuf), nullptr, 0); + if (ok) { + m_isPlaying = false; + // drop any buffered payload + m_pktPos = m_pktSize = 0; + } + } + return ok; + } + + /** + * @brief Register a decoder to be auto-selected for the given MIME. + * @param mimeType MIME to match + * @param decoder AudioDecoder instance handling that MIME + */ + void addDecoder(const char* mimeType, AudioDecoder& decoder) { + m_multi_decoder.addDecoder(decoder, mimeType); + } + + /** + * @brief Copy the next buffered RTP payload into the decoder pipeline. + * Performs initial decoder selection based on SDP MIME. + * @return Bytes written to decoder, or 0 if none available. 
+ */ + size_t copy() { + if (!m_started) { + delay(m_idleDelayMs); + LOGW("not started"); + return 0; + } + maybeKeepalive(); + if (!m_isPlaying) { + delay(m_idleDelayMs); + LOGW("not playing"); + return 0; + } + serviceUdp(); + if (m_pktPos >= m_pktSize) { + LOGW("no data"); + delay(m_idleDelayMs); + return 0; + } + + // On first data, make sure decoder selection and audio info are applied + if (!m_decoderReady) { + const char* m = mime(); + if (m) { + LOGI("Selecting decoder: %s", m); + // Ensure network format decoder has correct PCM info + m_multi_decoder.selectDecoder(m); + m_multi_decoder.setAudioInfo(m_info); + if (m_multi_decoder.getOutput() != nullptr) { + m_multi_decoder.begin(); // start decoder only when output is defined + } + m_decoderReady = true; + } + } + + size_t n = m_pktSize - m_pktPos; + size_t written = m_multi_decoder.write(m_pktBuf.data() + m_pktPos, n); + m_pktPos = m_pktSize = 0; + LOGI("copy: %d -> %d", (int)n, (int)written); + return written; + } + + /** + * @brief Audio info parsed from SDP for raw PCM encodings. + * @return AudioInfo or default-constructed if not PCM. 
+ */ + AudioInfo audioInfo() override { return m_multi_decoder.audioInfo(); } + + void setAudioInfo(AudioInfo info) override { + m_multi_decoder.setAudioInfo(info); + } + + // AudioInfoSource forwarding: delegate notifications to MultiDecoder + void addNotifyAudioChange(AudioInfoSupport& bi) override { + m_multi_decoder.addNotifyAudioChange(bi); + } + bool removeNotifyAudioChange(AudioInfoSupport& bi) override { + return m_multi_decoder.removeNotifyAudioChange(bi); + } + void clearNotifyAudioChange() override { + m_multi_decoder.clearNotifyAudioChange(); + } + void setNotifyActive(bool flag) { m_multi_decoder.setNotifyActive(flag); } + bool isNotifyActive() { return m_multi_decoder.isNotifyActive(); } + + protected: + // Connection + TcpClient m_tcp; + UdpSocket* m_udp = nullptr; + IPAddress m_addr{}; + uint16_t m_port = 0; + + // RTSP state + uint32_t m_cseq = 1; + char m_baseUrl[96] = {0}; + char m_trackUrl[128] = {0}; + char m_contentBase[160] = {0}; + char m_sdpControl[128] = {0}; + char m_sessionId[64] = {0}; + uint16_t m_clientRtpPort = 0; // even + uint16_t m_serverRtpPort = 0; // optional from Transport response + bool m_started = false; + bool m_isPlaying = false; + uint32_t m_lastKeepaliveMs = 0; + const uint32_t m_keepaliveIntervalMs = 25000; // 25s + + // Buffers + audio_tools::Vector m_pktBuf{0}; + size_t m_pktPos = 0; + size_t m_pktSize = 0; + char m_hdrBuf[1024]; + char m_bodyBuf[1024]; + + // Decoder pipeline + MultiDecoder m_multi_decoder; + DecoderNetworkFormat m_decoder_net; + DecoderL8 m_decoder_l8; + bool m_decoderReady = false; + uint32_t m_idleDelayMs = 10; + uint8_t m_payloadOffset = 0; // extra bytes after RTP header/CSRCs + uint8_t m_connectRetries = 2; + uint32_t m_connectRetryDelayMs = 500; + uint32_t m_headerTimeoutMs = 4000; // header read timeout + + // Resampling pipeline + ResampleStream m_resampler; + float m_resampleStep = 1.0f; + // Sinks are set directly on the resampler + + // --- RTP/SDP fields --- + uint8_t m_payloadType = 
0xFF; // unknown by default + char m_encoding[32] = {0}; + AudioInfo m_info{0, 0, 0}; + + void resetState() { + m_sessionId[0] = '\0'; + m_serverRtpPort = 0; + m_clientRtpPort = 0; + m_cseq = 1; + m_pktBuf.resize(2048); + m_pktPos = m_pktSize = 0; + m_decoderReady = false; + // keep default decoders registered once per instance + } + + void buildUrls(const char* path) { + snprintf(m_baseUrl, sizeof(m_baseUrl), "rtsp://%u.%u.%u.%u:%u/", m_addr[0], + m_addr[1], m_addr[2], m_addr[3], (unsigned)m_port); + if (path && *path) { + const char* p = path; + if (*p == '/') ++p; // skip leading slash + size_t used = strlen(m_baseUrl); + size_t avail = sizeof(m_baseUrl) - used - 1; + if (avail > 0) strncat(m_baseUrl, p, avail); + // ensure trailing '/' + used = strlen(m_baseUrl); + if (used > 0 && m_baseUrl[used - 1] != '/') { + if (used + 1 < sizeof(m_baseUrl)) { + m_baseUrl[used] = '/'; + m_baseUrl[used + 1] = '\0'; + } + } + } + snprintf(m_trackUrl, sizeof(m_trackUrl), "%strackID=0", m_baseUrl); + } + + // Always routed: MultiDecoder -> Resampler -> User sink + + bool openUdpPorts() { + // Try a few even RTP ports starting at 5004 + for (uint16_t p = 5004; p < 65000; p += 2) { + UdpSocket* s = new UdpSocket(); + if (s->begin(p)) { + LOGI("RTSPClient: bound UDP RTP port %u", (unsigned)p); + m_udp = s; + m_clientRtpPort = p; + return true; + } + delete s; + } + return false; + } + + bool fail(const char* msg) { + LOGE("RTSPClient: %s", msg); + end(); + return false; + } + + void maybeKeepalive() { + if (!m_started || !m_tcp.connected()) return; + uint32_t now = millis(); + if (now - m_lastKeepaliveMs < m_keepaliveIntervalMs) return; + m_lastKeepaliveMs = now; + char sessionHdr[128]; + if (m_sessionId[0]) { + snprintf(sessionHdr, sizeof(sessionHdr), "Session: %s\r\n", m_sessionId); + sendSimpleRequest("OPTIONS", m_baseUrl, sessionHdr, strlen(sessionHdr), + m_hdrBuf, sizeof(m_hdrBuf), nullptr, 0, /*quiet*/ true); + } else { + sendSimpleRequest("OPTIONS", m_baseUrl, nullptr, 0, 
m_hdrBuf, + sizeof(m_hdrBuf), nullptr, 0, /*quiet*/ true); + } + } + + void serviceUdp() { + // Keep RTSP session alive + maybeKeepalive(); + + if (!m_udp) return; + if (m_pktPos < m_pktSize) return; // still have data buffered + + int packetSize = m_udp->parsePacket(); + if (packetSize <= 0) return; + + if ((size_t)packetSize > m_pktBuf.size()) m_pktBuf.resize(packetSize); + int n = m_udp->read(m_pktBuf.data(), packetSize); + if (n <= 12) return; // too small to contain RTP + + // Very basic RTP parsing: skip 12-byte header + size_t payloadOffset = 12; + uint8_t cc = m_pktBuf[0] & 0x0F; + uint8_t payloadType = m_pktBuf[1] & 0x7F; + payloadOffset += cc * 4; // skip CSRCs if present + // Apply any configured additional payload offset (e.g., RFC2250) + payloadOffset += m_payloadOffset; + if (payloadOffset >= (size_t)n) return; + + m_pktPos = 0; + m_pktSize = n - payloadOffset; + // move payload to beginning for contiguous read + memmove(m_pktBuf.data(), m_pktBuf.data() + payloadOffset, m_pktSize); + } + + void primeUdpPath() { + if (!m_udp) return; + if (m_serverRtpPort == 0) return; + // Send a tiny datagram to server RTP port to open NAT/flows + // Not required by RTSP, but improves interoperability + for (int i = 0; i < 2; ++i) { + m_udp->beginPacket(m_addr, m_serverRtpPort); + uint8_t b = 0x00; + m_udp->write(&b, 1); + m_udp->endPacket(); + delay(2); + } + } + + bool sniffUdpFor(uint32_t ms) { + if (!m_udp) return false; + uint32_t start = millis(); + while ((millis() - start) < ms) { + int packetSize = m_udp->parsePacket(); + if (packetSize > 0) { + // restore to be processed by normal path + return true; + } + delay(5); + } + return false; + } + + bool sendSimpleRequest(const char* method, const char* url, + const char* extraHeaders, size_t extraLen, + char* outHeaders, size_t outHeadersLen, char* outBody, + size_t outBodyLen, bool quiet = false) { + // Build request + char reqStart[256]; + int reqLen = snprintf( + reqStart, sizeof(reqStart), + "%s %s 
RTSP/1.0\r\nCSeq: %u\r\nUser-Agent: ArduinoAudioTools\r\n", + method, url, (unsigned)m_cseq++); + if (reqLen <= 0) return false; + + // Send start line + mandatory headers + if (m_tcp.write((const uint8_t*)reqStart, reqLen) != (size_t)reqLen) + return false; + // Optional extra headers + if (extraHeaders && extraLen) { + if (m_tcp.write((const uint8_t*)extraHeaders, extraLen) != extraLen) + return false; + } + // End of headers + const char* end = "\r\n"; + if (m_tcp.write((const uint8_t*)end, 2) != 2) return false; + + m_tcp.flush(); + + // Read response headers until CRLFCRLF + int hdrUsed = 0; + memset(outHeaders, 0, outHeadersLen); + if (!readUntilDoubleCRLF(outHeaders, outHeadersLen, hdrUsed, + m_headerTimeoutMs)) { + if (!quiet) LOGE("RTSPClient: header read timeout"); + return false; + } + + // Optionally read body based on Content-Length + int contentLen = parseContentLength(outHeaders); + if (outBody && outBodyLen && contentLen > 0) { + int toRead = contentLen; + if (toRead >= (int)outBodyLen) toRead = (int)outBodyLen - 1; + int got = readExact((uint8_t*)outBody, toRead, 2000); + if (got < 0) return false; + outBody[got] = '\0'; + } + return true; + } + + bool readUntilDoubleCRLF(char* buf, size_t buflen, int& used, + uint32_t timeoutMs = 3000) { + uint32_t start = millis(); + used = 0; + int state = 0; // match \r\n\r\n + while ((millis() - start) < timeoutMs && used < (int)buflen - 1) { + int avail = m_tcp.available(); + if (avail <= 0) { + delay(5); + continue; + } + int n = m_tcp.read((uint8_t*)buf + used, 1); + if (n == 1) { + char c = buf[used++]; + switch (state) { + case 0: + state = (c == '\r') ? 1 : 0; + break; + case 1: + state = (c == '\n') ? 2 : 0; + break; + case 2: + state = (c == '\r') ? 3 : 0; + break; + case 3: + state = (c == '\n') ? 
4 : 0; + break; + } + if (state == 4) { + buf[used] = '\0'; + return true; + } + } + } + buf[used] = '\0'; + return false; + } + + int readExact(uint8_t* out, int len, uint32_t timeoutMs) { + uint32_t start = millis(); + int got = 0; + while (got < len && (millis() - start) < timeoutMs) { + int a = m_tcp.available(); + if (a <= 0) { + delay(5); + continue; + } + int n = m_tcp.read(out + got, len - got); + if (n > 0) got += n; + } + return (got == len) ? got : got; // partial OK for DESCRIBE + } + + static int parseContentLength(const char* headers) { + const char* p = strcasestr(headers, "Content-Length:"); + if (!p) return 0; + int len = 0; + if (sscanf(p, "Content-Length: %d", &len) == 1) return len; + return 0; + } + + void parseSessionFromHeaders(const char* headers) { + const char* p = strcasestr(headers, "Session:"); + if (!p) return; + p += 8; // skip "Session:" + while (*p == ' ' || *p == '\t') ++p; + size_t i = 0; + while (*p && *p != '\r' && *p != '\n' && *p != ';' && + i < sizeof(m_sessionId) - 1) { + m_sessionId[i++] = *p++; + } + m_sessionId[i] = '\0'; + } + + void parseServerPortsFromHeaders(const char* headers) { + const char* t = strcasestr(headers, "Transport:"); + if (!t) return; + const char* s = strcasestr(t, "server_port="); + if (!s) return; + s += strlen("server_port="); + int a = 0, b = 0; + if (sscanf(s, "%d-%d", &a, &b) == 2) { + m_serverRtpPort = (uint16_t)a; + } + } + + // --- SDP parsing (rtpmap) --- + void parseSdp(const char* sdp) { + if (!sdp) return; + const char* p = sdp; + while ((p = strcasestr(p, "a=rtpmap:")) != nullptr) { + p += 9; // after a=rtpmap: + int pt = 0; + if (sscanf(p, "%d", &pt) != 1) continue; + const char* space = strchr(p, ' '); + if (!space) continue; + ++space; + // encoding up to '/' or endline + size_t i = 0; + while (space[i] && space[i] != '/' && space[i] != '\r' && + space[i] != '\n' && i < sizeof(m_encoding) - 1) { + m_encoding[i] = space[i]; + ++i; + } + m_encoding[i] = '\0'; + int rate = 0, ch = 0; + 
const char* afterEnc = space + i; + if (*afterEnc == '/') { + ++afterEnc; + if (sscanf(afterEnc, "%d/%d", &rate, &ch) < 1) { + rate = 0; + ch = 0; + } + } + m_payloadType = (uint8_t)pt; + // Fill AudioInfo only for raw PCM encodings + if (strcasecmp(m_encoding, "L16") == 0) { + m_info = AudioInfo(rate, (ch > 0 ? ch : (ch == 0 ? 1 : ch)), 16); + } else if (strcasecmp(m_encoding, "L8") == 0) { + m_info = AudioInfo(rate, (ch > 0 ? ch : (ch == 0 ? 1 : ch)), 8); + } else { + m_info = AudioInfo(); + } + m_multi_decoder.setAudioInfo(m_info); + + return; // first match + } + } + + // --- Content-Base header parsing --- + void parseContentBaseFromHeaders(const char* headers) { + m_contentBase[0] = '\0'; + if (!headers) return; + const char* p = strcasestr(headers, "Content-Base:"); + if (!p) return; + p += strlen("Content-Base:"); + while (*p == ' ' || *p == '\t') ++p; + size_t i = 0; + while (*p && *p != '\r' && *p != '\n' && i < sizeof(m_contentBase) - 1) { + m_contentBase[i++] = *p++; + } + m_contentBase[i] = '\0'; + // Ensure trailing '/' + if (i > 0 && m_contentBase[i - 1] != '/') { + if (i + 1 < sizeof(m_contentBase)) { + m_contentBase[i++] = '/'; + m_contentBase[i] = '\0'; + } + } + } + + // --- SDP control parsing --- + void parseControlFromSdp(const char* sdp) { + m_sdpControl[0] = '\0'; + if (!sdp) return; + const char* audio = strcasestr(sdp, "\nm=audio "); + const char* searchStart = sdp; + const char* searchEnd = nullptr; + if (audio) { + // find end of this media block (next m= or end) + searchStart = audio; + const char* nextm = strcasestr(audio + 1, "\nm="); + searchEnd = nextm ? 
nextm : (sdp + strlen(sdp)); + } else { + // fall back to session-level + searchStart = sdp; + searchEnd = sdp + strlen(sdp); + } + const char* p = searchStart; + while (p && p < searchEnd) { + const char* ctrl = strcasestr(p, "a=control:"); + if (!ctrl || ctrl >= searchEnd) break; + ctrl += strlen("a=control:"); + // copy value until CR/LF + size_t i = 0; + while (ctrl[i] && ctrl[i] != '\r' && ctrl[i] != '\n' && + i < sizeof(m_sdpControl) - 1) { + m_sdpControl[i] = ctrl[i]; + ++i; + } + m_sdpControl[i] = '\0'; + break; + } + } + + bool isAbsoluteRtspUrl(const char* url) { + if (!url) return false; + return (strncasecmp(url, "rtsp://", 7) == 0) || + (strncasecmp(url, "rtsps://", 8) == 0); + } + + void buildTrackUrlFromBaseAndControl() { + // default fallback if no control provided + if (m_sdpControl[0] == '\0') { + snprintf(m_trackUrl, sizeof(m_trackUrl), "%strackID=0", m_baseUrl); + return; + } + if (isAbsoluteRtspUrl(m_sdpControl)) { + strncpy(m_trackUrl, m_sdpControl, sizeof(m_trackUrl) - 1); + m_trackUrl[sizeof(m_trackUrl) - 1] = '\0'; + return; + } + const char* base = (m_contentBase[0] ? 
m_contentBase : m_baseUrl); + size_t blen = strlen(base); + // Construct base ensuring single '/' + char tmp[256]; + size_t pos = 0; + for (; pos < sizeof(tmp) - 1 && pos < blen; ++pos) tmp[pos] = base[pos]; + if (pos > 0 && tmp[pos - 1] != '/' && pos < sizeof(tmp) - 1) + tmp[pos++] = '/'; + // If control starts with '/', skip one to avoid '//' + const char* ctrl = m_sdpControl; + if (*ctrl == '/') ++ctrl; + while (*ctrl && pos < sizeof(tmp) - 1) tmp[pos++] = *ctrl++; + tmp[pos] = '\0'; + strncpy(m_trackUrl, tmp, sizeof(m_trackUrl) - 1); + m_trackUrl[sizeof(m_trackUrl) - 1] = '\0'; + } + + // resampler is started in constructor; audio info will be set dynamically +}; + +} // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP/RTSPClientEthernet.h b/src/AudioTools/Communication/RTSP/RTSPClientEthernet.h new file mode 100644 index 0000000000..ed50ea3e19 --- /dev/null +++ b/src/AudioTools/Communication/RTSP/RTSPClientEthernet.h @@ -0,0 +1,29 @@ +#pragma once +#include "Ethernet.h" +#include "EthernetUdp.h" +#include "RTSPClient.h" + +namespace audio_tools { + +/** + * @brief Ethernet RTSP client alias using Arduino Ethernet networking. + * + * Convenience alias for `RTSPClient`, which uses + * `EthernetClient` for RTSP TCP control and `EthernetUDP` for RTP. 
+ * + * Example: + * @code{.cpp} + * I2SStream i2s; // your audio sink + * RTSPClientEthernet client{i2s}; // decode to i2s + * IPAddress cam(192,168,1,20); + * client.begin(cam, 554, "stream1"); // optional path + * while (true) { + * client.copy(); // push next RTP payload to decoder + * } + * @endcode + * @ingroup rtsp + * @author Phil Schatzmann + */ +using RTSPClientEthernet = RTSPClient; + +} // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP/RTSPClientWiFi.h b/src/AudioTools/Communication/RTSP/RTSPClientWiFi.h new file mode 100644 index 0000000000..d2cce0238b --- /dev/null +++ b/src/AudioTools/Communication/RTSP/RTSPClientWiFi.h @@ -0,0 +1,28 @@ +#pragma once +#include "RTSPClient.h" +#include "WiFi.h" + +namespace audio_tools { + +/** + * @brief WiFi RTSP client alias using Arduino WiFi networking. + * + * Convenience alias for `RTSPClient`, which uses + * `WiFiClient` for RTSP TCP control and `WiFiUDP` for RTP. + * + * Example: + * @code{.cpp} + * I2SStream i2s; // your audio sink + * RTSPClientWiFi client{i2s}; // decode to i2s + * IPAddress cam(192,168,1,20); + * client.begin(cam, 554, "stream1"); // optional path + * while (true) { + * client.copy(); // push next RTP payload to decoder + * } + * @endcode + * @ingroup rtsp + * @author Phil Schatzmann + */ +using RTSPClientWiFi = RTSPClient; + +} // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP/RTSPFormat.h b/src/AudioTools/Communication/RTSP/RTSPFormat.h index 76c73254d9..1e21799a0e 100644 --- a/src/AudioTools/Communication/RTSP/RTSPFormat.h +++ b/src/AudioTools/Communication/RTSP/RTSPFormat.h @@ -35,7 +35,6 @@ namespace audio_tools { * @note Implementations must provide format() method for SDP generation * @ingroup rtsp * @author Phil Schatzmann - * @version 0.1.1 */ class RTSPFormat { diff --git a/src/AudioTools/Communication/RTSP/RTSPOutput.h b/src/AudioTools/Communication/RTSP/RTSPOutput.h index 
8eb5121bd3..f500882cd7 100644 --- a/src/AudioTools/Communication/RTSP/RTSPOutput.h +++ b/src/AudioTools/Communication/RTSP/RTSPOutput.h @@ -6,14 +6,6 @@ #include "RTSPAudioStreamer.h" #include "RTSPFormat.h" -/** - * @defgroup rtsp RTSP Streaming - * @ingroup communications - * @file RTSPOutput.h - * @author Phil Schatzmann - * @copyright GPLv3 - */ - namespace audio_tools { /** diff --git a/src/AudioTools/Communication/RTSP/RTSPPlatform.h b/src/AudioTools/Communication/RTSP/RTSPPlatform.h index 757ae5912c..3834ae2cf6 100644 --- a/src/AudioTools/Communication/RTSP/RTSPPlatform.h +++ b/src/AudioTools/Communication/RTSP/RTSPPlatform.h @@ -15,7 +15,9 @@ namespace audio_tools { * This template class provides a unified interface for network operations * across different platforms while maintaining type safety. The template * parameters allow customization of the underlying network implementation. - * + * @ingroup rtsp + * @author Phil Schatzmann + * * @tparam TcpClient TCP client implementation (e.g., WiFiClient, * EthernetClient) * @tparam UdpSocket UDP socket implementation (e.g., WiFiUDP, EthernetUDP) diff --git a/src/AudioTools/Communication/RTSP/RTSPServer.h b/src/AudioTools/Communication/RTSP/RTSPServer.h index 46711f8576..b77372e39b 100644 --- a/src/AudioTools/Communication/RTSP/RTSPServer.h +++ b/src/AudioTools/Communication/RTSP/RTSPServer.h @@ -30,6 +30,9 @@ namespace audio_tools { * - Coordinates with RTSPAudioStreamer for RTP audio delivery * - Runs asynchronously using AudioTools Task system * + * @ingroup rtsp + * @author Phil Schatzmann + * * @section protocol RTSP Protocol Support * - DESCRIBE: Returns SDP session description with audio format * - SETUP: Establishes RTP transport parameters @@ -39,9 +42,6 @@ namespace audio_tools { * - OPTIONS: Returns supported RTSP methods * * @note Supports multiple platforms through AudioTools Task and Timer systems - * @ingroup rtsp - * @author Thomas Pfitzinger - * @version 0.2.0 */ template class RTSPServer { @@ 
-81,6 +81,21 @@ class RTSPServer { */ ~RTSPServer() { stop(); } + /** + * @brief Set a callback to receive the RTSP URL path for each new session. + * The callback is forwarded to every RtspSession and invoked once per session + * after the first request is parsed. + * + * Return semantics: + * - true: accept session and continue normal RTSP handling + * - false: reject session; the session will be marked closed and no + * responses will be sent for the pending request + */ + void setOnSessionPath(bool (*cb)(const char* path, void* ref), void* ref = nullptr) { + onSessionPathCb = cb; + onSessionPathRef = ref; + } + /** * @brief Initialize WiFi and start RTSP server * @@ -142,6 +157,8 @@ class RTSPServer { int client_count = 0; // number of connected clients streamer_t* streamer = nullptr; // RTSPAudioStreamer object that acts as a // source for data streams + bool (*onSessionPathCb)(const char*, void*) = nullptr; // session path callback + void* onSessionPathRef = nullptr; /** * @brief Start RTSP server asynchronously @@ -252,6 +269,9 @@ class RTSPServer { // our threads RTSP session and state RtspSession* rtsp = new RtspSession(*s, *streamer); + if (onSessionPathCb) { + rtsp->setOnSessionPath(onSessionPathCb, onSessionPathRef); + } assert(rtsp != nullptr); LOGI("Session ready"); diff --git a/src/AudioTools/Communication/RTSP/RTSPSession.h b/src/AudioTools/Communication/RTSP/RTSPSession.h index 6fcf71426a..e0ea766c1b 100644 --- a/src/AudioTools/Communication/RTSP/RTSPSession.h +++ b/src/AudioTools/Communication/RTSP/RTSPSession.h @@ -70,9 +70,8 @@ enum RTSP_CMD_TYPES { * @note This class is typically instantiated by RTSPServer, not directly by * users * @note Requires a configured RTSPAudioStreamer for media delivery - * @author Thomas Pfitzinger + * @author Phil Schatzmann * @ingroup rtsp - * @version 0.2.0 */ template class RtspSession { @@ -166,6 +165,10 @@ class RtspSession { (mRecvBuf[0] == 'S') || (mRecvBuf[0] == 'P') || (mRecvBuf[0] == 'T')) { RTSP_CMD_TYPES 
C = handleRtspRequest(mRecvBuf.data(), res); + if (!m_sessionOpen) { + // Session was aborted (e.g., rejected by callback); end quickly + return false; + } // TODO this should go in the handling functions if (C == RTSP_PLAY) { m_streaming = true; @@ -208,6 +211,22 @@ class RtspSession { bool isStreaming() { return m_streaming; } + /** + * @brief Set a callback to receive the RTSP URL path that opened the session. + * The callback is invoked once, after the first request is parsed, with the + * path portion of the RTSP URL (starting with '/'). A user reference is + * provided back on invocation. + * + * Return semantics: + * - true: accept session and continue normal RTSP handling + * - false: reject session; the session will be marked closed and no + * responses will be sent for the pending request + */ + void setOnSessionPath(bool (*cb)(const char* path, void* ref), void* ref = nullptr) { + m_onSessionPath = cb; + m_onSessionPathRef = ref; + } + protected: const char* STD_URL_PRE_SUFFIX = "trackID"; @@ -228,6 +247,7 @@ class RtspSession { audio_tools::Vector m_URLSuffix; // stream name suffix audio_tools::Vector m_CSeq; // RTSP command sequence number audio_tools::Vector m_URLHostPort; // host:port part of the URL + audio_tools::Vector m_URLPath; // full RTSP path (starting with '/') unsigned m_ContentLength; // SDP string size uint16_t m_RtpClientPort = 0; // RTP receiver port on client (in host byte order!) 
@@ -250,6 +270,9 @@ class RtspSession { bool m_is_init = false; bool m_streaming = false; volatile bool m_sessionOpen = true; + bool m_pathNotified = false; + bool (*m_onSessionPath)(const char* path, void* ref) = nullptr; + void* m_onSessionPathRef = nullptr; /** * Initializes memory and buffers @@ -285,6 +308,9 @@ class RtspSession { if (m_URLHostPort.size() == 0) { m_URLHostPort.resize(MAX_HOSTNAME_LEN); } + if (m_URLPath.size() == 0) { + m_URLPath.resize(RTSP_URL_BUFFER_SIZE); + } if (m_Response.size() == 0) { m_Response.resize(RTSP_RESPONSE_BUFFER_SIZE); } @@ -309,11 +335,13 @@ class RtspSession { memset(m_URLSuffix.data(), 0x00, m_URLSuffix.size()); memset(m_CSeq.data(), 0x00, m_CSeq.size()); memset(m_URLHostPort.data(), 0x00, m_URLHostPort.size()); + if (m_URLPath.size() > 0) memset(m_URLPath.data(), 0x00, m_URLPath.size()); m_ContentLength = 0; m_TransportIsTcp = false; m_InterleavedRtp = -1; m_InterleavedRtcp = -1; m_is_init = true; + m_pathNotified = false; } /** @@ -382,6 +410,10 @@ class RtspSession { return false; determineCommandType(); parseUrlHostPortAndSuffix(mCurRequest.data(), CurRequestSize, idxAfterCmd); + if (!m_sessionOpen) { + // Aborted by callback during URL parse; don't proceed further + return false; + } // 3) CSeq and Content-Length if (!parseCSeq(mCurRequest.data(), CurRequestSize, idxAfterCmd)) @@ -510,6 +542,26 @@ class RtspSession { } LOGD("m_URLHostPort: %s", m_URLHostPort.data()); + // Extract full RTSP path starting at current index i up to next space + if (i < reqSize && req[i] == '/') { + unsigned p = 0; + unsigned k = i; + while (k < reqSize && req[k] != ' ' && p < m_URLPath.size() - 1) { + m_URLPath[p++] = req[k++]; + } + m_URLPath[p] = '\0'; + LOGD("m_URLPath: %s", m_URLPath.data()); + if (!m_pathNotified && m_onSessionPath) { + bool ok = m_onSessionPath(m_URLPath.data(), m_onSessionPathRef); + m_pathNotified = true; + if (!ok) { + LOGW("Session rejected by onSessionPath callback"); + m_sessionOpen = false; + // Early exit: 
abort further parsing of this request + } + } + } + bool ok = false; for (unsigned k = i + 1; (int)k < (int)(reqSize - 5); ++k) { if (req[k] == 'R' && req[k + 1] == 'T' && req[k + 2] == 'S' && req[k + 3] == 'P' && req[k + 4] == '/') { From 6fc2e5ae4300f778fb6115641287c6a83958aeda Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 06:47:07 +0200 Subject: [PATCH 3/4] Move to Communication from AudioLibs --- .../hls/hls-buffer-i2s/hls-buffer-i2s.ino | 2 +- .../hls/hls-i2s/hls-i2s.ino | 2 +- .../streams-url_mts-hex.ino | 2 +- .../communication-rtsp-audiokit.ino | 88 +- .../communication-rtsp555-audiokit.ino | 28 + .../communication-rtsp555-i2s.ino} | 2 +- .../player-sdmmc-vban/player-sdmmc-vban.ino | 2 +- .../streams-audiokit-vban.ino | 2 +- .../streams-generator-vban.ino | 2 +- .../streams-vban-audiokit.ino | 2 +- src/AudioTools/AudioLibs/AudioClientRTSP.h | 722 +--------------- src/AudioTools/AudioLibs/HLSStream.h | 784 +----------------- src/AudioTools/AudioLibs/README.md | 2 +- src/AudioTools/AudioLibs/VBANStream.h | 595 +------------ src/AudioTools/Communication/HLSStream.h | 781 +++++++++++++++++ .../HLSStreamESP32.h | 0 src/AudioTools/Communication/README.md | 2 +- src/AudioTools/Communication/RTSP.h | 10 + .../Communication/RTSP/IAudioSource.h | 1 - src/AudioTools/Communication/RTSPClient555.h | 721 ++++++++++++++++ .../vban => Communication/VBAN}/vban.h | 0 src/AudioTools/Communication/VBANStream.h | 592 +++++++++++++ 22 files changed, 2215 insertions(+), 2127 deletions(-) create mode 100644 examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino rename examples/examples-communication/rtsp/{communication-rtsp-i2s/communication-rtsp-i2s.ino => communication-rtsp555-i2s/communication-rtsp555-i2s.ino} (97%) create mode 100644 src/AudioTools/Communication/HLSStream.h rename src/AudioTools/{AudioLibs => Communication}/HLSStreamESP32.h (100%) create mode 100644 src/AudioTools/Communication/RTSPClient555.h 
rename src/AudioTools/{AudioLibs/vban => Communication/VBAN}/vban.h (100%) create mode 100644 src/AudioTools/Communication/VBANStream.h diff --git a/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino b/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino index 992cf35e60..62f61a7200 100644 --- a/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino +++ b/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino @@ -11,7 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecHelix.h" #include "AudioTools/AudioCodecs/CodecMTS.h" -#include "AudioTools/AudioLibs/HLSStream.h" +#include "AudioTools/Communication/HLSStream.h" #include "AudioTools/Concurrency/RTOS.h" // #include "AudioTools/AudioLibs/AudioBoardStream.h" diff --git a/examples/examples-communication/hls/hls-i2s/hls-i2s.ino b/examples/examples-communication/hls/hls-i2s/hls-i2s.ino index ffdfd338dd..b9f195de45 100644 --- a/examples/examples-communication/hls/hls-i2s/hls-i2s.ino +++ b/examples/examples-communication/hls/hls-i2s/hls-i2s.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/HLSStream.h" +#include "AudioTools/Communication/HLSStream.h" #include "AudioTools/AudioCodecs/CodecHelix.h" //#include "AudioTools/AudioLibs/AudioBoardStream.h" diff --git a/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino b/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino index f180512d93..f64ac47872 100644 --- a/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino +++ b/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino @@ -7,7 +7,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMTS.h" -#include "AudioTools/AudioLibs/HLSStream.h" +#include "AudioTools/Communication/HLSStream.h" HexDumpOutput out(Serial); HLSStream hls_stream("SSID", "password"); diff --git 
a/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino b/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino index 3243ecd693..c69f92a3d9 100644 --- a/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino +++ b/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino @@ -1,28 +1,72 @@ /** - * @file communication-rtsp-i2s.ino - * @author Phil Schatzmann - * @brief Demo for RTSP Client that is playing mp3. I tested with the live555 server with linux - * @version 0.1 - * @date 2022-05-02 - * - * @copyright Copyright (c) 2022 - * + * @file communication-rtsp-audiokit.ino + * @brief RTSP client demo using the new UDP/RTP client and AudioKit output. + * Connects to an RTSP server, decodes audio via MultiDecoder, and plays + * out via `AudioBoardStream` (AudioKit ES8388). Tested with RTSP + * servers. Requires WiFi on ESP32. 
+ * + * Steps: + * - Update WiFi credentials and RTSP server address/path below + * - Builds a fixed pipeline: MultiDecoder -> ResampleStream -> AudioKit output + * - Call client.copy() in loop to push received RTP payloads into decoders */ -#include "AudioTools.h" // https://github.com/pschatzmann/arduino-audio-tools -#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix -#include "AudioTools/AudioLibs/AudioBoardStream.h" // https://github.com/pschatzmann/arduino-audio-driver -#include "AudioTools/AudioLibs/AudioClientRTSP.h" // install https://github.com/pschatzmann/arduino-live555 - -AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream -EncodedAudioStream out_mp3(&i2s, new MP3DecoderHelix()); // Decoding stream -AudioClientRTSP rtsp(1024); - -void setup(){ - rtsp.setLogin("ssid", "password"); - rtsp.begin("https://samples.mplayerhq.hu/A-codecs/MP3/01%20-%20Charity%20Case.mp3", out_mp3); +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecADPCM.h" +#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix +#include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/RTSP.h" // brings RTSPClientWiFi alias + +const char* SSID = "ssid"; +const char* PASS = "password"; +IPAddress srv(192, 168, 1, 39); // change to your RTSP server IP +const uint16_t rtspPort = 8554; // typical RTSP port +const char* rtspPath = + "stream"; // change to your RTSP server path (e.g., "audio", "stream1") +AudioBoardStream i2s(AudioKitEs8388V1); +RTSPClientWiFi client(i2s); +MP3DecoderHelix mp3; // Decoder for "audio/mpeg" (MP3) payloads +ADPCMDecoder adpcm(AV_CODEC_ID_ADPCM_IMA_WAV, 512); // ima adpcm decoder + +void startWiFi() { + WiFi.begin(SSID, PASS); + Serial.print("Connecting to WiFi"); + while (WiFi.status() != WL_CONNECTED) { + delay(500); + Serial.print("."); + } + Serial.println(); + Serial.print("WiFi connected, IP: "); + 
Serial.println(WiFi.localIP()); + WiFi.setSleep(false); +} + +void setup() { + Serial.begin(115200); + AudioToolsLogger.begin(Serial, AudioToolsLogLevel::Info); + + // Connect WiFi + startWiFi(); + + // Configure and start I2S/AudioKit output + auto cfg = i2s.defaultConfig(TX_MODE); + cfg.sd_active = false; + i2s.begin(cfg); + + // Start RTSP session + client.addDecoder("audio/mpeg", mp3); + client.addDecoder("audio/adpcm", adpcm); + client.setResampleFactor(1.0); // no resampling + // Servers often require a concrete path; also extend header timeout if needed + client.setHeaderTimeoutMs(8000); + if (!client.begin(srv, rtspPort, rtspPath)) { + Serial.println("Failed to start RTSP client"); + stop(); + } + Serial.println("RTSP client started"); } void loop() { - rtsp.loop(); -} \ No newline at end of file + // Push next available RTP payload to decoder chain + client.copy(); +} diff --git a/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino b/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino new file mode 100644 index 0000000000..191cd590f0 --- /dev/null +++ b/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino @@ -0,0 +1,28 @@ + +/** + * @file communication-rtsp555-audiokit.ino + * @author Phil Schatzmann + * @brief Demo for RTSP Client that is playing mp3. 
I tested with the live555 server with linux + * @version 0.1 + * @date 2022-05-02 + * + * @copyright Copyright (c) 2022 + * + */ +#include "AudioTools.h" // https://github.com/pschatzmann/arduino-audio-tools +#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix +#include "AudioTools/AudioLibs/AudioBoardStream.h" // https://github.com/pschatzmann/arduino-audio-driver +#include "AudioTools/Communication/RTSPClient555.h" // install https://github.com/pschatzmann/arduino-live555 + +AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream +EncodedAudioStream out_mp3(&i2s, new MP3DecoderHelix()); // Decoding stream +AudioClientRTSP rtsp(1024); + +void setup(){ + rtsp.setLogin("ssid", "password"); + rtsp.begin("https://samples.mplayerhq.hu/A-codecs/MP3/01%20-%20Charity%20Case.mp3", out_mp3); +} + +void loop() { + rtsp.loop(); +} \ No newline at end of file diff --git a/examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino b/examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino similarity index 97% rename from examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino rename to examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino index 00bb1f1605..b141386e49 100644 --- a/examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino +++ b/examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino @@ -1,5 +1,5 @@ /** - * @file communication-rtsp-i2s.ino + * @file communication-rtsp555-i2s.ino * @author Phil Schatzmann * @brief Demo for RTSP Client that is playing mp3: tested with the live555 server with linux * @version 0.1 diff --git a/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino b/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino index 
3dd3f51443..67f917a10c 100644 --- a/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino +++ b/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino @@ -7,7 +7,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/Disk/AudioSourceSDMMC.h" // or AudioSourceIdxSDMMC.h const char *startFilePath="/"; diff --git a/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino b/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino index c356859ded..15e20e632c 100644 --- a/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino +++ b/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" // comment out when not using AudioKit AudioInfo info(44100, 2, 16); diff --git a/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino b/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino index 2a149c8342..5c9298f118 100644 --- a/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino +++ b/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" AudioInfo info(44100, 2, 16); SineWaveGenerator sineWave(32000); // subclass of SoundGenerator with max amplitude of 32000 diff --git a/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino 
b/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino index ae15b4d19b..05861886b4 100644 --- a/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino +++ b/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" // comment out when not using AudioKit AudioBoardStream out(AudioKitEs8388V1); // Audio source e.g. replace with I2SStream diff --git a/src/AudioTools/AudioLibs/AudioClientRTSP.h b/src/AudioTools/AudioLibs/AudioClientRTSP.h index 27517807a1..afb9193d07 100644 --- a/src/AudioTools/AudioLibs/AudioClientRTSP.h +++ b/src/AudioTools/AudioLibs/AudioClientRTSP.h @@ -1,721 +1,3 @@ - #pragma once - -/** -This library is free software; you can redistribute it and/or modify it under -the terms of the GNU Lesser General Public License as published by the -Free Software Foundation; either version 3 of the License, or (at your -option) any later version. (See .) - -This library is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for -more details. - -You should have received a copy of the GNU Lesser General Public License -along with this library; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -**/ - -// Copyright (c) 1996-2023, Live Networks, Inc. All rights reserved -// A demo application, showing how to create and run a RTSP client (that can -// potentially receive multiple streams concurrently). 
-// - -#include "AudioLogger.h" -#include "Print.h" // Arduino Print -// include live555 -#include "BasicUsageEnvironment.hh" -//#include "liveMedia.hh" -#include "RTSPClient.hh" - -// By default, we request that the server stream its data using RTP/UDP. -// If, instead, you want to request that the server stream via RTP-over-TCP, -// change the following to True: -#define REQUEST_STREAMING_OVER_TCP false - -// by default, print verbose output from each "RTSPClient" -#define RTSP_CLIENT_VERBOSITY_LEVEL 1 -// Even though we're not going to be doing anything with the incoming data, we -// still need to receive it. Define the size of the buffer that we'll use: -#define RTSP_SINK_BUFFER_SIZE 1024 - -// If you don't want to see debugging output for each received frame, then -// comment out the following line: -#undef DEBUG_PRINT_EACH_RECEIVED_FRAME -#define DEBUG_PRINT_EACH_RECEIVED_FRAME 0 - -/// @brief AudioTools internal: rtsp -namespace audiotools_rtsp { - -class OurRTSPClient; -// The main streaming routine (or each "rtsp://" URL): -OurRTSPClient * openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); -// Counts how many streams (i.e., "RTSPClient"s) are currently in use. -static unsigned rtspClientCount = 0; -static char rtspEventLoopWatchVariable = 0; -static Print* rtspOutput = nullptr; -static uint32_t rtspSinkReceiveBufferSize = 0; -static bool rtspUseTCP = REQUEST_STREAMING_OVER_TCP; - -} // namespace audiotools_rtsp - -namespace audio_tools { - -/** - * @brief A simple RTSPClient using https://github.com/pschatzmann/arduino-live555 - * @ingroup communications - * @author Phil Schatzmann - * @copyright GPLv3 -*/ -class AudioClientRTSP { - public: - AudioClientRTSP(uint32_t receiveBufferSize = RTSP_SINK_BUFFER_SIZE, bool useTCP=REQUEST_STREAMING_OVER_TCP, bool blocking = false) { - setBufferSize(receiveBufferSize); - useTCP ? 
setTCP() : setUDP(); - setBlocking(blocking); - } - - void setBufferSize(int size){ - audiotools_rtsp::rtspSinkReceiveBufferSize = size; - } - - void setTCP(){ - audiotools_rtsp::rtspUseTCP = true; - } - - void setUDP(){ - audiotools_rtsp::rtspUseTCP = false; - } - - void setBlocking(bool flag){ - is_blocking = flag; - } - - /// login to wifi: optional convinience method. You can also just start Wifi the normal way - void setLogin(const char* ssid, const char* password){ - this->ssid = ssid; - this->password = password; - } - - /// Starts the processing - bool begin(const char* url, Print &out) { - audiotools_rtsp::rtspOutput = &out; - if (url==nullptr) { - return false; - } - if (!login()){ - LOGE("wifi down"); - return false; - } - // Begin by setting up our usage environment: - scheduler = BasicTaskScheduler::createNew(); - env = BasicUsageEnvironment::createNew(*scheduler); - - // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start - // streaming each one: - rtsp_client = audiotools_rtsp::openURL(*env, "RTSPClient", url); - - // All subsequent activity takes place within the event loop: - if (is_blocking) env->taskScheduler().doEventLoop(&audiotools_rtsp::rtspEventLoopWatchVariable); - // This function call does not return, unless, at some point in time, - // "rtspEventLoopWatchVariable" gets set to something non-zero. 
- - return true; - } - - /// to be called in Arduino loop when blocking = false - void loop() { - if (audiotools_rtsp::rtspEventLoopWatchVariable==0) scheduler->SingleStep(); - } - - void end() { - audiotools_rtsp::rtspEventLoopWatchVariable = 1; - env->reclaim(); - env = NULL; - delete scheduler; - scheduler = NULL; - bool is_blocking = false; - } - - audiotools_rtsp::OurRTSPClient *client() { - return rtsp_client; - } - - protected: - audiotools_rtsp::OurRTSPClient* rtsp_client; - UsageEnvironment* env=nullptr; - BasicTaskScheduler* scheduler=nullptr; - const char* ssid=nullptr; - const char* password = nullptr; - bool is_blocking = false; - - /// login to wifi: optional convinience method. You can also just start Wifi the normal way - bool login(){ - if(WiFi.status() != WL_CONNECTED && ssid!=nullptr && password!=nullptr){ - WiFi.mode(WIFI_STA); - WiFi.begin(ssid, password); - while(WiFi.status() != WL_CONNECTED){ - Serial.print("."); - delay(100); - } - Serial.println(); - Serial.print("Local Address: "); - Serial.println(WiFi.localIP()); - } - return WiFi.status() == WL_CONNECTED; - } - - -}; - -} // namespace audio_tools - -namespace audiotools_rtsp { -// Define a class to hold per-stream state that we maintain throughout each -// stream's lifetime: - -// Forward function definitions: - -// RTSP 'response handlers': -void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, - char* resultString); -void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, - char* resultString); -void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, - char* resultString); - -// Other event handler functions: -void subsessionAfterPlaying( - void* clientData); // called when a stream's subsession (e.g., audio or - // video substream) ends -void subsessionByeHandler(void* clientData, char const* reason); -// called when a RTCP "BYE" is received for a subsession -void streamTimerHandler(void* clientData); -// called at the end of a stream's expected duration 
(if the stream has not -// already signaled its end using a RTCP "BYE") - -// Used to iterate through each stream's 'subsessions', setting up each one: -void setupNextSubsession(RTSPClient* rtspClient); - -// Used to shut down and close a stream (including its "RTSPClient" object): -void shutdownStream(RTSPClient* rtspClient, int exitCode = 1); - -// A function that outputs a string that identifies each stream (for debugging -// output). Modify this if you wish: -UsageEnvironment& operator<<(UsageEnvironment& env, - const RTSPClient& rtspClient) { - return env << "[URL:\"" << rtspClient.url() << "\"]: "; -} - -// A function that outputs a string that identifies each subsession (for -// debugging output). Modify this if you wish: -UsageEnvironment& operator<<(UsageEnvironment& env, - const MediaSubsession& subsession) { - return env << subsession.mediumName() << "/" << subsession.codecName(); -} - -class StreamClientState { - public: - StreamClientState(); - virtual ~StreamClientState(); - - public: - MediaSubsessionIterator* iter; - MediaSession* session; - MediaSubsession* subsession; - TaskToken streamTimerTask; - double duration; -}; - -// If you're streaming just a single stream (i.e., just from a single URL, -// once), then you can define and use just a single "StreamClientState" -// structure, as a global variable in your application. However, because - in -// this demo application - we're showing how to play multiple streams, -// concurrently, we can't do that. Instead, we have to have a separate -// "StreamClientState" structure for each "RTSPClient". 
To do this, we subclass -// "RTSPClient", and add a "StreamClientState" field to the subclass: - -class OurRTSPClient : public RTSPClient { - public: - static OurRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, - int verbosityLevel = 0, - char const* applicationName = NULL, - portNumBits tunnelOverHTTPPortNum = 0); - - protected: - OurRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, - char const* applicationName, portNumBits tunnelOverHTTPPortNum); - // called only by createNew(); - virtual ~OurRTSPClient(); - - public: - StreamClientState scs; -}; - -// Define a data sink (a subclass of "MediaSink") to receive the data for each -// subsession (i.e., each audio or video 'substream'). In practice, this might -// be a class (or a chain of classes) that decodes and then renders the incoming -// audio or video. Or it might be a "FileSink", for outputting the received data -// into a file (as is done by the "openRTSP" application). In this example code, -// however, we define a simple 'dummy' sink that receives incoming data, but -// does nothing with it. 
- -class OurSink : public MediaSink { - public: - static OurSink* createNew( - UsageEnvironment& env, - MediaSubsession& - subsession, // identifies the kind of data that's being received - char const* streamId = NULL); // identifies the stream itself (optional) - - private: - OurSink(UsageEnvironment& env, MediaSubsession& subsession, - char const* streamId); - // called only by "createNew()" - virtual ~OurSink(); - - static void afterGettingFrame(void* clientData, unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds); - void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds); - - private: - // redefined virtual functions: - virtual Boolean continuePlaying(); - - private: - u_int8_t* fReceiveBuffer; - MediaSubsession& fSubsession; - char* fStreamId; -}; - -OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) { - // Begin by creating a "RTSPClient" object. Note that there is a separate - // "RTSPClient" object for each stream that we wish to receive (even if more - // than stream uses the same "rtsp://" URL). - OurRTSPClient* rtspClient = OurRTSPClient::createNew( - env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName); - if (rtspClient == NULL) { - env << "Failed to create a RTSP client for URL \"" << rtspURL - << "\": " << env.getResultMsg() << "\n"; - return nullptr; - } - - ++rtspClientCount; - - // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the - // stream. Note that this command - like all RTSP commands - is sent - // asynchronously; we do not block, waiting for a response. 
Instead, the - // following function call returns immediately, and we handle the RTSP - // response later, from within the event loop: - rtspClient->sendDescribeCommand(continueAfterDESCRIBE); - return rtspClient; -} - -// Implementation of the RTSP 'response handlers': - -void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, - char* resultString) { - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to get a SDP description: " << resultString - << "\n"; - delete[] resultString; - break; - } - - char* const sdpDescription = resultString; - env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; - - // Create a media session object from this SDP description: - scs.session = MediaSession::createNew(env, sdpDescription); - delete[] sdpDescription; // because we don't need it anymore - if (scs.session == NULL) { - env << *rtspClient - << "Failed to create a MediaSession object from the SDP description: " - << env.getResultMsg() << "\n"; - break; - } else if (!scs.session->hasSubsessions()) { - env << *rtspClient - << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; - break; - } - - // Then, create and set up our data source objects for the session. We do - // this by iterating over the session's 'subsessions', calling - // "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, - // on each one. (Each 'subsession' will have its own data source.) - scs.iter = new MediaSubsessionIterator(*scs.session); - setupNextSubsession(rtspClient); - return; - } while (0); - - // An unrecoverable error occurred with this stream. 
- shutdownStream(rtspClient); -} - -void setupNextSubsession(RTSPClient* rtspClient) { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - scs.subsession = scs.iter->next(); - if (scs.subsession != NULL) { - if (!scs.subsession->initiate()) { - env << *rtspClient << "Failed to initiate the \"" << *scs.subsession - << "\" subsession: " << env.getResultMsg() << "\n"; - setupNextSubsession( - rtspClient); // give up on this subsession; go to the next one - } else { - env << *rtspClient << "Initiated the \"" << *scs.subsession - << "\" subsession ("; - if (scs.subsession->rtcpIsMuxed()) { - env << "client port " << scs.subsession->clientPortNum(); - } else { - env << "client ports " << scs.subsession->clientPortNum() << "-" - << scs.subsession->clientPortNum() + 1; - } - env << ")\n"; - - // Continue setting up this subsession, by sending a RTSP "SETUP" command: - rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, - rtspUseTCP); - } - return; - } - - // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" - // command to start the streaming: - if (scs.session->absStartTime() != NULL) { - // Special case: The stream is indexed by 'absolute' time, so send an - // appropriate "PLAY" command: - rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, - scs.session->absStartTime(), - scs.session->absEndTime()); - } else { - scs.duration = scs.session->playEndTime() - scs.session->playStartTime(); - rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY); - } -} - -void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, - char* resultString) { - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to set up the \"" << *scs.subsession - << "\" subsession: " << resultString << "\n"; - break; - } - - env << *rtspClient << "Set up the \"" << *scs.subsession - << "\" subsession ("; - if (scs.subsession->rtcpIsMuxed()) { - env << "client port " << scs.subsession->clientPortNum(); - } else { - env << "client ports " << scs.subsession->clientPortNum() << "-" - << scs.subsession->clientPortNum() + 1; - } - env << ")\n"; - - // Having successfully setup the subsession, create a data sink for it, and - // call "startPlaying()" on it. (This will prepare the data sink to receive - // data; the actual flow of data from the client won't start happening until - // later, after we've sent a RTSP "PLAY" command.) 
- - scs.subsession->sink = - OurSink::createNew(env, *scs.subsession, rtspClient->url()); - // perhaps use your own custom "MediaSink" subclass instead - if (scs.subsession->sink == NULL) { - env << *rtspClient << "Failed to create a data sink for the \"" - << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; - break; - } - - env << *rtspClient << "Created a data sink for the \"" << *scs.subsession - << "\" subsession\n"; - scs.subsession->miscPtr = - rtspClient; // a hack to let subsession handler functions get the - // "RTSPClient" from the subsession - scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), - subsessionAfterPlaying, scs.subsession); - // Also set a handler to be called if a RTCP "BYE" arrives for this - // subsession: - if (scs.subsession->rtcpInstance() != NULL) { - scs.subsession->rtcpInstance()->setByeWithReasonHandler( - subsessionByeHandler, scs.subsession); - } - } while (0); - delete[] resultString; - - // Set up the next subsession, if any: - setupNextSubsession(rtspClient); -} - -void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, - char* resultString) { - Boolean success = False; - - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to start playing session: " << resultString - << "\n"; - break; - } - - // Set a timer to be handled at the end of the stream's expected duration - // (if the stream does not already signal its end using a RTCP "BYE"). This - // is optional. If, instead, you want to keep the stream active - e.g., so - // you can later 'seek' back within it and do another RTSP "PLAY" - then you - // can omit this code. (Alternatively, if you don't want to receive the - // entire stream, you could set this timer for some shorter value.) 
- if (scs.duration > 0) { - unsigned const delaySlop = - 2; // number of seconds extra to delay, after the stream's expected - // duration. (This is optional.) - scs.duration += delaySlop; - unsigned uSecsToDelay = (unsigned)(scs.duration * 1000000); - scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask( - uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); - } - - env << *rtspClient << "Started playing session"; - if (scs.duration > 0) { - env << " (for up to " << scs.duration << " seconds)"; - } - env << "...\n"; - - success = True; - } while (0); - delete[] resultString; - - if (!success) { - // An unrecoverable error occurred with this stream. - shutdownStream(rtspClient); - } -} - -// Implementation of the other event handlers: - -void subsessionAfterPlaying(void* clientData) { - MediaSubsession* subsession = (MediaSubsession*)clientData; - RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); - - // Begin by closing this subsession's stream: - Medium::close(subsession->sink); - subsession->sink = NULL; - - // Next, check whether *all* subsessions' streams have now been closed: - MediaSession& session = subsession->parentSession(); - MediaSubsessionIterator iter(session); - while ((subsession = iter.next()) != NULL) { - if (subsession->sink != NULL) return; // this subsession is still active - } - - // All subsessions' streams have now been closed, so shutdown the client: - shutdownStream(rtspClient); -} - -void subsessionByeHandler(void* clientData, char const* reason) { - MediaSubsession* subsession = (MediaSubsession*)clientData; - RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; - UsageEnvironment& env = rtspClient->envir(); // alias - - env << *rtspClient << "Received RTCP \"BYE\""; - if (reason != NULL) { - env << " (reason:\"" << reason << "\")"; - delete[] (char*)reason; - } - env << " on \"" << *subsession << "\" subsession\n"; - - // Now act as if the subsession had closed: - subsessionAfterPlaying(subsession); -} - 
-void streamTimerHandler(void* clientData) { - OurRTSPClient* rtspClient = (OurRTSPClient*)clientData; - StreamClientState& scs = rtspClient->scs; // alias - - scs.streamTimerTask = NULL; - - // Shut down the stream: - shutdownStream(rtspClient); -} - -void shutdownStream(RTSPClient* rtspClient, int exitCode) { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - // First, check whether any subsessions have still to be closed: - if (scs.session != NULL) { - Boolean someSubsessionsWereActive = False; - MediaSubsessionIterator iter(*scs.session); - MediaSubsession* subsession; - - while ((subsession = iter.next()) != NULL) { - if (subsession->sink != NULL) { - Medium::close(subsession->sink); - subsession->sink = NULL; - - if (subsession->rtcpInstance() != NULL) { - subsession->rtcpInstance()->setByeHandler( - NULL, NULL); // in case the server sends a RTCP "BYE" while - // handling "TEARDOWN" - } - - someSubsessionsWereActive = True; - } - } - - if (someSubsessionsWereActive) { - // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the - // stream. Don't bother handling the response to the "TEARDOWN". - rtspClient->sendTeardownCommand(*scs.session, NULL); - } - } - - env << *rtspClient << "Closing the stream.\n"; - Medium::close(rtspClient); - // Note that this will also cause this stream's "StreamClientState" structure - // to get reclaimed. - - if (--rtspClientCount == 0) { - // The final stream has ended, so exit the application now. - // (Of course, if you're embedding this code into your own application, you - // might want to comment this out, and replace it with - // "rtspEventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, - // and continue running "main()".) 
- // exit(exitCode); - rtspEventLoopWatchVariable = 1; - return; - } -} - -// Implementation of "OurRTSPClient": - -OurRTSPClient* OurRTSPClient::createNew(UsageEnvironment& env, - char const* rtspURL, int verbosityLevel, - char const* applicationName, - portNumBits tunnelOverHTTPPortNum) { - return new OurRTSPClient(env, rtspURL, verbosityLevel, applicationName, - tunnelOverHTTPPortNum); -} - -OurRTSPClient::OurRTSPClient(UsageEnvironment& env, char const* rtspURL, - int verbosityLevel, char const* applicationName, - portNumBits tunnelOverHTTPPortNum) - : RTSPClient(env, rtspURL, verbosityLevel, applicationName, - tunnelOverHTTPPortNum, -1) {} - -OurRTSPClient::~OurRTSPClient() {} - -// Implementation of "StreamClientState": - -StreamClientState::StreamClientState() - : iter(NULL), - session(NULL), - subsession(NULL), - streamTimerTask(NULL), - duration(0.0) {} - -StreamClientState::~StreamClientState() { - delete iter; - if (session != NULL) { - // We also need to delete "session", and unschedule "streamTimerTask" (if - // set) - UsageEnvironment& env = session->envir(); // alias - - env.taskScheduler().unscheduleDelayedTask(streamTimerTask); - Medium::close(session); - } -} - -// Implementation of "OurSink": - -OurSink* OurSink::createNew(UsageEnvironment& env, - MediaSubsession& subsession, - char const* streamId) { - return new OurSink(env, subsession, streamId); -} - -OurSink::OurSink(UsageEnvironment& env, MediaSubsession& subsession, - char const* streamId) - : MediaSink(env), fSubsession(subsession) { - fStreamId = strDup(streamId); - fReceiveBuffer = new u_int8_t[rtspSinkReceiveBufferSize]; -} - -OurSink::~OurSink() { - delete[] fReceiveBuffer; - delete[] fStreamId; -} - -void OurSink::afterGettingFrame(void* clientData, unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds) { - OurSink* sink = (OurSink*)clientData; - sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, - 
durationInMicroseconds); -} - -void OurSink::afterGettingFrame(unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned /*durationInMicroseconds*/) { - // We've just received a frame of data. (Optionally) print out information - // about it: -#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME - if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; "; - envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() - << ":\tReceived " << frameSize << " bytes"; - if (numTruncatedBytes > 0) - envir() << " (with " << numTruncatedBytes << " bytes truncated)"; - char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the - // presentation time - snprintf(uSecsStr,7 , "%06u", (unsigned)presentationTime.tv_usec); - envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." - << uSecsStr; - if (fSubsession.rtpSource() != NULL && - !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) { - envir() << "!"; // mark the debugging output to indicate that this - // presentation time is not RTCP-synchronized - } -#ifdef DEBUG_PRINT_NPT - envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime); -#endif - envir() << "\n"; -#endif - - // Decode the data - if (rtspOutput) { - size_t writtenSize = rtspOutput->write(fReceiveBuffer, frameSize); - assert(writtenSize == frameSize); - } - - // Then continue, to request the next frame of data: - continuePlaying(); -} - -Boolean OurSink::continuePlaying() { - if (fSource == NULL) return False; // sanity check (should not happen) - - // Request the next frame of data from our input source. 
"afterGettingFrame()" - // will get called later, when it arrives: - fSource->getNextFrame(fReceiveBuffer, rtspSinkReceiveBufferSize, - afterGettingFrame, this, onSourceClosure, this); - return True; -} - -} // namespace audiotools_rtsp \ No newline at end of file +#WARNING("Obsolete: Use AudioTools/Communication/AudioClientRTSP555.h") +#include "AudioTools/Communication/AudioClientRTSP555.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/HLSStream.h b/src/AudioTools/AudioLibs/HLSStream.h index 3d9e6126b7..1468453e52 100644 --- a/src/AudioTools/AudioLibs/HLSStream.h +++ b/src/AudioTools/AudioLibs/HLSStream.h @@ -1,783 +1,3 @@ #pragma once -#include "AudioTools/AudioCodecs/AudioEncoded.h" -#include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/Communication/HTTP/URLStream.h" -#include "AudioTools/CoreAudio/StreamCopy.h" -#include "AudioToolsConfig.h" - -#define MAX_HLS_LINE 512 -#define START_URLS_LIMIT 4 -#define HLS_BUFFER_COUNT 2 -#define HLS_MAX_NO_READ 2 -#define HLS_MAX_URL_LEN 256 -#define HLS_TIMEOUT 5000 -#define HLS_UNDER_OVERFLOW_WAIT_TIME 10 - -/// hide hls implementation in it's own namespace - -namespace audio_tools_hls { - -/*** - * @brief We feed the URLLoaderHLS with some url strings. The data of the - * related segments are provided via the readBytes() method. 
- * @author Phil Schatzmann - * @copyright GPLv3 - */ - -template -class URLLoaderHLS { - public: - URLLoaderHLS() = default; - - ~URLLoaderHLS() { end(); } - - bool begin() { - TRACED(); - buffer.resize(buffer_size * buffer_count); - - active = true; - return true; - } - - void end() { - TRACED(); - url_stream.end(); - buffer.clear(); - active = false; - } - - /// Adds the next url to be played in sequence - void addUrl(const char *url) { - LOGI("Adding %s", url); - StrView url_str(url); - char *str = new char[url_str.length() + 1]; - memcpy(str, url_str.c_str(), url_str.length() + 1); - urls.push_back((const char *)str); - } - - /// Provides the number of open urls which can be played. Refills them, when - /// min limit is reached. - int urlCount() { return urls.size(); } - - /// Available bytes of the audio stream - int available() { - if (!active) return 0; - TRACED(); - bufferRefill(); - - return buffer.available(); - } - - /// Provides data from the audio stream - size_t readBytes(uint8_t *data, size_t len) { - if (!active) return 0; - TRACED(); - bufferRefill(); - - if (buffer.available() < len) LOGW("Buffer underflow"); - return buffer.readArray(data, len); - } - - const char *contentType() { - return url_stream.httpRequest().reply().get(CONTENT_TYPE); - } - - int contentLength() { return url_stream.contentLength(); } - - void setBufferSize(int size, int count) { - buffer_size = size; - buffer_count = count; - // support call after begin()! 
- if (buffer.size() != 0) { - buffer.resize(buffer_size * buffer_count); - } - } - - void setCACert(const char *cert) { url_stream.setCACert(cert); } - - protected: - Vector urls{10}; - RingBuffer buffer{0}; - bool active = false; - int buffer_size = DEFAULT_BUFFER_SIZE; - int buffer_count = HLS_BUFFER_COUNT; - URLStream url_stream; - const char *url_to_play = nullptr; - - /// try to keep the buffer filled - void bufferRefill() { - TRACED(); - // we have nothing to do - if (urls.empty()) { - LOGD("urls empty"); - delay(HLS_UNDER_OVERFLOW_WAIT_TIME); - return; - } - if (buffer.availableForWrite() == 0) { - LOGD("buffer full"); - delay(HLS_UNDER_OVERFLOW_WAIT_TIME); - return; - } - - // switch current stream if we have no more data - if (!url_stream && !urls.empty()) { - LOGD("Refilling"); - if (url_to_play != nullptr) { - delete url_to_play; - } - url_to_play = urls[0]; - LOGI("playing %s", url_to_play); - url_stream.end(); - url_stream.setConnectionClose(true); - url_stream.setTimeout(HLS_TIMEOUT); - url_stream.begin(url_to_play); - url_stream.waitForData(HLS_TIMEOUT); - urls.pop_front(); - // assert(urls[0]!=url); - - LOGI("Playing %s of %d", url_stream.urlStr(), (int)urls.size()); - } - - int total = 0; - int failed = 0; - int to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); - // try to keep the buffer filled - while (to_write > 0) { - uint8_t tmp[to_write]; - memset(tmp, 0, to_write); - int read = url_stream.readBytes(tmp, to_write); - total += read; - if (read > 0) { - failed = 0; - buffer.writeArray(tmp, read); - LOGD("buffer add %d -> %d:", read, buffer.available()); - - to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); - } else { - delay(10); - } - // After we processed all data we close the stream to get a new url - if (url_stream.totalRead() == url_stream.contentLength()) { - LOGI("Closing stream because all bytes were processed: available: %d", - url_stream.available()); - url_stream.end(); - break; - } - LOGD("Refilled with 
%d now %d available to write", total, - buffer.availableForWrite()); - } - } -}; - -/** - * Prevent that the same url is loaded twice. We limit the history to - * 20 entries. - */ -class URLHistory { - public: - bool add(const char *url) { - if (url == nullptr) return true; - bool found = false; - StrView url_str(url); - for (int j = 0; j < history.size(); j++) { - if (url_str.equals(history[j])) { - found = true; - break; - } - } - if (!found) { - char *str = new char[url_str.length() + 1]; - memcpy(str, url, url_str.length() + 1); - history.push_back((const char *)str); - if (history.size() > 20) { - delete (history[0]); - history.pop_front(); - } - } - return !found; - } - - void clear() { history.clear(); } - - int size() { return history.size(); } - - protected: - Vector history; -}; - -/** - * @brief Simple Parser for HLS data. - * @author Phil Schatzmann - * @copyright GPLv3 - */ -template -class HLSParser { - public: - // loads the index url - bool begin(const char *urlStr) { - index_url_str = urlStr; - return begin(); - } - - bool begin() { - TRACEI(); - segments_url_str = ""; - bandwidth = 0; - total_read = 0; - - if (!parseIndex()) { - TRACEE(); - return false; - } - - // in some exceptional cases the index provided segement info - if (url_loader.urlCount() == 0) { - if (!parseSegments()) { - TRACEE(); - return false; - } - } else { - segments_url_str = index_url_str; - segmentsActivate(); - } - - if (!url_loader.begin()) { - TRACEE(); - return false; - } - - return true; - } - - int available() { - TRACED(); - int result = 0; - reloadSegments(); - - if (active) result = url_loader.available(); - return result; - } - - size_t readBytes(uint8_t *data, size_t len) { - TRACED(); - size_t result = 0; - reloadSegments(); - - if (active) result = url_loader.readBytes(data, len); - total_read += result; - return result; - } - - const char *indexUrl() { return index_url_str; } - - const char *segmentsUrl() { return segments_url_str.c_str(); } - - /// Provides 
the codec - const char *getCodec() { return codec.c_str(); } - - /// Provides the content type of the audio data - const char *contentType() { return url_loader.contentType(); } - - /// Provides the http content lengh - int contentLength() { return url_loader.contentLength(); } - - /// Closes the processing - void end() { - TRACEI(); - codec.clear(); - segments_url_str.clear(); - url_stream.end(); - url_loader.end(); - url_history.clear(); - active = false; - } - - /// Defines the number of urls that are preloaded in the URLLoaderHLS - void setUrlCount(int count) { url_count = count; } - - /// Redefines the buffer size - void setBufferSize(int size, int count) { - url_loader.setBufferSize(size, count); - } - - void setCACert(const char *cert) { - url_stream.setCACert(cert); - url_loader.setCACert(cert); - } - - void setPowerSave(bool flag) { url_stream.setPowerSave(flag); } - - void setURLResolver(const char *(*cb)(const char *segment, - const char *reqURL)) { - resolve_url = cb; - } - /// Provides the hls url as string - const char *urlStr() { return url_str.c_str(); } - - /// Povides the number of bytes read - size_t totalRead() { return total_read; }; - - protected: - enum class URLType { Undefined, Index, Segment }; - URLType next_url_type = URLType::Undefined; - int bandwidth = 0; - int url_count = 5; - size_t total_read = 0; - bool url_active = false; - bool is_extm3u = false; - Str codec; - Str segments_url_str; - Str url_str; - const char *index_url_str = nullptr; - URLStream url_stream; - URLLoaderHLS url_loader; - URLHistory url_history; - bool active = false; - bool parse_segments_active = false; - int media_sequence = 0; - int segment_count = 0; - uint64_t next_sement_load_time_planned = 0; - float play_time = 0; - uint64_t next_sement_load_time = 0; - const char *(*resolve_url)(const char *segment, - const char *reqURL) = resolveURL; - - /// Default implementation for url resolver: determine absolue url from - /// relative url - static const char 
*resolveURL(const char *segment, const char *reqURL) { - // avoid dynamic memory allocation - static char result[HLS_MAX_URL_LEN] = {0}; - StrView result_str(result, HLS_MAX_URL_LEN); - StrView index_url(reqURL); - // Use prefix up to ? or laast / - int end = index_url.lastIndexOf("?"); - if (end >= 0) { - result_str.substring(reqURL, 0, end); - } else { - end = index_url.lastIndexOf("/"); - if (end >= 0) { - result_str.substring(reqURL, 0, end); - } - } - // Use the full url - if (result_str.isEmpty()) { - result_str = reqURL; - } - // add trailing / - if (!result_str.endsWith("/")) { - result_str.add("/"); - } - // add relative segment - result_str.add(segment); - LOGI(">> relative addr: %s for %s", segment, reqURL); - LOGD(">> -> %s", result); - return result; - } - - /// trigger the reloading of segments if the limit is underflowing - void reloadSegments() { - TRACED(); - // get new urls - if (!segments_url_str.isEmpty()) { - parseSegments(); - } - } - - /// parse the index file and the segments - bool parseIndex() { - TRACED(); - url_stream.end(); - url_stream.setTimeout(HLS_TIMEOUT); - url_stream.setConnectionClose(true); - if (!url_stream.begin(index_url_str)) return false; - url_active = true; - return parseIndexLines(); - } - - /// parse the index file - bool parseIndexLines() { - TRACEI(); - char tmp[MAX_HLS_LINE]; - bool result = true; - is_extm3u = false; - - // parse lines - memset(tmp, 0, MAX_HLS_LINE); - while (true) { - memset(tmp, 0, MAX_HLS_LINE); - size_t len = - url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); - // stop when there is no more data - if (len == 0 && url_stream.available() == 0) break; - StrView str(tmp); - - // check header - if (str.startsWith("#EXTM3U")) { - is_extm3u = true; - // reset timings - resetTimings(); - } - - if (is_extm3u) { - if (!parseIndexLine(str)) { - return false; - } - } - } - return result; - } - - /// Determine codec for min bandwidth - bool parseIndexLine(StrView &str) { - TRACED(); - 
LOGI("> %s", str.c_str()); - parseIndexLineMetaData(str); - // in some exceptional cases the index provided segement info - parseSegmentLineMetaData(str); - parseLineURL(str); - return true; - } - - bool parseIndexLineMetaData(StrView &str) { - int tmp_bandwidth; - if (str.startsWith("#")) { - if (str.indexOf("EXT-X-STREAM-INF") >= 0) { - next_url_type = URLType::Index; - // determine min bandwidth - int pos = str.indexOf("BANDWIDTH="); - if (pos > 0) { - StrView num(str.c_str() + pos + 10); - tmp_bandwidth = num.toInt(); - url_active = (tmp_bandwidth < bandwidth || bandwidth == 0); - if (url_active) { - bandwidth = tmp_bandwidth; - LOGD("-> bandwith: %d", bandwidth); - } - } - - pos = str.indexOf("CODECS="); - if (pos > 0) { - int start = pos + 8; - int end = str.indexOf('"', pos + 10); - codec.substring(str, start, end); - LOGI("-> codec: %s", codec.c_str()); - } - } - } - return true; - } - - void resetTimings() { - next_sement_load_time_planned = millis(); - play_time = 0; - next_sement_load_time = 0xFFFFFFFFFFFFFFFF; - } - - /// parse the segment url provided by the index - bool parseSegments() { - TRACED(); - if (parse_segments_active) { - return false; - } - - // make sure that we load at relevant schedule - if (millis() < next_sement_load_time && url_loader.urlCount() > 1) { - delay(1); - return false; - } - parse_segments_active = true; - - LOGI("Available urls: %d", url_loader.urlCount()); - - if (url_stream) url_stream.clear(); - LOGI("parsing %s", segments_url_str.c_str()); - - if (segments_url_str.isEmpty()) { - TRACEE(); - parse_segments_active = false; - return false; - } - - if (!url_stream.begin(segments_url_str.c_str())) { - TRACEE(); - parse_segments_active = false; - return false; - } - - segment_count = 0; - if (!parseSegmentLines()) { - TRACEE(); - parse_segments_active = false; - // do not display as error - return true; - } - - segmentsActivate(); - return true; - } - - void segmentsActivate() { - LOGI("Reloading in %f sec", play_time / 
1000.0); - if (play_time > 0) { - next_sement_load_time = next_sement_load_time_planned + play_time; - } - - // we request a minimum of collected urls to play before we start - if (url_history.size() > START_URLS_LIMIT) active = true; - parse_segments_active = false; - } - - /// parse the segments - bool parseSegmentLines() { - TRACEI(); - char tmp[MAX_HLS_LINE]; - bool result = true; - is_extm3u = false; - - // parse lines - memset(tmp, 0, MAX_HLS_LINE); - while (true) { - memset(tmp, 0, MAX_HLS_LINE); - size_t len = - url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); - if (len == 0 && url_stream.available() == 0) break; - StrView str(tmp); - - // check header - if (str.startsWith("#EXTM3U")) { - is_extm3u = true; - resetTimings(); - } - - if (is_extm3u) { - if (!parseSegmentLine(str)) { - return false; - } - } - } - return result; - } - - /// Add all segments to queue - bool parseSegmentLine(StrView &str) { - TRACED(); - LOGI("> %s", str.c_str()); - if (!parseSegmentLineMetaData(str)) return false; - parseLineURL(str); - return true; - } - - bool parseSegmentLineMetaData(StrView &str) { - if (str.startsWith("#")) { - if (str.startsWith("#EXT-X-MEDIA-SEQUENCE:")) { - int new_media_sequence = atoi(str.c_str() + 22); - LOGI("media_sequence: %d", new_media_sequence); - if (new_media_sequence == media_sequence) { - LOGW("MEDIA-SEQUENCE already loaded: %d", media_sequence); - return false; - } - media_sequence = new_media_sequence; - } - - // add play time to next_sement_load_time_planned - if (str.startsWith("#EXTINF")) { - next_url_type = URLType::Segment; - StrView sec_str(str.c_str() + 8); - float sec = sec_str.toFloat(); - LOGI("adding play time: %f sec", sec); - play_time += (sec * 1000.0); - } - } - return true; - } - - bool parseLineURL(StrView &str) { - if (!str.startsWith("#")) { - switch (next_url_type) { - case URLType::Undefined: - // we should not get here - assert(false); - break; - case URLType::Index: - if (str.startsWith("http")) { - 
segments_url_str.set(str); - } else { - segments_url_str.set(resolve_url(str.c_str(), index_url_str)); - } - LOGD("segments_url_str = %s", segments_url_str.c_str()); - break; - case URLType::Segment: - segment_count++; - if (url_history.add(str.c_str())) { - // provide audio urls to the url_loader - if (str.startsWith("http")) { - url_str = str; - } else { - // we create the complete url - url_str = resolve_url(str.c_str(), index_url_str); - } - url_loader.addUrl(url_str.c_str()); - } else { - LOGD("Duplicate ignored: %s", str.c_str()); - } - } - // clear url type - next_url_type = URLType::Undefined; - } - return true; - } -}; - -} // namespace audio_tools_hls - -namespace audio_tools { -/** - * @brief HTTP Live Streaming using HLS: The resulting .ts data is provided - * via readBytes() that dynamically reload new Segments. Please note that - * this reloading adds a considerable delay: So if you want to play back the - * audio, you should buffer the content in a seaparate task. - * - * @author Phil Schatzmann - * @ingroup http *@copyright GPLv3 - */ - -template -class HLSStreamT : public AbstractURLStream { - public: - /// Empty constructor - HLSStreamT() = default; - - /// Convenience constructor which logs in to the WiFi - HLSStreamT(const char *ssid, const char *password) { - setSSID(ssid); - setPassword(password); - } - - /// Open an HLS url - bool begin(const char *urlStr) { - TRACEI(); - login(); - // parse the url to the HLS - bool rc = parser.begin(urlStr); - return rc; - } - - /// Reopens the last url - bool begin() override { - TRACEI(); - login(); - bool rc = parser.begin(); - return rc; - } - - /// ends the request - void end() override { parser.end(); } - - /// Sets the ssid that will be used for logging in (when calling begin) - void setSSID(const char *ssid) override { this->ssid = ssid; } - - /// Sets the password that will be used for logging in (when calling begin) - void setPassword(const char *password) override { this->password = password; } - 
- /// Returns the string representation of the codec of the audio stream - const char *codec() { return parser.getCodec(); } - - /// Provides the content type from the http reply - const char *contentType() { return parser.contentType(); } - - /// Provides the content length of the actual .ts Segment - int contentLength() override { return parser.contentLength(); } - - /// Provides number of available bytes in the read buffer - int available() override { - TRACED(); - return parser.available(); - } - - /// Provides the data fro the next .ts Segment - size_t readBytes(uint8_t *data, size_t len) override { - TRACED(); - return parser.readBytes(data, len); - } - - /// Redefines the read buffer size - void setBufferSize(int size, int count) { parser.setBufferSize(size, count); } - - /// Defines the certificate - void setCACert(const char *cert) override { parser.setCACert(cert); } - - /// Changes the Wifi to power saving mode - void setPowerSave(bool flag) override { parser.setPowerSave(flag); } - - /// Custom logic to provide the codec as Content-Type to support the - /// MultiCodec - const char *getReplyHeader(const char *header) override { - const char *codec = parser.getCodec(); - const char *result = nullptr; - if (StrView(header).equalsIgnoreCase(CONTENT_TYPE)) { - result = parser.contentType(); - } - if (result) LOGI("-> Format: %s", result); - return result; - } - - /// The resolving of relative addresses can be quite tricky: you can provide - /// your custom resolver implementation - void setURLResolver(const char *(*cb)(const char *segment, - const char *reqURL)) { - parser.setURLResolver(cb); - } - - const char *urlStr() override { return parser.urlStr(); } - - size_t totalRead() override { return parser.totalRead(); }; - /// not implemented - void setConnectionClose(bool flag) override {}; - /// not implemented - bool waitForData(int timeout) override { return false; } - - protected: - audio_tools_hls::HLSParser parser; - const char *ssid = nullptr; - const 
char *password = nullptr; - - void login() { -#ifdef USE_WIFI - if (ssid != nullptr && password != nullptr && - WiFi.status() != WL_CONNECTED) { - TRACED(); - delay(1000); - WiFi.begin(ssid, password); - while (WiFi.status() != WL_CONNECTED) { - Serial.print("."); - delay(500); - } - } -#else - LOGW("login not supported"); -#endif - } - - /// Added to comply with AbstractURLStream - bool begin(const char *urlStr, const char *acceptMime, MethodID action = GET, - const char *reqMime = "", const char *reqData = "") override { - return begin(urlStr); - } - - HttpRequest &httpRequest() override { - static HttpRequest dummy; - return dummy; - } - - /// Not implemented: potential future improvement - void setClient(Client &clientPar) override {} - - /// Not implemented - void addRequestHeader(const char *header, const char *value) override {} -}; - -/// @brief HLS Stream implementation using URLStream for HTTP requests -/// @ingroup http -using HLSStream = HLSStreamT; - -} // namespace audio_tools +#WARNING("Obsolete - use /AudioTools/Communication/HLSStream.h") +#include "AudioTools/Communication/HLSStream.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/README.md b/src/AudioTools/AudioLibs/README.md index 1701c98238..4d3a6dba4b 100644 --- a/src/AudioTools/AudioLibs/README.md +++ b/src/AudioTools/AudioLibs/README.md @@ -1,2 +1,2 @@ -Integration to different external audio libraries \ No newline at end of file +Integration to different optonal external audio libraries \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/VBANStream.h b/src/AudioTools/AudioLibs/VBANStream.h index 6ff5ceacfb..65ef0a6bf2 100644 --- a/src/AudioTools/AudioLibs/VBANStream.h +++ b/src/AudioTools/AudioLibs/VBANStream.h @@ -1,592 +1,3 @@ - -#include -#include - -#include "AudioTools/AudioLibs/vban/vban.h" -#include "AudioTools/CoreAudio/AudioStreams.h" -#include "AudioTools/Concurrency/RTOS/BufferRTOS.h" - -namespace audio_tools { - -class VBANConfig : public 
AudioInfo { - public: - VBANConfig() { - sample_rate = 11025; - channels = 1; - bits_per_sample = 16; - } - RxTxMode mode; - /// name of the stream - const char* stream_name = "Stream1"; - /// default port is 6980 - uint16_t udp_port = 6980; - /// Use {0,0,0,0}; as broadcast address - IPAddress target_ip{0, 0, 0, 0}; - /// ssid for wifi connection - const char* ssid = nullptr; - /// password for wifi connection - const char* password = nullptr; - int rx_buffer_count = 30; - // set to true if samples are generated faster then sample rate - bool throttle_active = false; - // when negative the number of ms that are subtracted from the calculated wait - // time to fine tune Overload and Underruns - int throttle_correction_us = 0; - // defines the max write size - int max_write_size = - DEFAULT_BUFFER_SIZE * 2; // just good enough for 44100 stereo - uint8_t format = 0; - - //reply for discovery packet - uint32_t device_flags = 0x00000001; // default: receiver only - uint32_t bitfeature = 0x00000001; // default: audio only - uint32_t device_color = 0x00FF00; // green default - //const char* stream_name_reply = "VBAN SPOT PING"; - const char* device_name = nullptr; // nullptr means use MAC by default - const char* manufacturer_name = "ESP32 AudioTools"; - const char* application_name = "VBAN Streamer"; - const char* host_name = nullptr; // will fallback to WiFi.getHostname() - const char* user_name = "User"; - const char* user_comment = "ESP32 VBAN Audio Device"; -}; - -/** - * @brief VBAN Audio Source and Sink for the ESP32. For further details please - * see https://vb-audio.com/Voicemeeter/vban.htm . 
- * Inspired by https://github.com/rkinnett/ESP32-VBAN-Audio-Source/tree/master - * and https://github.com/rkinnett/ESP32-VBAN-Network-Audio-Player - * @ingroup communications - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -class VBANStream : public AudioStream { - public: - VBANConfig defaultConfig(RxTxMode mode = TX_MODE) { - VBANConfig def; - def.mode = mode; - return def; - } - - void setOutput(Print &out){ - p_out = &out; - } - - void setAudioInfo(AudioInfo info) override { - cfg.copyFrom(info); - AudioStream::setAudioInfo(info); - auto thc = throttle.defaultConfig(); - thc.copyFrom(info); - thc.correction_us = cfg.throttle_correction_us; - throttle.begin(thc); - if (cfg.mode == TX_MODE) { - configure_tx(); - } - } - - bool begin(VBANConfig cfg) { - this->cfg = cfg; - setAudioInfo(cfg); - return begin(); - } - - bool begin() { - if (cfg.mode == TX_MODE) { - if (cfg.bits_per_sample != 16) { - LOGE("Only 16 bits supported") - return false; - } - tx_buffer.resize(VBAN_PACKET_NUM_SAMPLES); - return begin_tx(); - } else { -#ifdef ESP32 - rx_buffer.resize(DEFAULT_BUFFER_SIZE * cfg.rx_buffer_count); - rx_buffer.setReadMaxWait(10); -#else - rx_buffer.resize(DEFAULT_BUFFER_SIZE, cfg.rx_buffer_count); -#endif - return begin_rx(); - } - } - - size_t write(const uint8_t* data, size_t len) override { - if (!udp_connected) return 0; - - int16_t* adc_data = (int16_t*)data; - size_t samples = len / (cfg.bits_per_sample/8); - - // limit output speed - if (cfg.throttle_active) { - throttle.delayFrames(samples / cfg.channels); - } - - for (int j = 0; j < samples; j++) { - tx_buffer.write(adc_data[j]); - if (tx_buffer.availableForWrite() == 0) { - memcpy(vban.data_frame, tx_buffer.data(), vban.packet_data_bytes); - *vban.packet_counter = packet_counter; // increment packet counter - // Send packet - if (cfg.target_ip == broadcast_address) { - udp.broadcastTo((uint8_t*)&vban.packet, vban.packet_total_bytes, - cfg.udp_port); - } else { - udp.writeTo((uint8_t*)&vban.packet, 
vban.packet_total_bytes, - cfg.target_ip, cfg.udp_port); - } - // defile delay start time - packet_counter++; - tx_buffer.reset(); - } - } - return len; - } - - int availableForWrite() { return cfg.max_write_size; } - - size_t readBytes(uint8_t* data, size_t len) override { - TRACED(); - size_t samples = len / (cfg.bits_per_sample/8); - if (cfg.throttle_active) { - throttle.delayFrames(samples / cfg.channels); - } - return rx_buffer.readArray(data, len); - } - - int available() { return available_active ? rx_buffer.available() : 0; } - - protected: - const IPAddress broadcast_address{0, 0, 0, 0}; - AsyncUDP udp; - VBan vban; - VBANConfig cfg; - SingleBuffer tx_buffer{0}; - #ifdef ESP32 - BufferRTOS rx_buffer{ 0}; - #else - NBuffer rx_buffer{DEFAULT_BUFFER_SIZE, 0}; - #endif - bool udp_connected = false; - uint32_t packet_counter = 0; - Throttle throttle; - size_t bytes_received = 0; - bool available_active = false; - Print *p_out = nullptr; - - bool begin_tx() { - if (!configure_tx()) { - return false; - } - start_wifi(); - if (WiFi.status() != WL_CONNECTED) { - LOGE("Wifi not connected"); - return false; - } - WiFi.setSleep(false); - IPAddress myIP = WiFi.localIP(); - udp_connected = udp.connect(myIP, cfg.udp_port); - return udp_connected; - } - - bool begin_rx() { - start_wifi(); - if (WiFi.status() != WL_CONNECTED) { - LOGE("Wifi not connected"); - return false; - } - WiFi.setSleep(false); - bytes_received = 0; - this->available_active = false; - // connect to target - if (!udp.listen(cfg.udp_port)) { - LOGE("Could not connect to '%s:%d' target", toString(cfg.target_ip), - cfg.udp_port); - } - // handle data - udp.onPacket([this](AsyncUDPPacket packet) { receive_udp(packet); }); - - return true; - } - - bool configure_tx() { - int rate = vban_sample_rate(); - if (rate < 0) { - LOGE("Invalid sample rate: %d", cfg.sample_rate); - return false; - } - configure_vban((VBanSampleRates)rate); - return true; - } - - void start_wifi() { - if (cfg.ssid == nullptr) return; 
- if (cfg.password == nullptr) return; - LOGI("ssid %s", cfg.ssid); - // Setup Wifi: - WiFi.begin(cfg.ssid, cfg.password); // Connect to your WiFi router - while (WiFi.status() != WL_CONNECTED) { // Wait for connection - delay(500); - Serial.print("."); - } - Serial.println(); - - LOGI("Wifi connected to IP (%d.%d.%d.%d)", WiFi.localIP()[0], - WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); - } - - void configure_vban(VBanSampleRates rate) { - // Set vban packet header, counter, and data frame pointers to respective - // parts of packet: - vban.hdr = (VBanHeader*)&vban.packet[0]; - vban.packet_counter = (uint32_t*)&vban.packet[VBAN_PACKET_HEADER_BYTES]; - vban.data_frame = - (uint8_t*)&vban - .packet[VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES]; - - // Setup the packet header: - strncpy(vban.hdr->preamble, "VBAN", 4); - vban.hdr->sample_rate = - static_cast(VBAN_PROTOCOL_AUDIO) | - rate; // 11025 Hz, which matches default sample rate for soundmodem - vban.hdr->num_samples = - (VBAN_PACKET_NUM_SAMPLES / cfg.channels) - 1; // 255 = 256 samples - vban.hdr->num_channels = cfg.channels - 1; // 0 = 1 channel - vban.hdr->sample_format = - static_cast(VBAN_BITFMT_16_INT) | VBAN_CODEC_PCM; // int16 PCM - strncpy(vban.hdr->stream_name, cfg.stream_name, - min((int)strlen(cfg.stream_name), VBAN_STREAM_NAME_SIZE)); - - vban.packet_data_bytes = - (vban.hdr->num_samples + 1) * (vban.hdr->num_channels + 1) * - ((vban.hdr->sample_format & VBAN_BIT_RESOLUTION_MASK) + 1); - vban.packet_total_bytes = vban.packet_data_bytes + - VBAN_PACKET_HEADER_BYTES + - VBAN_PACKET_COUNTER_BYTES; - } - - int vban_sample_rate() { - int result = -1; - switch (cfg.sample_rate) { - case 6000: - result = SAMPLE_RATE_6000_HZ; - break; - case 12000: - result = SAMPLE_RATE_12000_HZ; - break; - case 24000: - result = SAMPLE_RATE_24000_HZ; - break; - case 48000: - result = SAMPLE_RATE_48000_HZ; - break; - case 96000: - result = SAMPLE_RATE_96000_HZ; - break; - case 192000: - result = 
SAMPLE_RATE_192000_HZ; - break; - case 384000: - result = SAMPLE_RATE_384000_HZ; - break; - case 8000: - result = SAMPLE_RATE_8000_HZ; - break; - case 16000: - result = SAMPLE_RATE_16000_HZ; - break; - case 32000: - result = SAMPLE_RATE_32000_HZ; - break; - case 64000: - result = SAMPLE_RATE_64000_HZ; - break; - case 128000: - result = SAMPLE_RATE_128000_HZ; - break; - case 256000: - result = SAMPLE_RATE_256000_HZ; - break; - case 512000: - result = SAMPLE_RATE_512000_HZ; - break; - case 11025: - result = SAMPLE_RATE_11025_HZ; - break; - case 22050: - result = SAMPLE_RATE_22050_HZ; - break; - case 44100: - result = SAMPLE_RATE_44100_HZ; - break; - case 88200: - result = SAMPLE_RATE_88200_HZ; - break; - case 176400: - result = SAMPLE_RATE_176400_HZ; - break; - case 352800: - result = SAMPLE_RATE_352800_HZ; - break; - case 705600: - result = SAMPLE_RATE_705600_HZ; - break; - } - return result; - } - - const char* toString(IPAddress adr) { - static char str[11] = {0}; - snprintf(str, 11, "%d.%d.%d.%d", adr[0], adr[1], adr[2], adr[3]); - return str; - } - - /** - * @brief VBAN adjusts the number of samples per packet according to sample - *rate. Assuming 16-bit PCM mono, sample rates 11025, 22050, 44100, and 88200 - *yield packets containing 64, 128, 256, and 256 samples per packet, - *respectively. The even-thousands sample rates below 48000 yield - *non-power-of-2 lengths. For example, sample rate 24000 yields 139 samples - *per packet. This VBAN->DMA->DAC method seems to require the dma buffer - *length be set equal to the number of samples in each VBAN packet. ESP32 - *I2S/DMA does not seem to handle non-power-of-2 buffer lengths well. Sample - *rate 24000 doesn't work reliably at all. Sample rate 32000 is stable but - *stutters. Recommend selecting from sample rates 11025, 22050, 44100, and - *above And set samplesPerPacket to 64 for 11025, 128 for 22050, or 256 for - *all else. 
- **/ - - void receive_udp(AsyncUDPPacket& packet) { - uint16_t vban_rx_data_bytes, vban_rx_sample_count; - int16_t* vban_rx_data; - uint32_t* vban_rx_pkt_nbr; - uint16_t outBuf[VBAN_PACKET_MAX_SAMPLES + 1]; - size_t bytesOut; - - int len = packet.length(); - if (len > 0) { - LOGD("receive_udp %d", len); - uint8_t* udpIncomingPacket = packet.data(); - - // receive incoming UDP packet - // Check if packet length meets VBAN specification: - if (len < VBAN_PACKET_HEADER_BYTES) { - LOGE("Too short to be VBAN (%u bytes)", len); - return; - } - - // Check if preamble matches VBAN format: - if (strncmp("VBAN", (const char*)udpIncomingPacket, 4) != 0) { - LOGE("Unrecognized preamble %.4s", udpIncomingPacket); - return; - } - - uint8_t protocol = udpIncomingPacket[4] & VBAN_PROTOCOL_MASK; - - if (protocol == VBAN_PROTOCOL_SERVICE) { - // Allow up to ~1024 bytes for service packets like Ping0 - if (len > 1024) { - LOGE("Service packet length invalid: %u bytes", len); - return; - } - } else { - // Audio, serial, etc - if (len <= (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES) || len > VBAN_PACKET_MAX_LEN_BYTES) { - LOGE("Audio/other packet length invalid: %u bytes", len); - rx_buffer.reset(); - return; - } - } - - //LOGI("VBAN format byte: 0x%02X", udpIncomingPacket[7]); - //LOGD("VBAN protocol mask applied: 0x%02X", udpIncomingPacket[7] & VBAN_PROTOCOL_MASK); - //Serial.printf("Header[7] = 0x%02X\n", udpIncomingPacket[7]); - - - //------------------------------------------------------------------------- - //SUPPORT PING REQUEST - if ( protocol == VBAN_PROTOCOL_SERVICE ) { - - uint8_t service_type = udpIncomingPacket[5]; - uint8_t service_fnct = udpIncomingPacket[6]; - - if (service_type == VBAN_SERVICE_IDENTIFICATION) { - bool isReply = (service_fnct & VBAN_SERVICE_FNCT_REPLY) != 0; - uint8_t function = service_fnct & 0x7F; - - if (!isReply && function == 0) { - LOGI("Received VBAN PING0 request"); - sendVbanPing0Reply(packet); - } - } - return; - } - 
//-------------------------------------------------------------------------- - - vban_rx_data_bytes = - len - (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES); - vban_rx_pkt_nbr = (uint32_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES]; - vban_rx_data = (int16_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES + - VBAN_PACKET_COUNTER_BYTES]; - vban_rx_sample_count = vban_rx_data_bytes / (cfg.bits_per_sample / 8); - uint8_t vbanSampleRateIdx = udpIncomingPacket[4] & VBAN_SR_MASK; - uint8_t vbchannels = udpIncomingPacket[6] + 1; - uint8_t vbframes = udpIncomingPacket[5] + 1; - uint8_t vbformat = udpIncomingPacket[7] & VBAN_PROTOCOL_MASK; - uint8_t vbformat_bits = udpIncomingPacket[7] & VBAN_BIT_RESOLUTION_MASK; - uint32_t vbanSampleRate = VBanSRList[vbanSampleRateIdx]; - - //LOGD("sample_count: %d - frames: %d", vban_rx_sample_count, vbframes); - //assert (vban_rx_sample_count == vbframes*vbchannels); - - // E.g. do not process any text - if (vbformat != cfg.format){ - LOGE("Format ignored: 0x%x", vbformat); - return; - } - - // Currently we support only 16 bits. 
- if (vbformat_bits != VBAN_BITFMT_16_INT){ - LOGE("Format only 16 bits supported"); - return; - } - - // Just to be safe, re-check sample count against max sample count to - // avoid overrunning outBuf later - if (vban_rx_sample_count > VBAN_PACKET_MAX_SAMPLES) { - LOGE("unexpected packet size: %u", vban_rx_sample_count); - return; - } - - // update sample rate - if (cfg.sample_rate != vbanSampleRate || cfg.channels != vbchannels) { - // update audio info - cfg.sample_rate = vbanSampleRate; - cfg.channels = vbchannels; - setAudioInfo(cfg); - // remove any buffered data - rx_buffer.reset(); - available_active = false; - } - - if (p_out!=nullptr){ - int size_written = p_out->write((uint8_t*)vban_rx_data, vban_rx_data_bytes); - if (size_written != vban_rx_data_bytes) { - LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); - } - return; - } - - // write data to buffer - int size_written = rx_buffer.writeArray((uint8_t*)vban_rx_data, vban_rx_data_bytes); - if (size_written != vban_rx_data_bytes) { - LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); - } - - // report available bytes only when buffer is 50% full - if (!available_active) { - bytes_received += vban_rx_data_bytes; - if (bytes_received >= cfg.rx_buffer_count * DEFAULT_BUFFER_SIZE * 0.75){ - available_active = true; - LOGI("Activating vban"); - } - } - } - } -//------------------------------------------------------------------------------------- - //implement ping reply based on VBAN standard - void sendVbanPing0Reply(AsyncUDPPacket& sourcePacket) { - - // Prepare VBAN 28-byte service header - uint8_t header[28]; - memset(header, 0, sizeof(header)); - memcpy(header, "VBAN", 4); - header[4] = VBAN_PROTOCOL_SERVICE; - header[5] = VBAN_SERVICE_FNCT_PING0 | VBAN_SERVICE_FNCT_REPLY; // Service function + reply bit - header[6] = 0x00; // must be zero - // Copy incoming stream name from discovery packet - const uint8_t* data = sourcePacket.data(); - memcpy(&header[8], &data[8], 16); 
- // Copy frame number (little endian) - - uint32_t frameNumber = (uint32_t)((data[24] & 0xFF) | ((data[25] & 0xFF) << 8) | ((data[26] & 0xFF) << 16) | ((data[27] & 0xFF) << 24)); - memcpy(&header[24], &frameNumber, 4); - - // Construct the PING0 payload using the struct - VBAN_PING0 ping0; - memset(&ping0, 0, sizeof(ping0)); - - // Fill fields with your config data and fixed values - ping0.bitType = cfg.device_flags; - ping0.bitfeature = cfg.bitfeature; - ping0.bitfeatureEx = 0x00000000; - ping0.PreferedRate = 44100; - ping0.MinRate = 8000; - ping0.MaxRate = 96000; - ping0.color_rgb = cfg.device_color; - - // Version string, 8 bytes total (zero padded) - memcpy(ping0.nVersion, "v1.0", 4); - - // GPS_Position left empty (all zero), so no need to set - // USER_Position 8 bytes - memcpy(ping0.USER_Position, "USRPOS", 6); - // LangCode_ascii 8 bytes ("EN" + padding) - memset(ping0.LangCode_ascii, 0, sizeof(ping0.LangCode_ascii)); - memcpy(ping0.LangCode_ascii, "EN", 2); - // reserved_ascii and reservedEx are zeroed by memset - // IP as string, max 32 bytes - - char ipStr[16]; // Enough for "255.255.255.255\0" - sprintf(ipStr, "%d.%d.%d.%d", WiFi.localIP()[0], WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); - safe_strncpy(ping0.DistantIP_ascii, ipStr, sizeof(ping0.DistantIP_ascii)); - // Ports (network byte order) - - ping0.DistantPort = cfg.udp_port; //returs port I am listening for VBAN - more useful then UDP ephemeral port - ping0.DistantReserved = 0; - - // Device name (64 bytes) - if (cfg.device_name && cfg.device_name[0] != '\0') { - safe_strncpy(ping0.DeviceName_ascii, cfg.device_name, sizeof(ping0.DeviceName_ascii)); - } else { - uint8_t mac[6]; - WiFi.macAddress(mac); - char macStr[64]; - snprintf(macStr, sizeof(macStr), "%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - safe_strncpy(ping0.DeviceName_ascii, macStr, sizeof(ping0.DeviceName_ascii)); - } - - // Manufacturer name (64 bytes) - 
safe_strncpy(ping0.ManufacturerName_ascii, cfg.manufacturer_name, sizeof(ping0.ManufacturerName_ascii)); - // Application name (64 bytes) - safe_strncpy(ping0.ApplicationName_ascii, cfg.application_name, sizeof(ping0.ApplicationName_ascii)); - // Host name (64 bytes) - const char* hostName = cfg.host_name; - if (!hostName || hostName[0] == '\0') { - hostName = WiFi.getHostname(); - if (!hostName) hostName = "ESP32"; - } - safe_strncpy(ping0.HostName_ascii, hostName, sizeof(ping0.HostName_ascii)); - - // UserName_utf8 - safe_strncpy(ping0.UserName_utf8, cfg.user_name, sizeof(ping0.UserName_utf8)); - //UserComment_utf8 - safe_strncpy(ping0.UserComment_utf8, cfg.user_comment, sizeof(ping0.UserComment_utf8)); - - // Prepare final packet: header + payload - uint8_t packet[28 + sizeof(VBAN_PING0)]; - memcpy(packet, header, 28); - memcpy(packet + 28, &ping0, sizeof(VBAN_PING0)); - - // Send UDP packet - udp.writeTo(packet, sizeof(packet), sourcePacket.remoteIP(), sourcePacket.remotePort()); -} - - // Safely copy a C-string with guaranteed null termination - void safe_strncpy(char* dest, const char* src, size_t dest_size) { - if (dest_size == 0) return; - strncpy(dest, src, dest_size - 1); - dest[dest_size - 1] = '\0'; - } - //----------------------------------------------------------------------------------- -}; - -} // namespace audio_tools \ No newline at end of file +#pragma once +#WARNING("Obsolete - use /AudioTools/Communication/VBANStream.h ") +#include "AudioTools/Communication" \ No newline at end of file diff --git a/src/AudioTools/Communication/HLSStream.h b/src/AudioTools/Communication/HLSStream.h new file mode 100644 index 0000000000..50aff75668 --- /dev/null +++ b/src/AudioTools/Communication/HLSStream.h @@ -0,0 +1,781 @@ +#pragma once +#include "AudioTools/AudioCodecs/AudioEncoded.h" +#include "AudioTools/CoreAudio/AudioBasic/Str.h" +#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" +#include "AudioTools/CoreAudio/StreamCopy.h" +#include 
"AudioToolsConfig.h" + +#define MAX_HLS_LINE 512 +#define START_URLS_LIMIT 4 +#define HLS_BUFFER_COUNT 2 +#define HLS_MAX_NO_READ 2 +#define HLS_MAX_URL_LEN 256 +#define HLS_TIMEOUT 5000 +#define HLS_UNDER_OVERFLOW_WAIT_TIME 10 + +/// hide hls implementation in it's own namespace + +namespace audio_tools_hls { + +/*** + * @brief We feed the URLLoaderHLS with some url strings. The data of the + * related segments are provided via the readBytes() method. + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +template +class URLLoaderHLS { + public: + URLLoaderHLS() = default; + + ~URLLoaderHLS() { end(); } + + bool begin() { + TRACED(); + buffer.resize(buffer_size * buffer_count); + + active = true; + return true; + } + + void end() { + TRACED(); + url_stream.end(); + buffer.clear(); + active = false; + } + + /// Adds the next url to be played in sequence + void addUrl(const char *url) { + LOGI("Adding %s", url); + StrView url_str(url); + char *str = new char[url_str.length() + 1]; + memcpy(str, url_str.c_str(), url_str.length() + 1); + urls.push_back((const char *)str); + } + + /// Provides the number of open urls which can be played. Refills them, when + /// min limit is reached. + int urlCount() { return urls.size(); } + + /// Available bytes of the audio stream + int available() { + if (!active) return 0; + TRACED(); + bufferRefill(); + + return buffer.available(); + } + + /// Provides data from the audio stream + size_t readBytes(uint8_t *data, size_t len) { + if (!active) return 0; + TRACED(); + bufferRefill(); + + if (buffer.available() < len) LOGW("Buffer underflow"); + return buffer.readArray(data, len); + } + + const char *contentType() { + return url_stream.httpRequest().reply().get(CONTENT_TYPE); + } + + int contentLength() { return url_stream.contentLength(); } + + void setBufferSize(int size, int count) { + buffer_size = size; + buffer_count = count; + // support call after begin()! 
+ if (buffer.size() != 0) { + buffer.resize(buffer_size * buffer_count); + } + } + + void setCACert(const char *cert) { url_stream.setCACert(cert); } + + protected: + Vector urls{10}; + RingBuffer buffer{0}; + bool active = false; + int buffer_size = DEFAULT_BUFFER_SIZE; + int buffer_count = HLS_BUFFER_COUNT; + URLStream url_stream; + const char *url_to_play = nullptr; + + /// try to keep the buffer filled + void bufferRefill() { + TRACED(); + // we have nothing to do + if (urls.empty()) { + LOGD("urls empty"); + delay(HLS_UNDER_OVERFLOW_WAIT_TIME); + return; + } + if (buffer.availableForWrite() == 0) { + LOGD("buffer full"); + delay(HLS_UNDER_OVERFLOW_WAIT_TIME); + return; + } + + // switch current stream if we have no more data + if (!url_stream && !urls.empty()) { + LOGD("Refilling"); + if (url_to_play != nullptr) { + delete url_to_play; + } + url_to_play = urls[0]; + LOGI("playing %s", url_to_play); + url_stream.end(); + url_stream.setConnectionClose(true); + url_stream.setTimeout(HLS_TIMEOUT); + url_stream.begin(url_to_play); + url_stream.waitForData(HLS_TIMEOUT); + urls.pop_front(); + // assert(urls[0]!=url); + + LOGI("Playing %s of %d", url_stream.urlStr(), (int)urls.size()); + } + + int total = 0; + int failed = 0; + int to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); + // try to keep the buffer filled + while (to_write > 0) { + uint8_t tmp[to_write]; + memset(tmp, 0, to_write); + int read = url_stream.readBytes(tmp, to_write); + total += read; + if (read > 0) { + failed = 0; + buffer.writeArray(tmp, read); + LOGD("buffer add %d -> %d:", read, buffer.available()); + + to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); + } else { + delay(10); + } + // After we processed all data we close the stream to get a new url + if (url_stream.totalRead() == url_stream.contentLength()) { + LOGI("Closing stream because all bytes were processed: available: %d", + url_stream.available()); + url_stream.end(); + break; + } + LOGD("Refilled with 
%d now %d available to write", total, + buffer.availableForWrite()); + } + } +}; + +/** + * Prevent that the same url is loaded twice. We limit the history to + * 20 entries. + */ +class URLHistory { + public: + bool add(const char *url) { + if (url == nullptr) return true; + bool found = false; + StrView url_str(url); + for (int j = 0; j < history.size(); j++) { + if (url_str.equals(history[j])) { + found = true; + break; + } + } + if (!found) { + char *str = new char[url_str.length() + 1]; + memcpy(str, url, url_str.length() + 1); + history.push_back((const char *)str); + if (history.size() > 20) { + delete (history[0]); + history.pop_front(); + } + } + return !found; + } + + void clear() { history.clear(); } + + int size() { return history.size(); } + + protected: + Vector history; +}; + +/** + * @brief Simple Parser for HLS data. + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class HLSParser { + public: + // loads the index url + bool begin(const char *urlStr) { + index_url_str = urlStr; + return begin(); + } + + bool begin() { + TRACEI(); + segments_url_str = ""; + bandwidth = 0; + total_read = 0; + + if (!parseIndex()) { + TRACEE(); + return false; + } + + // in some exceptional cases the index provided segement info + if (url_loader.urlCount() == 0) { + if (!parseSegments()) { + TRACEE(); + return false; + } + } else { + segments_url_str = index_url_str; + segmentsActivate(); + } + + if (!url_loader.begin()) { + TRACEE(); + return false; + } + + return true; + } + + int available() { + TRACED(); + int result = 0; + reloadSegments(); + + if (active) result = url_loader.available(); + return result; + } + + size_t readBytes(uint8_t *data, size_t len) { + TRACED(); + size_t result = 0; + reloadSegments(); + + if (active) result = url_loader.readBytes(data, len); + total_read += result; + return result; + } + + const char *indexUrl() { return index_url_str; } + + const char *segmentsUrl() { return segments_url_str.c_str(); } + + /// Provides 
the codec + const char *getCodec() { return codec.c_str(); } + + /// Provides the content type of the audio data + const char *contentType() { return url_loader.contentType(); } + + /// Provides the http content lengh + int contentLength() { return url_loader.contentLength(); } + + /// Closes the processing + void end() { + TRACEI(); + codec.clear(); + segments_url_str.clear(); + url_stream.end(); + url_loader.end(); + url_history.clear(); + active = false; + } + + /// Defines the number of urls that are preloaded in the URLLoaderHLS + void setUrlCount(int count) { url_count = count; } + + /// Redefines the buffer size + void setBufferSize(int size, int count) { + url_loader.setBufferSize(size, count); + } + + void setCACert(const char *cert) { + url_stream.setCACert(cert); + url_loader.setCACert(cert); + } + + void setPowerSave(bool flag) { url_stream.setPowerSave(flag); } + + void setURLResolver(const char *(*cb)(const char *segment, + const char *reqURL)) { + resolve_url = cb; + } + /// Provides the hls url as string + const char *urlStr() { return url_str.c_str(); } + + /// Povides the number of bytes read + size_t totalRead() { return total_read; }; + + protected: + enum class URLType { Undefined, Index, Segment }; + URLType next_url_type = URLType::Undefined; + int bandwidth = 0; + int url_count = 5; + size_t total_read = 0; + bool url_active = false; + bool is_extm3u = false; + Str codec; + Str segments_url_str; + Str url_str; + const char *index_url_str = nullptr; + URLStream url_stream; + URLLoaderHLS url_loader; + URLHistory url_history; + bool active = false; + bool parse_segments_active = false; + int media_sequence = 0; + int segment_count = 0; + uint64_t next_sement_load_time_planned = 0; + float play_time = 0; + uint64_t next_sement_load_time = 0; + const char *(*resolve_url)(const char *segment, + const char *reqURL) = resolveURL; + + /// Default implementation for url resolver: determine absolue url from + /// relative url + static const char 
*resolveURL(const char *segment, const char *reqURL) { + // avoid dynamic memory allocation + static char result[HLS_MAX_URL_LEN] = {0}; + StrView result_str(result, HLS_MAX_URL_LEN); + StrView index_url(reqURL); + // Use prefix up to ? or laast / + int end = index_url.lastIndexOf("?"); + if (end >= 0) { + result_str.substring(reqURL, 0, end); + } else { + end = index_url.lastIndexOf("/"); + if (end >= 0) { + result_str.substring(reqURL, 0, end); + } + } + // Use the full url + if (result_str.isEmpty()) { + result_str = reqURL; + } + // add trailing / + if (!result_str.endsWith("/")) { + result_str.add("/"); + } + // add relative segment + result_str.add(segment); + LOGI(">> relative addr: %s for %s", segment, reqURL); + LOGD(">> -> %s", result); + return result; + } + + /// trigger the reloading of segments if the limit is underflowing + void reloadSegments() { + TRACED(); + // get new urls + if (!segments_url_str.isEmpty()) { + parseSegments(); + } + } + + /// parse the index file and the segments + bool parseIndex() { + TRACED(); + url_stream.end(); + url_stream.setTimeout(HLS_TIMEOUT); + url_stream.setConnectionClose(true); + if (!url_stream.begin(index_url_str)) return false; + url_active = true; + return parseIndexLines(); + } + + /// parse the index file + bool parseIndexLines() { + TRACEI(); + char tmp[MAX_HLS_LINE]; + bool result = true; + is_extm3u = false; + + // parse lines + memset(tmp, 0, MAX_HLS_LINE); + while (true) { + memset(tmp, 0, MAX_HLS_LINE); + size_t len = + url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); + // stop when there is no more data + if (len == 0 && url_stream.available() == 0) break; + StrView str(tmp); + + // check header + if (str.startsWith("#EXTM3U")) { + is_extm3u = true; + // reset timings + resetTimings(); + } + + if (is_extm3u) { + if (!parseIndexLine(str)) { + return false; + } + } + } + return result; + } + + /// Determine codec for min bandwidth + bool parseIndexLine(StrView &str) { + TRACED(); + 
LOGI("> %s", str.c_str()); + parseIndexLineMetaData(str); + // in some exceptional cases the index provided segement info + parseSegmentLineMetaData(str); + parseLineURL(str); + return true; + } + + bool parseIndexLineMetaData(StrView &str) { + int tmp_bandwidth; + if (str.startsWith("#")) { + if (str.indexOf("EXT-X-STREAM-INF") >= 0) { + next_url_type = URLType::Index; + // determine min bandwidth + int pos = str.indexOf("BANDWIDTH="); + if (pos > 0) { + StrView num(str.c_str() + pos + 10); + tmp_bandwidth = num.toInt(); + url_active = (tmp_bandwidth < bandwidth || bandwidth == 0); + if (url_active) { + bandwidth = tmp_bandwidth; + LOGD("-> bandwith: %d", bandwidth); + } + } + + pos = str.indexOf("CODECS="); + if (pos > 0) { + int start = pos + 8; + int end = str.indexOf('"', pos + 10); + codec.substring(str, start, end); + LOGI("-> codec: %s", codec.c_str()); + } + } + } + return true; + } + + void resetTimings() { + next_sement_load_time_planned = millis(); + play_time = 0; + next_sement_load_time = 0xFFFFFFFFFFFFFFFF; + } + + /// parse the segment url provided by the index + bool parseSegments() { + TRACED(); + if (parse_segments_active) { + return false; + } + + // make sure that we load at relevant schedule + if (millis() < next_sement_load_time && url_loader.urlCount() > 1) { + delay(1); + return false; + } + parse_segments_active = true; + + LOGI("Available urls: %d", url_loader.urlCount()); + + if (url_stream) url_stream.clear(); + LOGI("parsing %s", segments_url_str.c_str()); + + if (segments_url_str.isEmpty()) { + TRACEE(); + parse_segments_active = false; + return false; + } + + if (!url_stream.begin(segments_url_str.c_str())) { + TRACEE(); + parse_segments_active = false; + return false; + } + + segment_count = 0; + if (!parseSegmentLines()) { + TRACEE(); + parse_segments_active = false; + // do not display as error + return true; + } + + segmentsActivate(); + return true; + } + + void segmentsActivate() { + LOGI("Reloading in %f sec", play_time / 
1000.0); + if (play_time > 0) { + next_sement_load_time = next_sement_load_time_planned + play_time; + } + + // we request a minimum of collected urls to play before we start + if (url_history.size() > START_URLS_LIMIT) active = true; + parse_segments_active = false; + } + + /// parse the segments + bool parseSegmentLines() { + TRACEI(); + char tmp[MAX_HLS_LINE]; + bool result = true; + is_extm3u = false; + + // parse lines + memset(tmp, 0, MAX_HLS_LINE); + while (true) { + memset(tmp, 0, MAX_HLS_LINE); + size_t len = + url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); + if (len == 0 && url_stream.available() == 0) break; + StrView str(tmp); + + // check header + if (str.startsWith("#EXTM3U")) { + is_extm3u = true; + resetTimings(); + } + + if (is_extm3u) { + if (!parseSegmentLine(str)) { + return false; + } + } + } + return result; + } + + /// Add all segments to queue + bool parseSegmentLine(StrView &str) { + TRACED(); + LOGI("> %s", str.c_str()); + if (!parseSegmentLineMetaData(str)) return false; + parseLineURL(str); + return true; + } + + bool parseSegmentLineMetaData(StrView &str) { + if (str.startsWith("#")) { + if (str.startsWith("#EXT-X-MEDIA-SEQUENCE:")) { + int new_media_sequence = atoi(str.c_str() + 22); + LOGI("media_sequence: %d", new_media_sequence); + if (new_media_sequence == media_sequence) { + LOGW("MEDIA-SEQUENCE already loaded: %d", media_sequence); + return false; + } + media_sequence = new_media_sequence; + } + + // add play time to next_sement_load_time_planned + if (str.startsWith("#EXTINF")) { + next_url_type = URLType::Segment; + StrView sec_str(str.c_str() + 8); + float sec = sec_str.toFloat(); + LOGI("adding play time: %f sec", sec); + play_time += (sec * 1000.0); + } + } + return true; + } + + bool parseLineURL(StrView &str) { + if (!str.startsWith("#")) { + switch (next_url_type) { + case URLType::Undefined: + // we should not get here + assert(false); + break; + case URLType::Index: + if (str.startsWith("http")) { + 
segments_url_str.set(str); + } else { + segments_url_str.set(resolve_url(str.c_str(), index_url_str)); + } + LOGD("segments_url_str = %s", segments_url_str.c_str()); + break; + case URLType::Segment: + segment_count++; + if (url_history.add(str.c_str())) { + // provide audio urls to the url_loader + if (str.startsWith("http")) { + url_str = str; + } else { + // we create the complete url + url_str = resolve_url(str.c_str(), index_url_str); + } + url_loader.addUrl(url_str.c_str()); + } else { + LOGD("Duplicate ignored: %s", str.c_str()); + } + } + // clear url type + next_url_type = URLType::Undefined; + } + return true; + } +}; + +} // namespace audio_tools_hls + +namespace audio_tools { +/** + * @brief HTTP Live Streaming using HLS: The resulting .ts data is provided + * via readBytes() that dynamically reload new Segments. Please note that + * this reloading adds a considerable delay: So if you want to play back the + * audio, you should buffer the content in a seaparate task. + * + * @author Phil Schatzmann + * @ingroup http *@copyright GPLv3 + */ + +template +class HLSStreamT : public AbstractURLStream { + public: + /// Empty constructor + HLSStreamT() = default; + + /// Convenience constructor which logs in to the WiFi + HLSStreamT(const char *ssid, const char *password) { + setSSID(ssid); + setPassword(password); + } + + /// Open an HLS url + bool begin(const char *urlStr) { + TRACEI(); + login(); + // parse the url to the HLS + bool rc = parser.begin(urlStr); + return rc; + } + + /// Reopens the last url + bool begin() override { + TRACEI(); + login(); + bool rc = parser.begin(); + return rc; + } + + /// ends the request + void end() override { parser.end(); } + + /// Sets the ssid that will be used for logging in (when calling begin) + void setSSID(const char *ssid) override { this->ssid = ssid; } + + /// Sets the password that will be used for logging in (when calling begin) + void setPassword(const char *password) override { this->password = password; } + 
+ /// Returns the string representation of the codec of the audio stream + const char *codec() { return parser.getCodec(); } + + /// Provides the content type from the http reply + const char *contentType() { return parser.contentType(); } + + /// Provides the content length of the actual .ts Segment + int contentLength() override { return parser.contentLength(); } + + /// Provides number of available bytes in the read buffer + int available() override { + TRACED(); + return parser.available(); + } + + /// Provides the data fro the next .ts Segment + size_t readBytes(uint8_t *data, size_t len) override { + TRACED(); + return parser.readBytes(data, len); + } + + /// Redefines the read buffer size + void setBufferSize(int size, int count) { parser.setBufferSize(size, count); } + + /// Defines the certificate + void setCACert(const char *cert) override { parser.setCACert(cert); } + + /// Changes the Wifi to power saving mode + void setPowerSave(bool flag) override { parser.setPowerSave(flag); } + + /// Custom logic to provide the codec as Content-Type to support the + /// MultiCodec + const char *getReplyHeader(const char *header) override { + const char *codec = parser.getCodec(); + const char *result = nullptr; + if (StrView(header).equalsIgnoreCase(CONTENT_TYPE)) { + result = parser.contentType(); + } + if (result) LOGI("-> Format: %s", result); + return result; + } + + /// The resolving of relative addresses can be quite tricky: you can provide + /// your custom resolver implementation + void setURLResolver(const char *(*cb)(const char *segment, + const char *reqURL)) { + parser.setURLResolver(cb); + } + + const char *urlStr() override { return parser.urlStr(); } + + size_t totalRead() override { return parser.totalRead(); }; + /// not implemented + void setConnectionClose(bool flag) override {}; + /// not implemented + bool waitForData(int timeout) override { return false; } + + protected: + audio_tools_hls::HLSParser parser; + const char *ssid = nullptr; + const 
char *password = nullptr; + + void login() { +#ifdef USE_WIFI + if (ssid != nullptr && password != nullptr && + WiFi.status() != WL_CONNECTED) { + TRACED(); + delay(1000); + WiFi.begin(ssid, password); + while (WiFi.status() != WL_CONNECTED) { + Serial.print("."); + delay(500); + } + } +#else + LOGW("login not supported"); +#endif + } + + /// Added to comply with AbstractURLStream + bool begin(const char *urlStr, const char *acceptMime, MethodID action = GET, + const char *reqMime = "", const char *reqData = "") override { + return begin(urlStr); + } + + HttpRequest &httpRequest() override { + static HttpRequest dummy; + return dummy; + } + + /// Not implemented: potential future improvement + void setClient(Client &clientPar) override {} + + /// Not implemented + void addRequestHeader(const char *header, const char *value) override {} +}; + +using HLSStream = HLSStreamT; + +} // namespace audio_tools diff --git a/src/AudioTools/AudioLibs/HLSStreamESP32.h b/src/AudioTools/Communication/HLSStreamESP32.h similarity index 100% rename from src/AudioTools/AudioLibs/HLSStreamESP32.h rename to src/AudioTools/Communication/HLSStreamESP32.h diff --git a/src/AudioTools/Communication/README.md b/src/AudioTools/Communication/README.md index 97e4b99ed1..d677dd4146 100644 --- a/src/AudioTools/Communication/README.md +++ b/src/AudioTools/Communication/README.md @@ -1,2 +1,2 @@ -Different classes to send and receive audio over the wire \ No newline at end of file +Different optional classes to send and receive audio over the wire or air \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP.h b/src/AudioTools/Communication/RTSP.h index a0eb7c2cf6..3c6833dcfe 100644 --- a/src/AudioTools/Communication/RTSP.h +++ b/src/AudioTools/Communication/RTSP.h @@ -1,5 +1,13 @@ #pragma once +/** + * @defgroup rtsp RTSP Streaming + * @ingroup communications + * @file RTSP.h + * @author Phil Schatzmann + * @copyright GPLv3 + */ + #include "AudioTools/CoreAudio/AudioPlayer.h" 
#include "AudioTools/CoreAudio/AudioStreams.h" #include "RTSP/IAudioSource.h" @@ -8,6 +16,8 @@ #include "RTSP/RTSPFormat.h" #include "RTSP/RTSPOutput.h" #include "RTSP/RTSPAudioStreamer.h" +#include "RTSP/RTSPClient.h" #ifdef ESP32 #include "RTSP/RTSPPlatformWiFi.h" +#include "RTSP/RTSPClientWiFi.h" #endif \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP/IAudioSource.h b/src/AudioTools/Communication/RTSP/IAudioSource.h index 5c2d6080f3..c48896d9b7 100644 --- a/src/AudioTools/Communication/RTSP/IAudioSource.h +++ b/src/AudioTools/Communication/RTSP/IAudioSource.h @@ -18,7 +18,6 @@ namespace audio_tools { /** * @brief Audio Source Interface - Contract for Audio Data Providers * - * @version 0.1.1 */ class IAudioSource { public: diff --git a/src/AudioTools/Communication/RTSPClient555.h b/src/AudioTools/Communication/RTSPClient555.h new file mode 100644 index 0000000000..27517807a1 --- /dev/null +++ b/src/AudioTools/Communication/RTSPClient555.h @@ -0,0 +1,721 @@ + +#pragma once + +/** +This library is free software; you can redistribute it and/or modify it under +the terms of the GNU Lesser General Public License as published by the +Free Software Foundation; either version 3 of the License, or (at your +option) any later version. (See .) + +This library is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for +more details. + +You should have received a copy of the GNU Lesser General Public License +along with this library; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +**/ + +// Copyright (c) 1996-2023, Live Networks, Inc. All rights reserved +// A demo application, showing how to create and run a RTSP client (that can +// potentially receive multiple streams concurrently). 
+// + +#include "AudioLogger.h" +#include "Print.h" // Arduino Print +// include live555 +#include "BasicUsageEnvironment.hh" +//#include "liveMedia.hh" +#include "RTSPClient.hh" + +// By default, we request that the server stream its data using RTP/UDP. +// If, instead, you want to request that the server stream via RTP-over-TCP, +// change the following to True: +#define REQUEST_STREAMING_OVER_TCP false + +// by default, print verbose output from each "RTSPClient" +#define RTSP_CLIENT_VERBOSITY_LEVEL 1 +// Even though we're not going to be doing anything with the incoming data, we +// still need to receive it. Define the size of the buffer that we'll use: +#define RTSP_SINK_BUFFER_SIZE 1024 + +// If you don't want to see debugging output for each received frame, then +// comment out the following line: +#undef DEBUG_PRINT_EACH_RECEIVED_FRAME +#define DEBUG_PRINT_EACH_RECEIVED_FRAME 0 + +/// @brief AudioTools internal: rtsp +namespace audiotools_rtsp { + +class OurRTSPClient; +// The main streaming routine (or each "rtsp://" URL): +OurRTSPClient * openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); +// Counts how many streams (i.e., "RTSPClient"s) are currently in use. +static unsigned rtspClientCount = 0; +static char rtspEventLoopWatchVariable = 0; +static Print* rtspOutput = nullptr; +static uint32_t rtspSinkReceiveBufferSize = 0; +static bool rtspUseTCP = REQUEST_STREAMING_OVER_TCP; + +} // namespace audiotools_rtsp + +namespace audio_tools { + +/** + * @brief A simple RTSPClient using https://github.com/pschatzmann/arduino-live555 + * @ingroup communications + * @author Phil Schatzmann + * @copyright GPLv3 +*/ +class AudioClientRTSP { + public: + AudioClientRTSP(uint32_t receiveBufferSize = RTSP_SINK_BUFFER_SIZE, bool useTCP=REQUEST_STREAMING_OVER_TCP, bool blocking = false) { + setBufferSize(receiveBufferSize); + useTCP ? 
setTCP() : setUDP(); + setBlocking(blocking); + } + + void setBufferSize(int size){ + audiotools_rtsp::rtspSinkReceiveBufferSize = size; + } + + void setTCP(){ + audiotools_rtsp::rtspUseTCP = true; + } + + void setUDP(){ + audiotools_rtsp::rtspUseTCP = false; + } + + void setBlocking(bool flag){ + is_blocking = flag; + } + + /// login to wifi: optional convinience method. You can also just start Wifi the normal way + void setLogin(const char* ssid, const char* password){ + this->ssid = ssid; + this->password = password; + } + + /// Starts the processing + bool begin(const char* url, Print &out) { + audiotools_rtsp::rtspOutput = &out; + if (url==nullptr) { + return false; + } + if (!login()){ + LOGE("wifi down"); + return false; + } + // Begin by setting up our usage environment: + scheduler = BasicTaskScheduler::createNew(); + env = BasicUsageEnvironment::createNew(*scheduler); + + // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start + // streaming each one: + rtsp_client = audiotools_rtsp::openURL(*env, "RTSPClient", url); + + // All subsequent activity takes place within the event loop: + if (is_blocking) env->taskScheduler().doEventLoop(&audiotools_rtsp::rtspEventLoopWatchVariable); + // This function call does not return, unless, at some point in time, + // "rtspEventLoopWatchVariable" gets set to something non-zero. 
+ + return true; + } + + /// to be called in Arduino loop when blocking = false + void loop() { + if (audiotools_rtsp::rtspEventLoopWatchVariable==0) scheduler->SingleStep(); + } + + void end() { + audiotools_rtsp::rtspEventLoopWatchVariable = 1; + env->reclaim(); + env = NULL; + delete scheduler; + scheduler = NULL; + bool is_blocking = false; + } + + audiotools_rtsp::OurRTSPClient *client() { + return rtsp_client; + } + + protected: + audiotools_rtsp::OurRTSPClient* rtsp_client; + UsageEnvironment* env=nullptr; + BasicTaskScheduler* scheduler=nullptr; + const char* ssid=nullptr; + const char* password = nullptr; + bool is_blocking = false; + + /// login to wifi: optional convinience method. You can also just start Wifi the normal way + bool login(){ + if(WiFi.status() != WL_CONNECTED && ssid!=nullptr && password!=nullptr){ + WiFi.mode(WIFI_STA); + WiFi.begin(ssid, password); + while(WiFi.status() != WL_CONNECTED){ + Serial.print("."); + delay(100); + } + Serial.println(); + Serial.print("Local Address: "); + Serial.println(WiFi.localIP()); + } + return WiFi.status() == WL_CONNECTED; + } + + +}; + +} // namespace audio_tools + +namespace audiotools_rtsp { +// Define a class to hold per-stream state that we maintain throughout each +// stream's lifetime: + +// Forward function definitions: + +// RTSP 'response handlers': +void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, + char* resultString); +void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, + char* resultString); +void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, + char* resultString); + +// Other event handler functions: +void subsessionAfterPlaying( + void* clientData); // called when a stream's subsession (e.g., audio or + // video substream) ends +void subsessionByeHandler(void* clientData, char const* reason); +// called when a RTCP "BYE" is received for a subsession +void streamTimerHandler(void* clientData); +// called at the end of a stream's expected duration 
(if the stream has not +// already signaled its end using a RTCP "BYE") + +// Used to iterate through each stream's 'subsessions', setting up each one: +void setupNextSubsession(RTSPClient* rtspClient); + +// Used to shut down and close a stream (including its "RTSPClient" object): +void shutdownStream(RTSPClient* rtspClient, int exitCode = 1); + +// A function that outputs a string that identifies each stream (for debugging +// output). Modify this if you wish: +UsageEnvironment& operator<<(UsageEnvironment& env, + const RTSPClient& rtspClient) { + return env << "[URL:\"" << rtspClient.url() << "\"]: "; +} + +// A function that outputs a string that identifies each subsession (for +// debugging output). Modify this if you wish: +UsageEnvironment& operator<<(UsageEnvironment& env, + const MediaSubsession& subsession) { + return env << subsession.mediumName() << "/" << subsession.codecName(); +} + +class StreamClientState { + public: + StreamClientState(); + virtual ~StreamClientState(); + + public: + MediaSubsessionIterator* iter; + MediaSession* session; + MediaSubsession* subsession; + TaskToken streamTimerTask; + double duration; +}; + +// If you're streaming just a single stream (i.e., just from a single URL, +// once), then you can define and use just a single "StreamClientState" +// structure, as a global variable in your application. However, because - in +// this demo application - we're showing how to play multiple streams, +// concurrently, we can't do that. Instead, we have to have a separate +// "StreamClientState" structure for each "RTSPClient". 
To do this, we subclass +// "RTSPClient", and add a "StreamClientState" field to the subclass: + +class OurRTSPClient : public RTSPClient { + public: + static OurRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, + int verbosityLevel = 0, + char const* applicationName = NULL, + portNumBits tunnelOverHTTPPortNum = 0); + + protected: + OurRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, + char const* applicationName, portNumBits tunnelOverHTTPPortNum); + // called only by createNew(); + virtual ~OurRTSPClient(); + + public: + StreamClientState scs; +}; + +// Define a data sink (a subclass of "MediaSink") to receive the data for each +// subsession (i.e., each audio or video 'substream'). In practice, this might +// be a class (or a chain of classes) that decodes and then renders the incoming +// audio or video. Or it might be a "FileSink", for outputting the received data +// into a file (as is done by the "openRTSP" application). In this example code, +// however, we define a simple 'dummy' sink that receives incoming data, but +// does nothing with it. 
+ +class OurSink : public MediaSink { + public: + static OurSink* createNew( + UsageEnvironment& env, + MediaSubsession& + subsession, // identifies the kind of data that's being received + char const* streamId = NULL); // identifies the stream itself (optional) + + private: + OurSink(UsageEnvironment& env, MediaSubsession& subsession, + char const* streamId); + // called only by "createNew()" + virtual ~OurSink(); + + static void afterGettingFrame(void* clientData, unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds); + void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds); + + private: + // redefined virtual functions: + virtual Boolean continuePlaying(); + + private: + u_int8_t* fReceiveBuffer; + MediaSubsession& fSubsession; + char* fStreamId; +}; + +OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) { + // Begin by creating a "RTSPClient" object. Note that there is a separate + // "RTSPClient" object for each stream that we wish to receive (even if more + // than stream uses the same "rtsp://" URL). + OurRTSPClient* rtspClient = OurRTSPClient::createNew( + env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName); + if (rtspClient == NULL) { + env << "Failed to create a RTSP client for URL \"" << rtspURL + << "\": " << env.getResultMsg() << "\n"; + return nullptr; + } + + ++rtspClientCount; + + // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the + // stream. Note that this command - like all RTSP commands - is sent + // asynchronously; we do not block, waiting for a response. 
Instead, the + // following function call returns immediately, and we handle the RTSP + // response later, from within the event loop: + rtspClient->sendDescribeCommand(continueAfterDESCRIBE); + return rtspClient; +} + +// Implementation of the RTSP 'response handlers': + +void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, + char* resultString) { + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to get a SDP description: " << resultString + << "\n"; + delete[] resultString; + break; + } + + char* const sdpDescription = resultString; + env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; + + // Create a media session object from this SDP description: + scs.session = MediaSession::createNew(env, sdpDescription); + delete[] sdpDescription; // because we don't need it anymore + if (scs.session == NULL) { + env << *rtspClient + << "Failed to create a MediaSession object from the SDP description: " + << env.getResultMsg() << "\n"; + break; + } else if (!scs.session->hasSubsessions()) { + env << *rtspClient + << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; + break; + } + + // Then, create and set up our data source objects for the session. We do + // this by iterating over the session's 'subsessions', calling + // "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, + // on each one. (Each 'subsession' will have its own data source.) + scs.iter = new MediaSubsessionIterator(*scs.session); + setupNextSubsession(rtspClient); + return; + } while (0); + + // An unrecoverable error occurred with this stream. 
+ shutdownStream(rtspClient); +} + +void setupNextSubsession(RTSPClient* rtspClient) { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + scs.subsession = scs.iter->next(); + if (scs.subsession != NULL) { + if (!scs.subsession->initiate()) { + env << *rtspClient << "Failed to initiate the \"" << *scs.subsession + << "\" subsession: " << env.getResultMsg() << "\n"; + setupNextSubsession( + rtspClient); // give up on this subsession; go to the next one + } else { + env << *rtspClient << "Initiated the \"" << *scs.subsession + << "\" subsession ("; + if (scs.subsession->rtcpIsMuxed()) { + env << "client port " << scs.subsession->clientPortNum(); + } else { + env << "client ports " << scs.subsession->clientPortNum() << "-" + << scs.subsession->clientPortNum() + 1; + } + env << ")\n"; + + // Continue setting up this subsession, by sending a RTSP "SETUP" command: + rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, + rtspUseTCP); + } + return; + } + + // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" + // command to start the streaming: + if (scs.session->absStartTime() != NULL) { + // Special case: The stream is indexed by 'absolute' time, so send an + // appropriate "PLAY" command: + rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, + scs.session->absStartTime(), + scs.session->absEndTime()); + } else { + scs.duration = scs.session->playEndTime() - scs.session->playStartTime(); + rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY); + } +} + +void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, + char* resultString) { + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to set up the \"" << *scs.subsession + << "\" subsession: " << resultString << "\n"; + break; + } + + env << *rtspClient << "Set up the \"" << *scs.subsession + << "\" subsession ("; + if (scs.subsession->rtcpIsMuxed()) { + env << "client port " << scs.subsession->clientPortNum(); + } else { + env << "client ports " << scs.subsession->clientPortNum() << "-" + << scs.subsession->clientPortNum() + 1; + } + env << ")\n"; + + // Having successfully setup the subsession, create a data sink for it, and + // call "startPlaying()" on it. (This will prepare the data sink to receive + // data; the actual flow of data from the client won't start happening until + // later, after we've sent a RTSP "PLAY" command.) 
+ + scs.subsession->sink = + OurSink::createNew(env, *scs.subsession, rtspClient->url()); + // perhaps use your own custom "MediaSink" subclass instead + if (scs.subsession->sink == NULL) { + env << *rtspClient << "Failed to create a data sink for the \"" + << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; + break; + } + + env << *rtspClient << "Created a data sink for the \"" << *scs.subsession + << "\" subsession\n"; + scs.subsession->miscPtr = + rtspClient; // a hack to let subsession handler functions get the + // "RTSPClient" from the subsession + scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), + subsessionAfterPlaying, scs.subsession); + // Also set a handler to be called if a RTCP "BYE" arrives for this + // subsession: + if (scs.subsession->rtcpInstance() != NULL) { + scs.subsession->rtcpInstance()->setByeWithReasonHandler( + subsessionByeHandler, scs.subsession); + } + } while (0); + delete[] resultString; + + // Set up the next subsession, if any: + setupNextSubsession(rtspClient); +} + +void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, + char* resultString) { + Boolean success = False; + + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to start playing session: " << resultString + << "\n"; + break; + } + + // Set a timer to be handled at the end of the stream's expected duration + // (if the stream does not already signal its end using a RTCP "BYE"). This + // is optional. If, instead, you want to keep the stream active - e.g., so + // you can later 'seek' back within it and do another RTSP "PLAY" - then you + // can omit this code. (Alternatively, if you don't want to receive the + // entire stream, you could set this timer for some shorter value.) 
+ if (scs.duration > 0) { + unsigned const delaySlop = + 2; // number of seconds extra to delay, after the stream's expected + // duration. (This is optional.) + scs.duration += delaySlop; + unsigned uSecsToDelay = (unsigned)(scs.duration * 1000000); + scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask( + uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); + } + + env << *rtspClient << "Started playing session"; + if (scs.duration > 0) { + env << " (for up to " << scs.duration << " seconds)"; + } + env << "...\n"; + + success = True; + } while (0); + delete[] resultString; + + if (!success) { + // An unrecoverable error occurred with this stream. + shutdownStream(rtspClient); + } +} + +// Implementation of the other event handlers: + +void subsessionAfterPlaying(void* clientData) { + MediaSubsession* subsession = (MediaSubsession*)clientData; + RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); + + // Begin by closing this subsession's stream: + Medium::close(subsession->sink); + subsession->sink = NULL; + + // Next, check whether *all* subsessions' streams have now been closed: + MediaSession& session = subsession->parentSession(); + MediaSubsessionIterator iter(session); + while ((subsession = iter.next()) != NULL) { + if (subsession->sink != NULL) return; // this subsession is still active + } + + // All subsessions' streams have now been closed, so shutdown the client: + shutdownStream(rtspClient); +} + +void subsessionByeHandler(void* clientData, char const* reason) { + MediaSubsession* subsession = (MediaSubsession*)clientData; + RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; + UsageEnvironment& env = rtspClient->envir(); // alias + + env << *rtspClient << "Received RTCP \"BYE\""; + if (reason != NULL) { + env << " (reason:\"" << reason << "\")"; + delete[] (char*)reason; + } + env << " on \"" << *subsession << "\" subsession\n"; + + // Now act as if the subsession had closed: + subsessionAfterPlaying(subsession); +} + 
+void streamTimerHandler(void* clientData) { + OurRTSPClient* rtspClient = (OurRTSPClient*)clientData; + StreamClientState& scs = rtspClient->scs; // alias + + scs.streamTimerTask = NULL; + + // Shut down the stream: + shutdownStream(rtspClient); +} + +void shutdownStream(RTSPClient* rtspClient, int exitCode) { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + // First, check whether any subsessions have still to be closed: + if (scs.session != NULL) { + Boolean someSubsessionsWereActive = False; + MediaSubsessionIterator iter(*scs.session); + MediaSubsession* subsession; + + while ((subsession = iter.next()) != NULL) { + if (subsession->sink != NULL) { + Medium::close(subsession->sink); + subsession->sink = NULL; + + if (subsession->rtcpInstance() != NULL) { + subsession->rtcpInstance()->setByeHandler( + NULL, NULL); // in case the server sends a RTCP "BYE" while + // handling "TEARDOWN" + } + + someSubsessionsWereActive = True; + } + } + + if (someSubsessionsWereActive) { + // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the + // stream. Don't bother handling the response to the "TEARDOWN". + rtspClient->sendTeardownCommand(*scs.session, NULL); + } + } + + env << *rtspClient << "Closing the stream.\n"; + Medium::close(rtspClient); + // Note that this will also cause this stream's "StreamClientState" structure + // to get reclaimed. + + if (--rtspClientCount == 0) { + // The final stream has ended, so exit the application now. + // (Of course, if you're embedding this code into your own application, you + // might want to comment this out, and replace it with + // "rtspEventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, + // and continue running "main()".) 
+ // exit(exitCode); + rtspEventLoopWatchVariable = 1; + return; + } +} + +// Implementation of "OurRTSPClient": + +OurRTSPClient* OurRTSPClient::createNew(UsageEnvironment& env, + char const* rtspURL, int verbosityLevel, + char const* applicationName, + portNumBits tunnelOverHTTPPortNum) { + return new OurRTSPClient(env, rtspURL, verbosityLevel, applicationName, + tunnelOverHTTPPortNum); +} + +OurRTSPClient::OurRTSPClient(UsageEnvironment& env, char const* rtspURL, + int verbosityLevel, char const* applicationName, + portNumBits tunnelOverHTTPPortNum) + : RTSPClient(env, rtspURL, verbosityLevel, applicationName, + tunnelOverHTTPPortNum, -1) {} + +OurRTSPClient::~OurRTSPClient() {} + +// Implementation of "StreamClientState": + +StreamClientState::StreamClientState() + : iter(NULL), + session(NULL), + subsession(NULL), + streamTimerTask(NULL), + duration(0.0) {} + +StreamClientState::~StreamClientState() { + delete iter; + if (session != NULL) { + // We also need to delete "session", and unschedule "streamTimerTask" (if + // set) + UsageEnvironment& env = session->envir(); // alias + + env.taskScheduler().unscheduleDelayedTask(streamTimerTask); + Medium::close(session); + } +} + +// Implementation of "OurSink": + +OurSink* OurSink::createNew(UsageEnvironment& env, + MediaSubsession& subsession, + char const* streamId) { + return new OurSink(env, subsession, streamId); +} + +OurSink::OurSink(UsageEnvironment& env, MediaSubsession& subsession, + char const* streamId) + : MediaSink(env), fSubsession(subsession) { + fStreamId = strDup(streamId); + fReceiveBuffer = new u_int8_t[rtspSinkReceiveBufferSize]; +} + +OurSink::~OurSink() { + delete[] fReceiveBuffer; + delete[] fStreamId; +} + +void OurSink::afterGettingFrame(void* clientData, unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds) { + OurSink* sink = (OurSink*)clientData; + sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, + 
durationInMicroseconds); +} + +void OurSink::afterGettingFrame(unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned /*durationInMicroseconds*/) { + // We've just received a frame of data. (Optionally) print out information + // about it: +#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME + if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; "; + envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() + << ":\tReceived " << frameSize << " bytes"; + if (numTruncatedBytes > 0) + envir() << " (with " << numTruncatedBytes << " bytes truncated)"; + char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the + // presentation time + snprintf(uSecsStr,7 , "%06u", (unsigned)presentationTime.tv_usec); + envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." + << uSecsStr; + if (fSubsession.rtpSource() != NULL && + !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) { + envir() << "!"; // mark the debugging output to indicate that this + // presentation time is not RTCP-synchronized + } +#ifdef DEBUG_PRINT_NPT + envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime); +#endif + envir() << "\n"; +#endif + + // Decode the data + if (rtspOutput) { + size_t writtenSize = rtspOutput->write(fReceiveBuffer, frameSize); + assert(writtenSize == frameSize); + } + + // Then continue, to request the next frame of data: + continuePlaying(); +} + +Boolean OurSink::continuePlaying() { + if (fSource == NULL) return False; // sanity check (should not happen) + + // Request the next frame of data from our input source. 
"afterGettingFrame()" + // will get called later, when it arrives: + fSource->getNextFrame(fReceiveBuffer, rtspSinkReceiveBufferSize, + afterGettingFrame, this, onSourceClosure, this); + return True; +} + +} // namespace audiotools_rtsp \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/vban/vban.h b/src/AudioTools/Communication/VBAN/vban.h similarity index 100% rename from src/AudioTools/AudioLibs/vban/vban.h rename to src/AudioTools/Communication/VBAN/vban.h diff --git a/src/AudioTools/Communication/VBANStream.h b/src/AudioTools/Communication/VBANStream.h new file mode 100644 index 0000000000..b1637d022e --- /dev/null +++ b/src/AudioTools/Communication/VBANStream.h @@ -0,0 +1,592 @@ + +#include +#include + +#include "AudioTools/Communication/VBAN/vban.h" +#include "AudioTools/CoreAudio/AudioStreams.h" +#include "AudioTools/Concurrency/RTOS/BufferRTOS.h" + +namespace audio_tools { + +class VBANConfig : public AudioInfo { + public: + VBANConfig() { + sample_rate = 11025; + channels = 1; + bits_per_sample = 16; + } + RxTxMode mode; + /// name of the stream + const char* stream_name = "Stream1"; + /// default port is 6980 + uint16_t udp_port = 6980; + /// Use {0,0,0,0}; as broadcast address + IPAddress target_ip{0, 0, 0, 0}; + /// ssid for wifi connection + const char* ssid = nullptr; + /// password for wifi connection + const char* password = nullptr; + int rx_buffer_count = 30; + // set to true if samples are generated faster then sample rate + bool throttle_active = false; + // when negative the number of ms that are subtracted from the calculated wait + // time to fine tune Overload and Underruns + int throttle_correction_us = 0; + // defines the max write size + int max_write_size = + DEFAULT_BUFFER_SIZE * 2; // just good enough for 44100 stereo + uint8_t format = 0; + + //reply for discovery packet + uint32_t device_flags = 0x00000001; // default: receiver only + uint32_t bitfeature = 0x00000001; // default: audio only + uint32_t device_color = 
0x00FF00; // green default + //const char* stream_name_reply = "VBAN SPOT PING"; + const char* device_name = nullptr; // nullptr means use MAC by default + const char* manufacturer_name = "ESP32 AudioTools"; + const char* application_name = "VBAN Streamer"; + const char* host_name = nullptr; // will fallback to WiFi.getHostname() + const char* user_name = "User"; + const char* user_comment = "ESP32 VBAN Audio Device"; +}; + +/** + * @brief VBAN Audio Source and Sink for the ESP32. For further details please + * see https://vb-audio.com/Voicemeeter/vban.htm . + * Inspired by https://github.com/rkinnett/ESP32-VBAN-Audio-Source/tree/master + * and https://github.com/rkinnett/ESP32-VBAN-Network-Audio-Player + * @ingroup communications + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +class VBANStream : public AudioStream { + public: + VBANConfig defaultConfig(RxTxMode mode = TX_MODE) { + VBANConfig def; + def.mode = mode; + return def; + } + + void setOutput(Print &out){ + p_out = &out; + } + + void setAudioInfo(AudioInfo info) override { + cfg.copyFrom(info); + AudioStream::setAudioInfo(info); + auto thc = throttle.defaultConfig(); + thc.copyFrom(info); + thc.correction_us = cfg.throttle_correction_us; + throttle.begin(thc); + if (cfg.mode == TX_MODE) { + configure_tx(); + } + } + + bool begin(VBANConfig cfg) { + this->cfg = cfg; + setAudioInfo(cfg); + return begin(); + } + + bool begin() { + if (cfg.mode == TX_MODE) { + if (cfg.bits_per_sample != 16) { + LOGE("Only 16 bits supported") + return false; + } + tx_buffer.resize(VBAN_PACKET_NUM_SAMPLES); + return begin_tx(); + } else { +#ifdef ESP32 + rx_buffer.resize(DEFAULT_BUFFER_SIZE * cfg.rx_buffer_count); + rx_buffer.setReadMaxWait(10); +#else + rx_buffer.resize(DEFAULT_BUFFER_SIZE, cfg.rx_buffer_count); +#endif + return begin_rx(); + } + } + + size_t write(const uint8_t* data, size_t len) override { + if (!udp_connected) return 0; + + int16_t* adc_data = (int16_t*)data; + size_t samples = len / 
(cfg.bits_per_sample/8); + + // limit output speed + if (cfg.throttle_active) { + throttle.delayFrames(samples / cfg.channels); + } + + for (int j = 0; j < samples; j++) { + tx_buffer.write(adc_data[j]); + if (tx_buffer.availableForWrite() == 0) { + memcpy(vban.data_frame, tx_buffer.data(), vban.packet_data_bytes); + *vban.packet_counter = packet_counter; // increment packet counter + // Send packet + if (cfg.target_ip == broadcast_address) { + udp.broadcastTo((uint8_t*)&vban.packet, vban.packet_total_bytes, + cfg.udp_port); + } else { + udp.writeTo((uint8_t*)&vban.packet, vban.packet_total_bytes, + cfg.target_ip, cfg.udp_port); + } + // defile delay start time + packet_counter++; + tx_buffer.reset(); + } + } + return len; + } + + int availableForWrite() { return cfg.max_write_size; } + + size_t readBytes(uint8_t* data, size_t len) override { + TRACED(); + size_t samples = len / (cfg.bits_per_sample/8); + if (cfg.throttle_active) { + throttle.delayFrames(samples / cfg.channels); + } + return rx_buffer.readArray(data, len); + } + + int available() { return available_active ? 
rx_buffer.available() : 0; } + + protected: + const IPAddress broadcast_address{0, 0, 0, 0}; + AsyncUDP udp; + VBan vban; + VBANConfig cfg; + SingleBuffer tx_buffer{0}; + #ifdef ESP32 + BufferRTOS rx_buffer{ 0}; + #else + NBuffer rx_buffer{DEFAULT_BUFFER_SIZE, 0}; + #endif + bool udp_connected = false; + uint32_t packet_counter = 0; + Throttle throttle; + size_t bytes_received = 0; + bool available_active = false; + Print *p_out = nullptr; + + bool begin_tx() { + if (!configure_tx()) { + return false; + } + start_wifi(); + if (WiFi.status() != WL_CONNECTED) { + LOGE("Wifi not connected"); + return false; + } + WiFi.setSleep(false); + IPAddress myIP = WiFi.localIP(); + udp_connected = udp.connect(myIP, cfg.udp_port); + return udp_connected; + } + + bool begin_rx() { + start_wifi(); + if (WiFi.status() != WL_CONNECTED) { + LOGE("Wifi not connected"); + return false; + } + WiFi.setSleep(false); + bytes_received = 0; + this->available_active = false; + // connect to target + if (!udp.listen(cfg.udp_port)) { + LOGE("Could not connect to '%s:%d' target", toString(cfg.target_ip), + cfg.udp_port); + } + // handle data + udp.onPacket([this](AsyncUDPPacket packet) { receive_udp(packet); }); + + return true; + } + + bool configure_tx() { + int rate = vban_sample_rate(); + if (rate < 0) { + LOGE("Invalid sample rate: %d", cfg.sample_rate); + return false; + } + configure_vban((VBanSampleRates)rate); + return true; + } + + void start_wifi() { + if (cfg.ssid == nullptr) return; + if (cfg.password == nullptr) return; + LOGI("ssid %s", cfg.ssid); + // Setup Wifi: + WiFi.begin(cfg.ssid, cfg.password); // Connect to your WiFi router + while (WiFi.status() != WL_CONNECTED) { // Wait for connection + delay(500); + Serial.print("."); + } + Serial.println(); + + LOGI("Wifi connected to IP (%d.%d.%d.%d)", WiFi.localIP()[0], + WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); + } + + void configure_vban(VBanSampleRates rate) { + // Set vban packet header, counter, and data frame 
pointers to respective + // parts of packet: + vban.hdr = (VBanHeader*)&vban.packet[0]; + vban.packet_counter = (uint32_t*)&vban.packet[VBAN_PACKET_HEADER_BYTES]; + vban.data_frame = + (uint8_t*)&vban + .packet[VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES]; + + // Setup the packet header: + strncpy(vban.hdr->preamble, "VBAN", 4); + vban.hdr->sample_rate = + static_cast(VBAN_PROTOCOL_AUDIO) | + rate; // 11025 Hz, which matches default sample rate for soundmodem + vban.hdr->num_samples = + (VBAN_PACKET_NUM_SAMPLES / cfg.channels) - 1; // 255 = 256 samples + vban.hdr->num_channels = cfg.channels - 1; // 0 = 1 channel + vban.hdr->sample_format = + static_cast(VBAN_BITFMT_16_INT) | VBAN_CODEC_PCM; // int16 PCM + strncpy(vban.hdr->stream_name, cfg.stream_name, + min((int)strlen(cfg.stream_name), VBAN_STREAM_NAME_SIZE)); + + vban.packet_data_bytes = + (vban.hdr->num_samples + 1) * (vban.hdr->num_channels + 1) * + ((vban.hdr->sample_format & VBAN_BIT_RESOLUTION_MASK) + 1); + vban.packet_total_bytes = vban.packet_data_bytes + + VBAN_PACKET_HEADER_BYTES + + VBAN_PACKET_COUNTER_BYTES; + } + + int vban_sample_rate() { + int result = -1; + switch (cfg.sample_rate) { + case 6000: + result = SAMPLE_RATE_6000_HZ; + break; + case 12000: + result = SAMPLE_RATE_12000_HZ; + break; + case 24000: + result = SAMPLE_RATE_24000_HZ; + break; + case 48000: + result = SAMPLE_RATE_48000_HZ; + break; + case 96000: + result = SAMPLE_RATE_96000_HZ; + break; + case 192000: + result = SAMPLE_RATE_192000_HZ; + break; + case 384000: + result = SAMPLE_RATE_384000_HZ; + break; + case 8000: + result = SAMPLE_RATE_8000_HZ; + break; + case 16000: + result = SAMPLE_RATE_16000_HZ; + break; + case 32000: + result = SAMPLE_RATE_32000_HZ; + break; + case 64000: + result = SAMPLE_RATE_64000_HZ; + break; + case 128000: + result = SAMPLE_RATE_128000_HZ; + break; + case 256000: + result = SAMPLE_RATE_256000_HZ; + break; + case 512000: + result = SAMPLE_RATE_512000_HZ; + break; + case 11025: + result = 
SAMPLE_RATE_11025_HZ; + break; + case 22050: + result = SAMPLE_RATE_22050_HZ; + break; + case 44100: + result = SAMPLE_RATE_44100_HZ; + break; + case 88200: + result = SAMPLE_RATE_88200_HZ; + break; + case 176400: + result = SAMPLE_RATE_176400_HZ; + break; + case 352800: + result = SAMPLE_RATE_352800_HZ; + break; + case 705600: + result = SAMPLE_RATE_705600_HZ; + break; + } + return result; + } + + const char* toString(IPAddress adr) { + static char str[11] = {0}; + snprintf(str, 11, "%d.%d.%d.%d", adr[0], adr[1], adr[2], adr[3]); + return str; + } + + /** + * @brief VBAN adjusts the number of samples per packet according to sample + *rate. Assuming 16-bit PCM mono, sample rates 11025, 22050, 44100, and 88200 + *yield packets containing 64, 128, 256, and 256 samples per packet, + *respectively. The even-thousands sample rates below 48000 yield + *non-power-of-2 lengths. For example, sample rate 24000 yields 139 samples + *per packet. This VBAN->DMA->DAC method seems to require the dma buffer + *length be set equal to the number of samples in each VBAN packet. ESP32 + *I2S/DMA does not seem to handle non-power-of-2 buffer lengths well. Sample + *rate 24000 doesn't work reliably at all. Sample rate 32000 is stable but + *stutters. Recommend selecting from sample rates 11025, 22050, 44100, and + *above And set samplesPerPacket to 64 for 11025, 128 for 22050, or 256 for + *all else. 
+ **/ + + void receive_udp(AsyncUDPPacket& packet) { + uint16_t vban_rx_data_bytes, vban_rx_sample_count; + int16_t* vban_rx_data; + uint32_t* vban_rx_pkt_nbr; + uint16_t outBuf[VBAN_PACKET_MAX_SAMPLES + 1]; + size_t bytesOut; + + int len = packet.length(); + if (len > 0) { + LOGD("receive_udp %d", len); + uint8_t* udpIncomingPacket = packet.data(); + + // receive incoming UDP packet + // Check if packet length meets VBAN specification: + if (len < VBAN_PACKET_HEADER_BYTES) { + LOGE("Too short to be VBAN (%u bytes)", len); + return; + } + + // Check if preamble matches VBAN format: + if (strncmp("VBAN", (const char*)udpIncomingPacket, 4) != 0) { + LOGE("Unrecognized preamble %.4s", udpIncomingPacket); + return; + } + + uint8_t protocol = udpIncomingPacket[4] & VBAN_PROTOCOL_MASK; + + if (protocol == VBAN_PROTOCOL_SERVICE) { + // Allow up to ~1024 bytes for service packets like Ping0 + if (len > 1024) { + LOGE("Service packet length invalid: %u bytes", len); + return; + } + } else { + // Audio, serial, etc + if (len <= (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES) || len > VBAN_PACKET_MAX_LEN_BYTES) { + LOGE("Audio/other packet length invalid: %u bytes", len); + rx_buffer.reset(); + return; + } + } + + //LOGI("VBAN format byte: 0x%02X", udpIncomingPacket[7]); + //LOGD("VBAN protocol mask applied: 0x%02X", udpIncomingPacket[7] & VBAN_PROTOCOL_MASK); + //Serial.printf("Header[7] = 0x%02X\n", udpIncomingPacket[7]); + + + //------------------------------------------------------------------------- + //SUPPORT PING REQUEST + if ( protocol == VBAN_PROTOCOL_SERVICE ) { + + uint8_t service_type = udpIncomingPacket[5]; + uint8_t service_fnct = udpIncomingPacket[6]; + + if (service_type == VBAN_SERVICE_IDENTIFICATION) { + bool isReply = (service_fnct & VBAN_SERVICE_FNCT_REPLY) != 0; + uint8_t function = service_fnct & 0x7F; + + if (!isReply && function == 0) { + LOGI("Received VBAN PING0 request"); + sendVbanPing0Reply(packet); + } + } + return; + } + 
//-------------------------------------------------------------------------- + + vban_rx_data_bytes = + len - (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES); + vban_rx_pkt_nbr = (uint32_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES]; + vban_rx_data = (int16_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES + + VBAN_PACKET_COUNTER_BYTES]; + vban_rx_sample_count = vban_rx_data_bytes / (cfg.bits_per_sample / 8); + uint8_t vbanSampleRateIdx = udpIncomingPacket[4] & VBAN_SR_MASK; + uint8_t vbchannels = udpIncomingPacket[6] + 1; + uint8_t vbframes = udpIncomingPacket[5] + 1; + uint8_t vbformat = udpIncomingPacket[7] & VBAN_PROTOCOL_MASK; + uint8_t vbformat_bits = udpIncomingPacket[7] & VBAN_BIT_RESOLUTION_MASK; + uint32_t vbanSampleRate = VBanSRList[vbanSampleRateIdx]; + + //LOGD("sample_count: %d - frames: %d", vban_rx_sample_count, vbframes); + //assert (vban_rx_sample_count == vbframes*vbchannels); + + // E.g. do not process any text + if (vbformat != cfg.format){ + LOGE("Format ignored: 0x%x", vbformat); + return; + } + + // Currently we support only 16 bits. 
+ if (vbformat_bits != VBAN_BITFMT_16_INT){ + LOGE("Format only 16 bits supported"); + return; + } + + // Just to be safe, re-check sample count against max sample count to + // avoid overrunning outBuf later + if (vban_rx_sample_count > VBAN_PACKET_MAX_SAMPLES) { + LOGE("unexpected packet size: %u", vban_rx_sample_count); + return; + } + + // update sample rate + if (cfg.sample_rate != vbanSampleRate || cfg.channels != vbchannels) { + // update audio info + cfg.sample_rate = vbanSampleRate; + cfg.channels = vbchannels; + setAudioInfo(cfg); + // remove any buffered data + rx_buffer.reset(); + available_active = false; + } + + if (p_out!=nullptr){ + int size_written = p_out->write((uint8_t*)vban_rx_data, vban_rx_data_bytes); + if (size_written != vban_rx_data_bytes) { + LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); + } + return; + } + + // write data to buffer + int size_written = rx_buffer.writeArray((uint8_t*)vban_rx_data, vban_rx_data_bytes); + if (size_written != vban_rx_data_bytes) { + LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); + } + + // report available bytes only when buffer is 50% full + if (!available_active) { + bytes_received += vban_rx_data_bytes; + if (bytes_received >= cfg.rx_buffer_count * DEFAULT_BUFFER_SIZE * 0.75){ + available_active = true; + LOGI("Activating vban"); + } + } + } + } +//------------------------------------------------------------------------------------- + //implement ping reply based on VBAN standard + void sendVbanPing0Reply(AsyncUDPPacket& sourcePacket) { + + // Prepare VBAN 28-byte service header + uint8_t header[28]; + memset(header, 0, sizeof(header)); + memcpy(header, "VBAN", 4); + header[4] = VBAN_PROTOCOL_SERVICE; + header[5] = VBAN_SERVICE_FNCT_PING0 | VBAN_SERVICE_FNCT_REPLY; // Service function + reply bit + header[6] = 0x00; // must be zero + // Copy incoming stream name from discovery packet + const uint8_t* data = sourcePacket.data(); + memcpy(&header[8], &data[8], 16); 
+ // Copy frame number (little endian) + + uint32_t frameNumber = (uint32_t)((data[24] & 0xFF) | ((data[25] & 0xFF) << 8) | ((data[26] & 0xFF) << 16) | ((data[27] & 0xFF) << 24)); + memcpy(&header[24], &frameNumber, 4); + + // Construct the PING0 payload using the struct + VBAN_PING0 ping0; + memset(&ping0, 0, sizeof(ping0)); + + // Fill fields with your config data and fixed values + ping0.bitType = cfg.device_flags; + ping0.bitfeature = cfg.bitfeature; + ping0.bitfeatureEx = 0x00000000; + ping0.PreferedRate = 44100; + ping0.MinRate = 8000; + ping0.MaxRate = 96000; + ping0.color_rgb = cfg.device_color; + + // Version string, 8 bytes total (zero padded) + memcpy(ping0.nVersion, "v1.0", 4); + + // GPS_Position left empty (all zero), so no need to set + // USER_Position 8 bytes + memcpy(ping0.USER_Position, "USRPOS", 6); + // LangCode_ascii 8 bytes ("EN" + padding) + memset(ping0.LangCode_ascii, 0, sizeof(ping0.LangCode_ascii)); + memcpy(ping0.LangCode_ascii, "EN", 2); + // reserved_ascii and reservedEx are zeroed by memset + // IP as string, max 32 bytes + + char ipStr[16]; // Enough for "255.255.255.255\0" + sprintf(ipStr, "%d.%d.%d.%d", WiFi.localIP()[0], WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); + safe_strncpy(ping0.DistantIP_ascii, ipStr, sizeof(ping0.DistantIP_ascii)); + // Ports (network byte order) + + ping0.DistantPort = cfg.udp_port; //returs port I am listening for VBAN - more useful then UDP ephemeral port + ping0.DistantReserved = 0; + + // Device name (64 bytes) + if (cfg.device_name && cfg.device_name[0] != '\0') { + safe_strncpy(ping0.DeviceName_ascii, cfg.device_name, sizeof(ping0.DeviceName_ascii)); + } else { + uint8_t mac[6]; + WiFi.macAddress(mac); + char macStr[64]; + snprintf(macStr, sizeof(macStr), "%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + safe_strncpy(ping0.DeviceName_ascii, macStr, sizeof(ping0.DeviceName_ascii)); + } + + // Manufacturer name (64 bytes) + 
safe_strncpy(ping0.ManufacturerName_ascii, cfg.manufacturer_name, sizeof(ping0.ManufacturerName_ascii)); + // Application name (64 bytes) + safe_strncpy(ping0.ApplicationName_ascii, cfg.application_name, sizeof(ping0.ApplicationName_ascii)); + // Host name (64 bytes) + const char* hostName = cfg.host_name; + if (!hostName || hostName[0] == '\0') { + hostName = WiFi.getHostname(); + if (!hostName) hostName = "ESP32"; + } + safe_strncpy(ping0.HostName_ascii, hostName, sizeof(ping0.HostName_ascii)); + + // UserName_utf8 + safe_strncpy(ping0.UserName_utf8, cfg.user_name, sizeof(ping0.UserName_utf8)); + //UserComment_utf8 + safe_strncpy(ping0.UserComment_utf8, cfg.user_comment, sizeof(ping0.UserComment_utf8)); + + // Prepare final packet: header + payload + uint8_t packet[28 + sizeof(VBAN_PING0)]; + memcpy(packet, header, 28); + memcpy(packet + 28, &ping0, sizeof(VBAN_PING0)); + + // Send UDP packet + udp.writeTo(packet, sizeof(packet), sourcePacket.remoteIP(), sourcePacket.remotePort()); +} + + // Safely copy a C-string with guaranteed null termination + void safe_strncpy(char* dest, const char* src, size_t dest_size) { + if (dest_size == 0) return; + strncpy(dest, src, dest_size - 1); + dest[dest_size - 1] = '\0'; + } + //----------------------------------------------------------------------------------- +}; + +} // namespace audio_tools \ No newline at end of file From 621bd9984c08964448724a65cba524aa94fa454a Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 06:50:33 +0200 Subject: [PATCH 4/4] bump to 1.2.0 --- library.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library.properties b/library.properties index b844d89390..afc250c77b 100644 --- a/library.properties +++ b/library.properties @@ -1,5 +1,5 @@ name=audio-tools -version=1.1.3 +version=1.2.0 author=Phil Schatzmann maintainer=Phil Schatzmann sentence=Some useful audio processing classes