From cf4dce0b325453ec9abf90a4dcb035a69130c6c9 Mon Sep 17 00:00:00 2001
From: pschatzmann
Date: Tue, 23 Sep 2025 06:47:07 +0200
Subject: [PATCH 01/15] Move to Communication from AudioLibs

---
 .../hls/hls-buffer-i2s/hls-buffer-i2s.ino     |   2 +-
 .../hls/hls-i2s/hls-i2s.ino                   |   2 +-
 .../streams-url_mts-hex.ino                   |   2 +-
 .../communication-rtsp-audiokit.ino           |  88 +-
 .../communication-rtsp555-audiokit.ino        |  28 +
 .../communication-rtsp555-i2s.ino}            |   2 +-
 .../player-sdmmc-vban/player-sdmmc-vban.ino   |   2 +-
 .../streams-audiokit-vban.ino                 |   2 +-
 .../streams-generator-vban.ino                |   2 +-
 .../streams-vban-audiokit.ino                 |   2 +-
 src/AudioTools/AudioLibs/AudioClientRTSP.h    | 722 +---------------
 src/AudioTools/AudioLibs/HLSStream.h          | 782 +-----------------
 src/AudioTools/AudioLibs/README.md            |   2 +-
 src/AudioTools/AudioLibs/VBANStream.h         | 595 +------------
 src/AudioTools/Communication/HLSStream.h      | 781 +++++++++++++++++
 .../HLSStreamESP32.h                          |   0
 src/AudioTools/Communication/README.md        |   2 +-
 src/AudioTools/Communication/RTSP.h           |  10 +
 .../Communication/RTSP/IAudioSource.h         |   1 -
 src/AudioTools/Communication/RTSPClient555.h  | 721 ++++++++++++++++
 .../vban => Communication/VBAN}/vban.h        |   0
 src/AudioTools/Communication/VBANStream.h     | 592 +++++++++++++
 22 files changed, 2215 insertions(+), 2125 deletions(-)
 create mode 100644 examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino
 rename examples/examples-communication/rtsp/{communication-rtsp-i2s/communication-rtsp-i2s.ino => communication-rtsp555-i2s/communication-rtsp555-i2s.ino} (97%)
 create mode 100644 src/AudioTools/Communication/HLSStream.h
 rename src/AudioTools/{AudioLibs => Communication}/HLSStreamESP32.h (100%)
 create mode 100644 src/AudioTools/Communication/RTSPClient555.h
 rename src/AudioTools/{AudioLibs/vban => Communication/VBAN}/vban.h (100%)
 create mode 100644 src/AudioTools/Communication/VBANStream.h

diff --git a/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino b/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino
index 992cf35e60..62f61a7200 100644
--- a/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino
+++ b/examples/examples-communication/hls/hls-buffer-i2s/hls-buffer-i2s.ino
@@ -11,7 +11,7 @@
 #include "AudioTools.h"
 #include "AudioTools/AudioCodecs/CodecHelix.h"
 #include "AudioTools/AudioCodecs/CodecMTS.h"
-#include "AudioTools/AudioLibs/HLSStream.h"
+#include "AudioTools/Communication/HLSStream.h"
 #include "AudioTools/Concurrency/RTOS.h"
 // #include "AudioTools/AudioLibs/AudioBoardStream.h"
 
diff --git a/examples/examples-communication/hls/hls-i2s/hls-i2s.ino b/examples/examples-communication/hls/hls-i2s/hls-i2s.ino
index ffdfd338dd..b9f195de45 100644
--- a/examples/examples-communication/hls/hls-i2s/hls-i2s.ino
+++ b/examples/examples-communication/hls/hls-i2s/hls-i2s.ino
@@ -9,7 +9,7 @@
  */
 
 #include "AudioTools.h"
-#include "AudioTools/AudioLibs/HLSStream.h"
+#include "AudioTools/Communication/HLSStream.h"
 #include "AudioTools/AudioCodecs/CodecHelix.h"
 //#include "AudioTools/AudioLibs/AudioBoardStream.h"
 
diff --git a/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino b/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino
index f180512d93..f64ac47872 100644
--- a/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino
+++ b/examples/examples-communication/http-client/streams-url_mts-hex/streams-url_mts-hex.ino
@@ -7,7 +7,7 @@
 
 #include "AudioTools.h"
#include "AudioTools/AudioCodecs/CodecMTS.h" -#include "AudioTools/AudioLibs/HLSStream.h" +#include "AudioTools/Communication/HLSStream.h" HexDumpOutput out(Serial); HLSStream hls_stream("SSID", "password"); diff --git a/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino b/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino index 3243ecd693..c69f92a3d9 100644 --- a/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino +++ b/examples/examples-communication/rtsp/communication-rtsp-audiokit/communication-rtsp-audiokit.ino @@ -1,28 +1,72 @@ /** - * @file communication-rtsp-i2s.ino - * @author Phil Schatzmann - * @brief Demo for RTSP Client that is playing mp3. I tested with the live555 server with linux - * @version 0.1 - * @date 2022-05-02 - * - * @copyright Copyright (c) 2022 - * + * @file communication-rtsp-audiokit.ino + * @brief RTSP client demo using the new UDP/RTP client and AudioKit output. + * Connects to an RTSP server, decodes audio via MultiDecoder, and plays + * out via `AudioBoardStream` (AudioKit ES8388). Tested with RTSP + * servers. Requires WiFi on ESP32. + * + * Steps: + * - Update WiFi credentials and RTSP server address/path below + * - Builds a fixed pipeline: MultiDecoder -> ResampleStream -> AudioKit output + * - Call client.copy() in loop to push received RTP payloads into decoders */ -#include "AudioTools.h" // https://github.com/pschatzmann/arduino-audio-tools -#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix -#include "AudioTools/AudioLibs/AudioBoardStream.h" // https://github.com/pschatzmann/arduino-audio-driver -#include "AudioTools/AudioLibs/AudioClientRTSP.h" // install https://github.com/pschatzmann/arduino-live555 - -AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream -EncodedAudioStream out_mp3(&i2s, new MP3DecoderHelix()); // Decoding stream -AudioClientRTSP rtsp(1024); - -void setup(){ - rtsp.setLogin("ssid", "password"); - rtsp.begin("https://samples.mplayerhq.hu/A-codecs/MP3/01%20-%20Charity%20Case.mp3", out_mp3); +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecADPCM.h" +#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix +#include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/RTSP.h" // brings RTSPClientWiFi alias + +const char* SSID = "ssid"; +const char* PASS = "password"; +IPAddress srv(192, 168, 1, 39); // change to your RTSP server IP +const uint16_t rtspPort = 8554; // typical RTSP port +const char* rtspPath = + "stream"; // change to your RTSP server path (e.g., "audio", "stream1") +AudioBoardStream i2s(AudioKitEs8388V1); +RTSPClientWiFi client(i2s); +MP3DecoderHelix mp3; // Decoder for "audio/mpeg" (MP3) payloads +ADPCMDecoder adpcm(AV_CODEC_ID_ADPCM_IMA_WAV, 512); // ima adpcm decoder + +void startWiFi() { + WiFi.begin(SSID, PASS); + Serial.print("Connecting to WiFi"); + while (WiFi.status() != WL_CONNECTED) { + delay(500); + Serial.print("."); + } + Serial.println(); + Serial.print("WiFi connected, IP: "); + Serial.println(WiFi.localIP()); + WiFi.setSleep(false); +} + +void setup() { + Serial.begin(115200); + AudioToolsLogger.begin(Serial, AudioToolsLogLevel::Info); + + // Connect WiFi + startWiFi(); + + // Configure and start I2S/AudioKit output + auto cfg = i2s.defaultConfig(TX_MODE); + cfg.sd_active = false; + i2s.begin(cfg); + + // Start 
RTSP session + client.addDecoder("audio/mpeg", mp3); + client.addDecoder("audio/adpcm", adpcm); + client.setResampleFactor(1.0); // no resampling + // Servers often require a concrete path; also extend header timeout if needed + client.setHeaderTimeoutMs(8000); + if (!client.begin(srv, rtspPort, rtspPath)) { + Serial.println("Failed to start RTSP client"); + stop(); + } + Serial.println("RTSP client started"); } void loop() { - rtsp.loop(); -} \ No newline at end of file + // Push next available RTP payload to decoder chain + client.copy(); +} diff --git a/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino b/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino new file mode 100644 index 0000000000..191cd590f0 --- /dev/null +++ b/examples/examples-communication/rtsp/communication-rtsp555-audiokit/communication-rtsp555-audiokit.ino @@ -0,0 +1,28 @@ + +/** + * @file communication-rtsp555-i2s.ino + * @author Phil Schatzmann + * @brief Demo for RTSP Client that is playing mp3. I tested with the live555 server with linux + * @version 0.1 + * @date 2022-05-02 + * + * @copyright Copyright (c) 2022 + * + */ +#include "AudioTools.h" // https://github.com/pschatzmann/arduino-audio-tools +#include "AudioTools/AudioCodecs/CodecMP3Helix.h" // https://github.com/pschatzmann/arduino-libhelix +#include "AudioTools/AudioLibs/AudioBoardStream.h" // https://github.com/pschatzmann/arduino-audio-driver +#include "AudioTools/Communication/RTSPClient555.h" // install https://github.com/pschatzmann/arduino-live555 + +AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream +EncodedAudioStream out_mp3(&i2s, new MP3DecoderHelix()); // Decoding stream +AudioClientRTSP rtsp(1024); + +void setup(){ + rtsp.setLogin("ssid", "password"); + rtsp.begin("https://samples.mplayerhq.hu/A-codecs/MP3/01%20-%20Charity%20Case.mp3", out_mp3); +} + +void loop() { + rtsp.loop(); +} \ No newline at end of file diff --git a/examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino b/examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino similarity index 97% rename from examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino rename to examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino index 00bb1f1605..b141386e49 100644 --- a/examples/examples-communication/rtsp/communication-rtsp-i2s/communication-rtsp-i2s.ino +++ b/examples/examples-communication/rtsp/communication-rtsp555-i2s/communication-rtsp555-i2s.ino @@ -1,5 +1,5 @@ /** - * @file communication-rtsp-i2s.ino + * @file communication-rtsp666-i2s.ino * @author Phil Schatzmann * @brief Demo for RTSP Client that is playing mp3: tested with the live555 server with linux * @version 0.1 diff --git a/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino b/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino index 3dd3f51443..67f917a10c 100644 --- a/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino +++ b/examples/examples-communication/vban/player-sdmmc-vban/player-sdmmc-vban.ino @@ -7,7 +7,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/Disk/AudioSourceSDMMC.h" // or AudioSourceIdxSDMMC.h const char *startFilePath="/"; 
diff --git a/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino b/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino index c356859ded..15e20e632c 100644 --- a/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino +++ b/examples/examples-communication/vban/streams-audiokit-vban/streams-audiokit-vban.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" // comment out when not using AudioKit AudioInfo info(44100, 2, 16); diff --git a/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino b/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino index 2a149c8342..5c9298f118 100644 --- a/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino +++ b/examples/examples-communication/vban/streams-generator-vban/streams-generator-vban.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" AudioInfo info(44100, 2, 16); SineWaveGenerator sineWave(32000); // subclass of SoundGenerator with max amplitude of 32000 diff --git a/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino b/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino index ae15b4d19b..05861886b4 100644 --- a/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino +++ b/examples/examples-communication/vban/streams-vban-audiokit/streams-vban-audiokit.ino @@ -5,7 +5,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/VBANStream.h" +#include "AudioTools/Communication/VBANStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" // comment out when not using AudioKit AudioBoardStream out(AudioKitEs8388V1); // Audio source e.g. replace with I2SStream diff --git a/src/AudioTools/AudioLibs/AudioClientRTSP.h b/src/AudioTools/AudioLibs/AudioClientRTSP.h index 27517807a1..afb9193d07 100644 --- a/src/AudioTools/AudioLibs/AudioClientRTSP.h +++ b/src/AudioTools/AudioLibs/AudioClientRTSP.h @@ -1,721 +1,3 @@ - #pragma once - -/** -This library is free software; you can redistribute it and/or modify it under -the terms of the GNU Lesser General Public License as published by the -Free Software Foundation; either version 3 of the License, or (at your -option) any later version. (See .) - -This library is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for -more details. - -You should have received a copy of the GNU Lesser General Public License -along with this library; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -**/ - -// Copyright (c) 1996-2023, Live Networks, Inc. All rights reserved -// A demo application, showing how to create and run a RTSP client (that can -// potentially receive multiple streams concurrently). -// - -#include "AudioLogger.h" -#include "Print.h" // Arduino Print -// include live555 -#include "BasicUsageEnvironment.hh" -//#include "liveMedia.hh" -#include "RTSPClient.hh" - -// By default, we request that the server stream its data using RTP/UDP. 
-// If, instead, you want to request that the server stream via RTP-over-TCP, -// change the following to True: -#define REQUEST_STREAMING_OVER_TCP false - -// by default, print verbose output from each "RTSPClient" -#define RTSP_CLIENT_VERBOSITY_LEVEL 1 -// Even though we're not going to be doing anything with the incoming data, we -// still need to receive it. Define the size of the buffer that we'll use: -#define RTSP_SINK_BUFFER_SIZE 1024 - -// If you don't want to see debugging output for each received frame, then -// comment out the following line: -#undef DEBUG_PRINT_EACH_RECEIVED_FRAME -#define DEBUG_PRINT_EACH_RECEIVED_FRAME 0 - -/// @brief AudioTools internal: rtsp -namespace audiotools_rtsp { - -class OurRTSPClient; -// The main streaming routine (or each "rtsp://" URL): -OurRTSPClient * openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); -// Counts how many streams (i.e., "RTSPClient"s) are currently in use. -static unsigned rtspClientCount = 0; -static char rtspEventLoopWatchVariable = 0; -static Print* rtspOutput = nullptr; -static uint32_t rtspSinkReceiveBufferSize = 0; -static bool rtspUseTCP = REQUEST_STREAMING_OVER_TCP; - -} // namespace audiotools_rtsp - -namespace audio_tools { - -/** - * @brief A simple RTSPClient using https://github.com/pschatzmann/arduino-live555 - * @ingroup communications - * @author Phil Schatzmann - * @copyright GPLv3 -*/ -class AudioClientRTSP { - public: - AudioClientRTSP(uint32_t receiveBufferSize = RTSP_SINK_BUFFER_SIZE, bool useTCP=REQUEST_STREAMING_OVER_TCP, bool blocking = false) { - setBufferSize(receiveBufferSize); - useTCP ? setTCP() : setUDP(); - setBlocking(blocking); - } - - void setBufferSize(int size){ - audiotools_rtsp::rtspSinkReceiveBufferSize = size; - } - - void setTCP(){ - audiotools_rtsp::rtspUseTCP = true; - } - - void setUDP(){ - audiotools_rtsp::rtspUseTCP = false; - } - - void setBlocking(bool flag){ - is_blocking = flag; - } - - /// login to wifi: optional convinience method. You can also just start Wifi the normal way - void setLogin(const char* ssid, const char* password){ - this->ssid = ssid; - this->password = password; - } - - /// Starts the processing - bool begin(const char* url, Print &out) { - audiotools_rtsp::rtspOutput = &out; - if (url==nullptr) { - return false; - } - if (!login()){ - LOGE("wifi down"); - return false; - } - // Begin by setting up our usage environment: - scheduler = BasicTaskScheduler::createNew(); - env = BasicUsageEnvironment::createNew(*scheduler); - - // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start - // streaming each one: - rtsp_client = audiotools_rtsp::openURL(*env, "RTSPClient", url); - - // All subsequent activity takes place within the event loop: - if (is_blocking) env->taskScheduler().doEventLoop(&audiotools_rtsp::rtspEventLoopWatchVariable); - // This function call does not return, unless, at some point in time, - // "rtspEventLoopWatchVariable" gets set to something non-zero. 
- - return true; - } - - /// to be called in Arduino loop when blocking = false - void loop() { - if (audiotools_rtsp::rtspEventLoopWatchVariable==0) scheduler->SingleStep(); - } - - void end() { - audiotools_rtsp::rtspEventLoopWatchVariable = 1; - env->reclaim(); - env = NULL; - delete scheduler; - scheduler = NULL; - bool is_blocking = false; - } - - audiotools_rtsp::OurRTSPClient *client() { - return rtsp_client; - } - - protected: - audiotools_rtsp::OurRTSPClient* rtsp_client; - UsageEnvironment* env=nullptr; - BasicTaskScheduler* scheduler=nullptr; - const char* ssid=nullptr; - const char* password = nullptr; - bool is_blocking = false; - - /// login to wifi: optional convinience method. You can also just start Wifi the normal way - bool login(){ - if(WiFi.status() != WL_CONNECTED && ssid!=nullptr && password!=nullptr){ - WiFi.mode(WIFI_STA); - WiFi.begin(ssid, password); - while(WiFi.status() != WL_CONNECTED){ - Serial.print("."); - delay(100); - } - Serial.println(); - Serial.print("Local Address: "); - Serial.println(WiFi.localIP()); - } - return WiFi.status() == WL_CONNECTED; - } - - -}; - -} // namespace audio_tools - -namespace audiotools_rtsp { -// Define a class to hold per-stream state that we maintain throughout each -// stream's lifetime: - -// Forward function definitions: - -// RTSP 'response handlers': -void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, - char* resultString); -void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, - char* resultString); -void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, - char* resultString); - -// Other event handler functions: -void subsessionAfterPlaying( - void* clientData); // called when a stream's subsession (e.g., audio or - // video substream) ends -void subsessionByeHandler(void* clientData, char const* reason); -// called when a RTCP "BYE" is received for a subsession -void streamTimerHandler(void* clientData); -// called at the end of a stream's expected duration (if the stream has not -// already signaled its end using a RTCP "BYE") - -// Used to iterate through each stream's 'subsessions', setting up each one: -void setupNextSubsession(RTSPClient* rtspClient); - -// Used to shut down and close a stream (including its "RTSPClient" object): -void shutdownStream(RTSPClient* rtspClient, int exitCode = 1); - -// A function that outputs a string that identifies each stream (for debugging -// output). Modify this if you wish: -UsageEnvironment& operator<<(UsageEnvironment& env, - const RTSPClient& rtspClient) { - return env << "[URL:\"" << rtspClient.url() << "\"]: "; -} - -// A function that outputs a string that identifies each subsession (for -// debugging output). Modify this if you wish: -UsageEnvironment& operator<<(UsageEnvironment& env, - const MediaSubsession& subsession) { - return env << subsession.mediumName() << "/" << subsession.codecName(); -} - -class StreamClientState { - public: - StreamClientState(); - virtual ~StreamClientState(); - - public: - MediaSubsessionIterator* iter; - MediaSession* session; - MediaSubsession* subsession; - TaskToken streamTimerTask; - double duration; -}; - -// If you're streaming just a single stream (i.e., just from a single URL, -// once), then you can define and use just a single "StreamClientState" -// structure, as a global variable in your application. However, because - in -// this demo application - we're showing how to play multiple streams, -// concurrently, we can't do that. 
Instead, we have to have a separate -// "StreamClientState" structure for each "RTSPClient". To do this, we subclass -// "RTSPClient", and add a "StreamClientState" field to the subclass: - -class OurRTSPClient : public RTSPClient { - public: - static OurRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, - int verbosityLevel = 0, - char const* applicationName = NULL, - portNumBits tunnelOverHTTPPortNum = 0); - - protected: - OurRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, - char const* applicationName, portNumBits tunnelOverHTTPPortNum); - // called only by createNew(); - virtual ~OurRTSPClient(); - - public: - StreamClientState scs; -}; - -// Define a data sink (a subclass of "MediaSink") to receive the data for each -// subsession (i.e., each audio or video 'substream'). In practice, this might -// be a class (or a chain of classes) that decodes and then renders the incoming -// audio or video. Or it might be a "FileSink", for outputting the received data -// into a file (as is done by the "openRTSP" application). In this example code, -// however, we define a simple 'dummy' sink that receives incoming data, but -// does nothing with it. - -class OurSink : public MediaSink { - public: - static OurSink* createNew( - UsageEnvironment& env, - MediaSubsession& - subsession, // identifies the kind of data that's being received - char const* streamId = NULL); // identifies the stream itself (optional) - - private: - OurSink(UsageEnvironment& env, MediaSubsession& subsession, - char const* streamId); - // called only by "createNew()" - virtual ~OurSink(); - - static void afterGettingFrame(void* clientData, unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds); - void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds); - - private: - // redefined virtual functions: - virtual Boolean continuePlaying(); - - private: - u_int8_t* fReceiveBuffer; - MediaSubsession& fSubsession; - char* fStreamId; -}; - -OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) { - // Begin by creating a "RTSPClient" object. Note that there is a separate - // "RTSPClient" object for each stream that we wish to receive (even if more - // than stream uses the same "rtsp://" URL). - OurRTSPClient* rtspClient = OurRTSPClient::createNew( - env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName); - if (rtspClient == NULL) { - env << "Failed to create a RTSP client for URL \"" << rtspURL - << "\": " << env.getResultMsg() << "\n"; - return nullptr; - } - - ++rtspClientCount; - - // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the - // stream. Note that this command - like all RTSP commands - is sent - // asynchronously; we do not block, waiting for a response. 
Instead, the - // following function call returns immediately, and we handle the RTSP - // response later, from within the event loop: - rtspClient->sendDescribeCommand(continueAfterDESCRIBE); - return rtspClient; -} - -// Implementation of the RTSP 'response handlers': - -void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, - char* resultString) { - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to get a SDP description: " << resultString - << "\n"; - delete[] resultString; - break; - } - - char* const sdpDescription = resultString; - env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; - - // Create a media session object from this SDP description: - scs.session = MediaSession::createNew(env, sdpDescription); - delete[] sdpDescription; // because we don't need it anymore - if (scs.session == NULL) { - env << *rtspClient - << "Failed to create a MediaSession object from the SDP description: " - << env.getResultMsg() << "\n"; - break; - } else if (!scs.session->hasSubsessions()) { - env << *rtspClient - << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; - break; - } - - // Then, create and set up our data source objects for the session. We do - // this by iterating over the session's 'subsessions', calling - // "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, - // on each one. (Each 'subsession' will have its own data source.) - scs.iter = new MediaSubsessionIterator(*scs.session); - setupNextSubsession(rtspClient); - return; - } while (0); - - // An unrecoverable error occurred with this stream. - shutdownStream(rtspClient); -} - -void setupNextSubsession(RTSPClient* rtspClient) { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - scs.subsession = scs.iter->next(); - if (scs.subsession != NULL) { - if (!scs.subsession->initiate()) { - env << *rtspClient << "Failed to initiate the \"" << *scs.subsession - << "\" subsession: " << env.getResultMsg() << "\n"; - setupNextSubsession( - rtspClient); // give up on this subsession; go to the next one - } else { - env << *rtspClient << "Initiated the \"" << *scs.subsession - << "\" subsession ("; - if (scs.subsession->rtcpIsMuxed()) { - env << "client port " << scs.subsession->clientPortNum(); - } else { - env << "client ports " << scs.subsession->clientPortNum() << "-" - << scs.subsession->clientPortNum() + 1; - } - env << ")\n"; - - // Continue setting up this subsession, by sending a RTSP "SETUP" command: - rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, - rtspUseTCP); - } - return; - } - - // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" - // command to start the streaming: - if (scs.session->absStartTime() != NULL) { - // Special case: The stream is indexed by 'absolute' time, so send an - // appropriate "PLAY" command: - rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, - scs.session->absStartTime(), - scs.session->absEndTime()); - } else { - scs.duration = scs.session->playEndTime() - scs.session->playStartTime(); - rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY); - } -} - -void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, - char* resultString) { - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to set up the \"" << *scs.subsession - << "\" subsession: " << resultString << "\n"; - break; - } - - env << *rtspClient << "Set up the \"" << *scs.subsession - << "\" subsession ("; - if (scs.subsession->rtcpIsMuxed()) { - env << "client port " << scs.subsession->clientPortNum(); - } else { - env << "client ports " << scs.subsession->clientPortNum() << "-" - << scs.subsession->clientPortNum() + 1; - } - env << ")\n"; - - // Having successfully setup the subsession, create a data sink for it, and - // call "startPlaying()" on it. (This will prepare the data sink to receive - // data; the actual flow of data from the client won't start happening until - // later, after we've sent a RTSP "PLAY" command.) - - scs.subsession->sink = - OurSink::createNew(env, *scs.subsession, rtspClient->url()); - // perhaps use your own custom "MediaSink" subclass instead - if (scs.subsession->sink == NULL) { - env << *rtspClient << "Failed to create a data sink for the \"" - << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; - break; - } - - env << *rtspClient << "Created a data sink for the \"" << *scs.subsession - << "\" subsession\n"; - scs.subsession->miscPtr = - rtspClient; // a hack to let subsession handler functions get the - // "RTSPClient" from the subsession - scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), - subsessionAfterPlaying, scs.subsession); - // Also set a handler to be called if a RTCP "BYE" arrives for this - // subsession: - if (scs.subsession->rtcpInstance() != NULL) { - scs.subsession->rtcpInstance()->setByeWithReasonHandler( - subsessionByeHandler, scs.subsession); - } - } while (0); - delete[] resultString; - - // Set up the next subsession, if any: - setupNextSubsession(rtspClient); -} - -void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, - char* resultString) { - Boolean success = False; - - do { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - if (resultCode != 0) { - env << *rtspClient << "Failed to start playing session: " << resultString - << "\n"; - break; - } - - // Set a timer to be handled at the end of the stream's expected duration - // (if the stream does not already signal its end using a RTCP "BYE"). This - // is optional. If, instead, you want to keep the stream active - e.g., so - // you can later 'seek' back within it and do another RTSP "PLAY" - then you - // can omit this code. (Alternatively, if you don't want to receive the - // entire stream, you could set this timer for some shorter value.) - if (scs.duration > 0) { - unsigned const delaySlop = - 2; // number of seconds extra to delay, after the stream's expected - // duration. (This is optional.) 
- scs.duration += delaySlop; - unsigned uSecsToDelay = (unsigned)(scs.duration * 1000000); - scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask( - uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); - } - - env << *rtspClient << "Started playing session"; - if (scs.duration > 0) { - env << " (for up to " << scs.duration << " seconds)"; - } - env << "...\n"; - - success = True; - } while (0); - delete[] resultString; - - if (!success) { - // An unrecoverable error occurred with this stream. - shutdownStream(rtspClient); - } -} - -// Implementation of the other event handlers: - -void subsessionAfterPlaying(void* clientData) { - MediaSubsession* subsession = (MediaSubsession*)clientData; - RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); - - // Begin by closing this subsession's stream: - Medium::close(subsession->sink); - subsession->sink = NULL; - - // Next, check whether *all* subsessions' streams have now been closed: - MediaSession& session = subsession->parentSession(); - MediaSubsessionIterator iter(session); - while ((subsession = iter.next()) != NULL) { - if (subsession->sink != NULL) return; // this subsession is still active - } - - // All subsessions' streams have now been closed, so shutdown the client: - shutdownStream(rtspClient); -} - -void subsessionByeHandler(void* clientData, char const* reason) { - MediaSubsession* subsession = (MediaSubsession*)clientData; - RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; - UsageEnvironment& env = rtspClient->envir(); // alias - - env << *rtspClient << "Received RTCP \"BYE\""; - if (reason != NULL) { - env << " (reason:\"" << reason << "\")"; - delete[] (char*)reason; - } - env << " on \"" << *subsession << "\" subsession\n"; - - // Now act as if the subsession had closed: - subsessionAfterPlaying(subsession); -} - -void streamTimerHandler(void* clientData) { - OurRTSPClient* rtspClient = (OurRTSPClient*)clientData; - StreamClientState& scs = rtspClient->scs; // alias - - scs.streamTimerTask = NULL; - - // Shut down the stream: - shutdownStream(rtspClient); -} - -void shutdownStream(RTSPClient* rtspClient, int exitCode) { - UsageEnvironment& env = rtspClient->envir(); // alias - StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias - - // First, check whether any subsessions have still to be closed: - if (scs.session != NULL) { - Boolean someSubsessionsWereActive = False; - MediaSubsessionIterator iter(*scs.session); - MediaSubsession* subsession; - - while ((subsession = iter.next()) != NULL) { - if (subsession->sink != NULL) { - Medium::close(subsession->sink); - subsession->sink = NULL; - - if (subsession->rtcpInstance() != NULL) { - subsession->rtcpInstance()->setByeHandler( - NULL, NULL); // in case the server sends a RTCP "BYE" while - // handling "TEARDOWN" - } - - someSubsessionsWereActive = True; - } - } - - if (someSubsessionsWereActive) { - // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the - // stream. Don't bother handling the response to the "TEARDOWN". - rtspClient->sendTeardownCommand(*scs.session, NULL); - } - } - - env << *rtspClient << "Closing the stream.\n"; - Medium::close(rtspClient); - // Note that this will also cause this stream's "StreamClientState" structure - // to get reclaimed. - - if (--rtspClientCount == 0) { - // The final stream has ended, so exit the application now. 
- // (Of course, if you're embedding this code into your own application, you - // might want to comment this out, and replace it with - // "rtspEventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, - // and continue running "main()".) - // exit(exitCode); - rtspEventLoopWatchVariable = 1; - return; - } -} - -// Implementation of "OurRTSPClient": - -OurRTSPClient* OurRTSPClient::createNew(UsageEnvironment& env, - char const* rtspURL, int verbosityLevel, - char const* applicationName, - portNumBits tunnelOverHTTPPortNum) { - return new OurRTSPClient(env, rtspURL, verbosityLevel, applicationName, - tunnelOverHTTPPortNum); -} - -OurRTSPClient::OurRTSPClient(UsageEnvironment& env, char const* rtspURL, - int verbosityLevel, char const* applicationName, - portNumBits tunnelOverHTTPPortNum) - : RTSPClient(env, rtspURL, verbosityLevel, applicationName, - tunnelOverHTTPPortNum, -1) {} - -OurRTSPClient::~OurRTSPClient() {} - -// Implementation of "StreamClientState": - -StreamClientState::StreamClientState() - : iter(NULL), - session(NULL), - subsession(NULL), - streamTimerTask(NULL), - duration(0.0) {} - -StreamClientState::~StreamClientState() { - delete iter; - if (session != NULL) { - // We also need to delete "session", and unschedule "streamTimerTask" (if - // set) - UsageEnvironment& env = session->envir(); // alias - - env.taskScheduler().unscheduleDelayedTask(streamTimerTask); - Medium::close(session); - } -} - -// Implementation of "OurSink": - -OurSink* OurSink::createNew(UsageEnvironment& env, - MediaSubsession& subsession, - char const* streamId) { - return new OurSink(env, subsession, streamId); -} - -OurSink::OurSink(UsageEnvironment& env, MediaSubsession& subsession, - char const* streamId) - : MediaSink(env), fSubsession(subsession) { - fStreamId = strDup(streamId); - fReceiveBuffer = new u_int8_t[rtspSinkReceiveBufferSize]; -} - -OurSink::~OurSink() { - delete[] fReceiveBuffer; - delete[] fStreamId; -} - -void OurSink::afterGettingFrame(void* clientData, unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned durationInMicroseconds) { - OurSink* sink = (OurSink*)clientData; - sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, - durationInMicroseconds); -} - -void OurSink::afterGettingFrame(unsigned frameSize, - unsigned numTruncatedBytes, - struct timeval presentationTime, - unsigned /*durationInMicroseconds*/) { - // We've just received a frame of data. (Optionally) print out information - // about it: -#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME - if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; "; - envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() - << ":\tReceived " << frameSize << " bytes"; - if (numTruncatedBytes > 0) - envir() << " (with " << numTruncatedBytes << " bytes truncated)"; - char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the - // presentation time - snprintf(uSecsStr,7 , "%06u", (unsigned)presentationTime.tv_usec); - envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." 
- << uSecsStr; - if (fSubsession.rtpSource() != NULL && - !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) { - envir() << "!"; // mark the debugging output to indicate that this - // presentation time is not RTCP-synchronized - } -#ifdef DEBUG_PRINT_NPT - envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime); -#endif - envir() << "\n"; -#endif - - // Decode the data - if (rtspOutput) { - size_t writtenSize = rtspOutput->write(fReceiveBuffer, frameSize); - assert(writtenSize == frameSize); - } - - // Then continue, to request the next frame of data: - continuePlaying(); -} - -Boolean OurSink::continuePlaying() { - if (fSource == NULL) return False; // sanity check (should not happen) - - // Request the next frame of data from our input source. "afterGettingFrame()" - // will get called later, when it arrives: - fSource->getNextFrame(fReceiveBuffer, rtspSinkReceiveBufferSize, - afterGettingFrame, this, onSourceClosure, this); - return True; -} - -} // namespace audiotools_rtsp \ No newline at end of file +#WARNING("Obsolete: Use AudioTools/Communication/AudioClientRTSP555.h") +#include "AudioTools/Communication/AudioClientRTSP555.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/HLSStream.h b/src/AudioTools/AudioLibs/HLSStream.h index 50aff75668..1468453e52 100644 --- a/src/AudioTools/AudioLibs/HLSStream.h +++ b/src/AudioTools/AudioLibs/HLSStream.h @@ -1,781 +1,3 @@ #pragma once -#include "AudioTools/AudioCodecs/AudioEncoded.h" -#include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" -#include "AudioTools/CoreAudio/StreamCopy.h" -#include "AudioToolsConfig.h" - -#define MAX_HLS_LINE 512 -#define START_URLS_LIMIT 4 -#define HLS_BUFFER_COUNT 2 -#define HLS_MAX_NO_READ 2 -#define HLS_MAX_URL_LEN 256 -#define HLS_TIMEOUT 5000 -#define HLS_UNDER_OVERFLOW_WAIT_TIME 10 - -/// hide hls implementation in it's own namespace - -namespace audio_tools_hls { - -/*** - * @brief We feed the URLLoaderHLS with some url strings. The data of the - * related segments are provided via the readBytes() method. - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -template -class URLLoaderHLS { - public: - URLLoaderHLS() = default; - - ~URLLoaderHLS() { end(); } - - bool begin() { - TRACED(); - buffer.resize(buffer_size * buffer_count); - - active = true; - return true; - } - - void end() { - TRACED(); - url_stream.end(); - buffer.clear(); - active = false; - } - - /// Adds the next url to be played in sequence - void addUrl(const char *url) { - LOGI("Adding %s", url); - StrView url_str(url); - char *str = new char[url_str.length() + 1]; - memcpy(str, url_str.c_str(), url_str.length() + 1); - urls.push_back((const char *)str); - } - - /// Provides the number of open urls which can be played. Refills them, when - /// min limit is reached. 
- int urlCount() { return urls.size(); } - - /// Available bytes of the audio stream - int available() { - if (!active) return 0; - TRACED(); - bufferRefill(); - - return buffer.available(); - } - - /// Provides data from the audio stream - size_t readBytes(uint8_t *data, size_t len) { - if (!active) return 0; - TRACED(); - bufferRefill(); - - if (buffer.available() < len) LOGW("Buffer underflow"); - return buffer.readArray(data, len); - } - - const char *contentType() { - return url_stream.httpRequest().reply().get(CONTENT_TYPE); - } - - int contentLength() { return url_stream.contentLength(); } - - void setBufferSize(int size, int count) { - buffer_size = size; - buffer_count = count; - // support call after begin()! - if (buffer.size() != 0) { - buffer.resize(buffer_size * buffer_count); - } - } - - void setCACert(const char *cert) { url_stream.setCACert(cert); } - - protected: - Vector urls{10}; - RingBuffer buffer{0}; - bool active = false; - int buffer_size = DEFAULT_BUFFER_SIZE; - int buffer_count = HLS_BUFFER_COUNT; - URLStream url_stream; - const char *url_to_play = nullptr; - - /// try to keep the buffer filled - void bufferRefill() { - TRACED(); - // we have nothing to do - if (urls.empty()) { - LOGD("urls empty"); - delay(HLS_UNDER_OVERFLOW_WAIT_TIME); - return; - } - if (buffer.availableForWrite() == 0) { - LOGD("buffer full"); - delay(HLS_UNDER_OVERFLOW_WAIT_TIME); - return; - } - - // switch current stream if we have no more data - if (!url_stream && !urls.empty()) { - LOGD("Refilling"); - if (url_to_play != nullptr) { - delete url_to_play; - } - url_to_play = urls[0]; - LOGI("playing %s", url_to_play); - url_stream.end(); - url_stream.setConnectionClose(true); - url_stream.setTimeout(HLS_TIMEOUT); - url_stream.begin(url_to_play); - url_stream.waitForData(HLS_TIMEOUT); - urls.pop_front(); - // assert(urls[0]!=url); - - LOGI("Playing %s of %d", url_stream.urlStr(), (int)urls.size()); - } - - int total = 0; - int failed = 0; - int to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); - // try to keep the buffer filled - while (to_write > 0) { - uint8_t tmp[to_write]; - memset(tmp, 0, to_write); - int read = url_stream.readBytes(tmp, to_write); - total += read; - if (read > 0) { - failed = 0; - buffer.writeArray(tmp, read); - LOGD("buffer add %d -> %d:", read, buffer.available()); - - to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); - } else { - delay(10); - } - // After we processed all data we close the stream to get a new url - if (url_stream.totalRead() == url_stream.contentLength()) { - LOGI("Closing stream because all bytes were processed: available: %d", - url_stream.available()); - url_stream.end(); - break; - } - LOGD("Refilled with %d now %d available to write", total, - buffer.availableForWrite()); - } - } -}; - -/** - * Prevent that the same url is loaded twice. We limit the history to - * 20 entries. 
- */ -class URLHistory { - public: - bool add(const char *url) { - if (url == nullptr) return true; - bool found = false; - StrView url_str(url); - for (int j = 0; j < history.size(); j++) { - if (url_str.equals(history[j])) { - found = true; - break; - } - } - if (!found) { - char *str = new char[url_str.length() + 1]; - memcpy(str, url, url_str.length() + 1); - history.push_back((const char *)str); - if (history.size() > 20) { - delete (history[0]); - history.pop_front(); - } - } - return !found; - } - - void clear() { history.clear(); } - - int size() { return history.size(); } - - protected: - Vector history; -}; - -/** - * @brief Simple Parser for HLS data. - * @author Phil Schatzmann - * @copyright GPLv3 - */ -template -class HLSParser { - public: - // loads the index url - bool begin(const char *urlStr) { - index_url_str = urlStr; - return begin(); - } - - bool begin() { - TRACEI(); - segments_url_str = ""; - bandwidth = 0; - total_read = 0; - - if (!parseIndex()) { - TRACEE(); - return false; - } - - // in some exceptional cases the index provided segement info - if (url_loader.urlCount() == 0) { - if (!parseSegments()) { - TRACEE(); - return false; - } - } else { - segments_url_str = index_url_str; - segmentsActivate(); - } - - if (!url_loader.begin()) { - TRACEE(); - return false; - } - - return true; - } - - int available() { - TRACED(); - int result = 0; - reloadSegments(); - - if (active) result = url_loader.available(); - return result; - } - - size_t readBytes(uint8_t *data, size_t len) { - TRACED(); - size_t result = 0; - reloadSegments(); - - if (active) result = url_loader.readBytes(data, len); - total_read += result; - return result; - } - - const char *indexUrl() { return index_url_str; } - - const char *segmentsUrl() { return segments_url_str.c_str(); } - - /// Provides the codec - const char *getCodec() { return codec.c_str(); } - - /// Provides the content type of the audio data - const char *contentType() { return url_loader.contentType(); } - - /// Provides the http content lengh - int contentLength() { return url_loader.contentLength(); } - - /// Closes the processing - void end() { - TRACEI(); - codec.clear(); - segments_url_str.clear(); - url_stream.end(); - url_loader.end(); - url_history.clear(); - active = false; - } - - /// Defines the number of urls that are preloaded in the URLLoaderHLS - void setUrlCount(int count) { url_count = count; } - - /// Redefines the buffer size - void setBufferSize(int size, int count) { - url_loader.setBufferSize(size, count); - } - - void setCACert(const char *cert) { - url_stream.setCACert(cert); - url_loader.setCACert(cert); - } - - void setPowerSave(bool flag) { url_stream.setPowerSave(flag); } - - void setURLResolver(const char *(*cb)(const char *segment, - const char *reqURL)) { - resolve_url = cb; - } - /// Provides the hls url as string - const char *urlStr() { return url_str.c_str(); } - - /// Povides the number of bytes read - size_t totalRead() { return total_read; }; - - protected: - enum class URLType { Undefined, Index, Segment }; - URLType next_url_type = URLType::Undefined; - int bandwidth = 0; - int url_count = 5; - size_t total_read = 0; - bool url_active = false; - bool is_extm3u = false; - Str codec; - Str segments_url_str; - Str url_str; - const char *index_url_str = nullptr; - URLStream url_stream; - URLLoaderHLS url_loader; - URLHistory url_history; - bool active = false; - bool parse_segments_active = false; - int media_sequence = 0; - int segment_count = 0; - uint64_t next_sement_load_time_planned = 0; 
- float play_time = 0; - uint64_t next_sement_load_time = 0; - const char *(*resolve_url)(const char *segment, - const char *reqURL) = resolveURL; - - /// Default implementation for url resolver: determine absolue url from - /// relative url - static const char *resolveURL(const char *segment, const char *reqURL) { - // avoid dynamic memory allocation - static char result[HLS_MAX_URL_LEN] = {0}; - StrView result_str(result, HLS_MAX_URL_LEN); - StrView index_url(reqURL); - // Use prefix up to ? or laast / - int end = index_url.lastIndexOf("?"); - if (end >= 0) { - result_str.substring(reqURL, 0, end); - } else { - end = index_url.lastIndexOf("/"); - if (end >= 0) { - result_str.substring(reqURL, 0, end); - } - } - // Use the full url - if (result_str.isEmpty()) { - result_str = reqURL; - } - // add trailing / - if (!result_str.endsWith("/")) { - result_str.add("/"); - } - // add relative segment - result_str.add(segment); - LOGI(">> relative addr: %s for %s", segment, reqURL); - LOGD(">> -> %s", result); - return result; - } - - /// trigger the reloading of segments if the limit is underflowing - void reloadSegments() { - TRACED(); - // get new urls - if (!segments_url_str.isEmpty()) { - parseSegments(); - } - } - - /// parse the index file and the segments - bool parseIndex() { - TRACED(); - url_stream.end(); - url_stream.setTimeout(HLS_TIMEOUT); - url_stream.setConnectionClose(true); - if (!url_stream.begin(index_url_str)) return false; - url_active = true; - return parseIndexLines(); - } - - /// parse the index file - bool parseIndexLines() { - TRACEI(); - char tmp[MAX_HLS_LINE]; - bool result = true; - is_extm3u = false; - - // parse lines - memset(tmp, 0, MAX_HLS_LINE); - while (true) { - memset(tmp, 0, MAX_HLS_LINE); - size_t len = - url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); - // stop when there is no more data - if (len == 0 && url_stream.available() == 0) break; - StrView str(tmp); - - // check header - if (str.startsWith("#EXTM3U")) { - is_extm3u = true; - // reset timings - resetTimings(); - } - - if (is_extm3u) { - if (!parseIndexLine(str)) { - return false; - } - } - } - return result; - } - - /// Determine codec for min bandwidth - bool parseIndexLine(StrView &str) { - TRACED(); - LOGI("> %s", str.c_str()); - parseIndexLineMetaData(str); - // in some exceptional cases the index provided segement info - parseSegmentLineMetaData(str); - parseLineURL(str); - return true; - } - - bool parseIndexLineMetaData(StrView &str) { - int tmp_bandwidth; - if (str.startsWith("#")) { - if (str.indexOf("EXT-X-STREAM-INF") >= 0) { - next_url_type = URLType::Index; - // determine min bandwidth - int pos = str.indexOf("BANDWIDTH="); - if (pos > 0) { - StrView num(str.c_str() + pos + 10); - tmp_bandwidth = num.toInt(); - url_active = (tmp_bandwidth < bandwidth || bandwidth == 0); - if (url_active) { - bandwidth = tmp_bandwidth; - LOGD("-> bandwith: %d", bandwidth); - } - } - - pos = str.indexOf("CODECS="); - if (pos > 0) { - int start = pos + 8; - int end = str.indexOf('"', pos + 10); - codec.substring(str, start, end); - LOGI("-> codec: %s", codec.c_str()); - } - } - } - return true; - } - - void resetTimings() { - next_sement_load_time_planned = millis(); - play_time = 0; - next_sement_load_time = 0xFFFFFFFFFFFFFFFF; - } - - /// parse the segment url provided by the index - bool parseSegments() { - TRACED(); - if (parse_segments_active) { - return false; - } - - // make sure that we load at relevant schedule - if (millis() < next_sement_load_time && url_loader.urlCount() > 
1) { - delay(1); - return false; - } - parse_segments_active = true; - - LOGI("Available urls: %d", url_loader.urlCount()); - - if (url_stream) url_stream.clear(); - LOGI("parsing %s", segments_url_str.c_str()); - - if (segments_url_str.isEmpty()) { - TRACEE(); - parse_segments_active = false; - return false; - } - - if (!url_stream.begin(segments_url_str.c_str())) { - TRACEE(); - parse_segments_active = false; - return false; - } - - segment_count = 0; - if (!parseSegmentLines()) { - TRACEE(); - parse_segments_active = false; - // do not display as error - return true; - } - - segmentsActivate(); - return true; - } - - void segmentsActivate() { - LOGI("Reloading in %f sec", play_time / 1000.0); - if (play_time > 0) { - next_sement_load_time = next_sement_load_time_planned + play_time; - } - - // we request a minimum of collected urls to play before we start - if (url_history.size() > START_URLS_LIMIT) active = true; - parse_segments_active = false; - } - - /// parse the segments - bool parseSegmentLines() { - TRACEI(); - char tmp[MAX_HLS_LINE]; - bool result = true; - is_extm3u = false; - - // parse lines - memset(tmp, 0, MAX_HLS_LINE); - while (true) { - memset(tmp, 0, MAX_HLS_LINE); - size_t len = - url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); - if (len == 0 && url_stream.available() == 0) break; - StrView str(tmp); - - // check header - if (str.startsWith("#EXTM3U")) { - is_extm3u = true; - resetTimings(); - } - - if (is_extm3u) { - if (!parseSegmentLine(str)) { - return false; - } - } - } - return result; - } - - /// Add all segments to queue - bool parseSegmentLine(StrView &str) { - TRACED(); - LOGI("> %s", str.c_str()); - if (!parseSegmentLineMetaData(str)) return false; - parseLineURL(str); - return true; - } - - bool parseSegmentLineMetaData(StrView &str) { - if (str.startsWith("#")) { - if (str.startsWith("#EXT-X-MEDIA-SEQUENCE:")) { - int new_media_sequence = atoi(str.c_str() + 22); - LOGI("media_sequence: %d", new_media_sequence); - if (new_media_sequence == media_sequence) { - LOGW("MEDIA-SEQUENCE already loaded: %d", media_sequence); - return false; - } - media_sequence = new_media_sequence; - } - - // add play time to next_sement_load_time_planned - if (str.startsWith("#EXTINF")) { - next_url_type = URLType::Segment; - StrView sec_str(str.c_str() + 8); - float sec = sec_str.toFloat(); - LOGI("adding play time: %f sec", sec); - play_time += (sec * 1000.0); - } - } - return true; - } - - bool parseLineURL(StrView &str) { - if (!str.startsWith("#")) { - switch (next_url_type) { - case URLType::Undefined: - // we should not get here - assert(false); - break; - case URLType::Index: - if (str.startsWith("http")) { - segments_url_str.set(str); - } else { - segments_url_str.set(resolve_url(str.c_str(), index_url_str)); - } - LOGD("segments_url_str = %s", segments_url_str.c_str()); - break; - case URLType::Segment: - segment_count++; - if (url_history.add(str.c_str())) { - // provide audio urls to the url_loader - if (str.startsWith("http")) { - url_str = str; - } else { - // we create the complete url - url_str = resolve_url(str.c_str(), index_url_str); - } - url_loader.addUrl(url_str.c_str()); - } else { - LOGD("Duplicate ignored: %s", str.c_str()); - } - } - // clear url type - next_url_type = URLType::Undefined; - } - return true; - } -}; - -} // namespace audio_tools_hls - -namespace audio_tools { -/** - * @brief HTTP Live Streaming using HLS: The resulting .ts data is provided - * via readBytes() that dynamically reload new Segments. 
Please note that - * this reloading adds a considerable delay: So if you want to play back the - * audio, you should buffer the content in a seaparate task. - * - * @author Phil Schatzmann - * @ingroup http *@copyright GPLv3 - */ - -template -class HLSStreamT : public AbstractURLStream { - public: - /// Empty constructor - HLSStreamT() = default; - - /// Convenience constructor which logs in to the WiFi - HLSStreamT(const char *ssid, const char *password) { - setSSID(ssid); - setPassword(password); - } - - /// Open an HLS url - bool begin(const char *urlStr) { - TRACEI(); - login(); - // parse the url to the HLS - bool rc = parser.begin(urlStr); - return rc; - } - - /// Reopens the last url - bool begin() override { - TRACEI(); - login(); - bool rc = parser.begin(); - return rc; - } - - /// ends the request - void end() override { parser.end(); } - - /// Sets the ssid that will be used for logging in (when calling begin) - void setSSID(const char *ssid) override { this->ssid = ssid; } - - /// Sets the password that will be used for logging in (when calling begin) - void setPassword(const char *password) override { this->password = password; } - - /// Returns the string representation of the codec of the audio stream - const char *codec() { return parser.getCodec(); } - - /// Provides the content type from the http reply - const char *contentType() { return parser.contentType(); } - - /// Provides the content length of the actual .ts Segment - int contentLength() override { return parser.contentLength(); } - - /// Provides number of available bytes in the read buffer - int available() override { - TRACED(); - return parser.available(); - } - - /// Provides the data fro the next .ts Segment - size_t readBytes(uint8_t *data, size_t len) override { - TRACED(); - return parser.readBytes(data, len); - } - - /// Redefines the read buffer size - void setBufferSize(int size, int count) { parser.setBufferSize(size, count); } - - /// Defines the certificate - void setCACert(const char *cert) override { parser.setCACert(cert); } - - /// Changes the Wifi to power saving mode - void setPowerSave(bool flag) override { parser.setPowerSave(flag); } - - /// Custom logic to provide the codec as Content-Type to support the - /// MultiCodec - const char *getReplyHeader(const char *header) override { - const char *codec = parser.getCodec(); - const char *result = nullptr; - if (StrView(header).equalsIgnoreCase(CONTENT_TYPE)) { - result = parser.contentType(); - } - if (result) LOGI("-> Format: %s", result); - return result; - } - - /// The resolving of relative addresses can be quite tricky: you can provide - /// your custom resolver implementation - void setURLResolver(const char *(*cb)(const char *segment, - const char *reqURL)) { - parser.setURLResolver(cb); - } - - const char *urlStr() override { return parser.urlStr(); } - - size_t totalRead() override { return parser.totalRead(); }; - /// not implemented - void setConnectionClose(bool flag) override {}; - /// not implemented - bool waitForData(int timeout) override { return false; } - - protected: - audio_tools_hls::HLSParser parser; - const char *ssid = nullptr; - const char *password = nullptr; - - void login() { -#ifdef USE_WIFI - if (ssid != nullptr && password != nullptr && - WiFi.status() != WL_CONNECTED) { - TRACED(); - delay(1000); - WiFi.begin(ssid, password); - while (WiFi.status() != WL_CONNECTED) { - Serial.print("."); - delay(500); - } - } -#else - LOGW("login not supported"); -#endif - } - - /// Added to comply with AbstractURLStream - bool 
begin(const char *urlStr, const char *acceptMime, MethodID action = GET, - const char *reqMime = "", const char *reqData = "") override { - return begin(urlStr); - } - - HttpRequest &httpRequest() override { - static HttpRequest dummy; - return dummy; - } - - /// Not implemented: potential future improvement - void setClient(Client &clientPar) override {} - - /// Not implemented - void addRequestHeader(const char *header, const char *value) override {} -}; - -using HLSStream = HLSStreamT; - -} // namespace audio_tools +#WARNING("Obsolete - use /AudioTools/Communication/HLSStream.h") +#include "AudioTools/Communication/HLSStream.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/README.md b/src/AudioTools/AudioLibs/README.md index 1701c98238..4d3a6dba4b 100644 --- a/src/AudioTools/AudioLibs/README.md +++ b/src/AudioTools/AudioLibs/README.md @@ -1,2 +1,2 @@ -Integration to different external audio libraries \ No newline at end of file +Integration to different optonal external audio libraries \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/VBANStream.h b/src/AudioTools/AudioLibs/VBANStream.h index 6ff5ceacfb..65ef0a6bf2 100644 --- a/src/AudioTools/AudioLibs/VBANStream.h +++ b/src/AudioTools/AudioLibs/VBANStream.h @@ -1,592 +1,3 @@ - -#include -#include - -#include "AudioTools/AudioLibs/vban/vban.h" -#include "AudioTools/CoreAudio/AudioStreams.h" -#include "AudioTools/Concurrency/RTOS/BufferRTOS.h" - -namespace audio_tools { - -class VBANConfig : public AudioInfo { - public: - VBANConfig() { - sample_rate = 11025; - channels = 1; - bits_per_sample = 16; - } - RxTxMode mode; - /// name of the stream - const char* stream_name = "Stream1"; - /// default port is 6980 - uint16_t udp_port = 6980; - /// Use {0,0,0,0}; as broadcast address - IPAddress target_ip{0, 0, 0, 0}; - /// ssid for wifi connection - const char* ssid = nullptr; - /// password for wifi connection - const char* password = nullptr; - int rx_buffer_count = 30; - // set to true if samples are generated faster then sample rate - bool throttle_active = false; - // when negative the number of ms that are subtracted from the calculated wait - // time to fine tune Overload and Underruns - int throttle_correction_us = 0; - // defines the max write size - int max_write_size = - DEFAULT_BUFFER_SIZE * 2; // just good enough for 44100 stereo - uint8_t format = 0; - - //reply for discovery packet - uint32_t device_flags = 0x00000001; // default: receiver only - uint32_t bitfeature = 0x00000001; // default: audio only - uint32_t device_color = 0x00FF00; // green default - //const char* stream_name_reply = "VBAN SPOT PING"; - const char* device_name = nullptr; // nullptr means use MAC by default - const char* manufacturer_name = "ESP32 AudioTools"; - const char* application_name = "VBAN Streamer"; - const char* host_name = nullptr; // will fallback to WiFi.getHostname() - const char* user_name = "User"; - const char* user_comment = "ESP32 VBAN Audio Device"; -}; - -/** - * @brief VBAN Audio Source and Sink for the ESP32. For further details please - * see https://vb-audio.com/Voicemeeter/vban.htm . 
- * Inspired by https://github.com/rkinnett/ESP32-VBAN-Audio-Source/tree/master - * and https://github.com/rkinnett/ESP32-VBAN-Network-Audio-Player - * @ingroup communications - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -class VBANStream : public AudioStream { - public: - VBANConfig defaultConfig(RxTxMode mode = TX_MODE) { - VBANConfig def; - def.mode = mode; - return def; - } - - void setOutput(Print &out){ - p_out = &out; - } - - void setAudioInfo(AudioInfo info) override { - cfg.copyFrom(info); - AudioStream::setAudioInfo(info); - auto thc = throttle.defaultConfig(); - thc.copyFrom(info); - thc.correction_us = cfg.throttle_correction_us; - throttle.begin(thc); - if (cfg.mode == TX_MODE) { - configure_tx(); - } - } - - bool begin(VBANConfig cfg) { - this->cfg = cfg; - setAudioInfo(cfg); - return begin(); - } - - bool begin() { - if (cfg.mode == TX_MODE) { - if (cfg.bits_per_sample != 16) { - LOGE("Only 16 bits supported") - return false; - } - tx_buffer.resize(VBAN_PACKET_NUM_SAMPLES); - return begin_tx(); - } else { -#ifdef ESP32 - rx_buffer.resize(DEFAULT_BUFFER_SIZE * cfg.rx_buffer_count); - rx_buffer.setReadMaxWait(10); -#else - rx_buffer.resize(DEFAULT_BUFFER_SIZE, cfg.rx_buffer_count); -#endif - return begin_rx(); - } - } - - size_t write(const uint8_t* data, size_t len) override { - if (!udp_connected) return 0; - - int16_t* adc_data = (int16_t*)data; - size_t samples = len / (cfg.bits_per_sample/8); - - // limit output speed - if (cfg.throttle_active) { - throttle.delayFrames(samples / cfg.channels); - } - - for (int j = 0; j < samples; j++) { - tx_buffer.write(adc_data[j]); - if (tx_buffer.availableForWrite() == 0) { - memcpy(vban.data_frame, tx_buffer.data(), vban.packet_data_bytes); - *vban.packet_counter = packet_counter; // increment packet counter - // Send packet - if (cfg.target_ip == broadcast_address) { - udp.broadcastTo((uint8_t*)&vban.packet, vban.packet_total_bytes, - cfg.udp_port); - } else { - udp.writeTo((uint8_t*)&vban.packet, vban.packet_total_bytes, - cfg.target_ip, cfg.udp_port); - } - // defile delay start time - packet_counter++; - tx_buffer.reset(); - } - } - return len; - } - - int availableForWrite() { return cfg.max_write_size; } - - size_t readBytes(uint8_t* data, size_t len) override { - TRACED(); - size_t samples = len / (cfg.bits_per_sample/8); - if (cfg.throttle_active) { - throttle.delayFrames(samples / cfg.channels); - } - return rx_buffer.readArray(data, len); - } - - int available() { return available_active ? 
rx_buffer.available() : 0; } - - protected: - const IPAddress broadcast_address{0, 0, 0, 0}; - AsyncUDP udp; - VBan vban; - VBANConfig cfg; - SingleBuffer tx_buffer{0}; - #ifdef ESP32 - BufferRTOS rx_buffer{ 0}; - #else - NBuffer rx_buffer{DEFAULT_BUFFER_SIZE, 0}; - #endif - bool udp_connected = false; - uint32_t packet_counter = 0; - Throttle throttle; - size_t bytes_received = 0; - bool available_active = false; - Print *p_out = nullptr; - - bool begin_tx() { - if (!configure_tx()) { - return false; - } - start_wifi(); - if (WiFi.status() != WL_CONNECTED) { - LOGE("Wifi not connected"); - return false; - } - WiFi.setSleep(false); - IPAddress myIP = WiFi.localIP(); - udp_connected = udp.connect(myIP, cfg.udp_port); - return udp_connected; - } - - bool begin_rx() { - start_wifi(); - if (WiFi.status() != WL_CONNECTED) { - LOGE("Wifi not connected"); - return false; - } - WiFi.setSleep(false); - bytes_received = 0; - this->available_active = false; - // connect to target - if (!udp.listen(cfg.udp_port)) { - LOGE("Could not connect to '%s:%d' target", toString(cfg.target_ip), - cfg.udp_port); - } - // handle data - udp.onPacket([this](AsyncUDPPacket packet) { receive_udp(packet); }); - - return true; - } - - bool configure_tx() { - int rate = vban_sample_rate(); - if (rate < 0) { - LOGE("Invalid sample rate: %d", cfg.sample_rate); - return false; - } - configure_vban((VBanSampleRates)rate); - return true; - } - - void start_wifi() { - if (cfg.ssid == nullptr) return; - if (cfg.password == nullptr) return; - LOGI("ssid %s", cfg.ssid); - // Setup Wifi: - WiFi.begin(cfg.ssid, cfg.password); // Connect to your WiFi router - while (WiFi.status() != WL_CONNECTED) { // Wait for connection - delay(500); - Serial.print("."); - } - Serial.println(); - - LOGI("Wifi connected to IP (%d.%d.%d.%d)", WiFi.localIP()[0], - WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); - } - - void configure_vban(VBanSampleRates rate) { - // Set vban packet header, counter, and data frame pointers to respective - // parts of packet: - vban.hdr = (VBanHeader*)&vban.packet[0]; - vban.packet_counter = (uint32_t*)&vban.packet[VBAN_PACKET_HEADER_BYTES]; - vban.data_frame = - (uint8_t*)&vban - .packet[VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES]; - - // Setup the packet header: - strncpy(vban.hdr->preamble, "VBAN", 4); - vban.hdr->sample_rate = - static_cast(VBAN_PROTOCOL_AUDIO) | - rate; // 11025 Hz, which matches default sample rate for soundmodem - vban.hdr->num_samples = - (VBAN_PACKET_NUM_SAMPLES / cfg.channels) - 1; // 255 = 256 samples - vban.hdr->num_channels = cfg.channels - 1; // 0 = 1 channel - vban.hdr->sample_format = - static_cast(VBAN_BITFMT_16_INT) | VBAN_CODEC_PCM; // int16 PCM - strncpy(vban.hdr->stream_name, cfg.stream_name, - min((int)strlen(cfg.stream_name), VBAN_STREAM_NAME_SIZE)); - - vban.packet_data_bytes = - (vban.hdr->num_samples + 1) * (vban.hdr->num_channels + 1) * - ((vban.hdr->sample_format & VBAN_BIT_RESOLUTION_MASK) + 1); - vban.packet_total_bytes = vban.packet_data_bytes + - VBAN_PACKET_HEADER_BYTES + - VBAN_PACKET_COUNTER_BYTES; - } - - int vban_sample_rate() { - int result = -1; - switch (cfg.sample_rate) { - case 6000: - result = SAMPLE_RATE_6000_HZ; - break; - case 12000: - result = SAMPLE_RATE_12000_HZ; - break; - case 24000: - result = SAMPLE_RATE_24000_HZ; - break; - case 48000: - result = SAMPLE_RATE_48000_HZ; - break; - case 96000: - result = SAMPLE_RATE_96000_HZ; - break; - case 192000: - result = SAMPLE_RATE_192000_HZ; - break; - case 384000: - result = 
SAMPLE_RATE_384000_HZ; - break; - case 8000: - result = SAMPLE_RATE_8000_HZ; - break; - case 16000: - result = SAMPLE_RATE_16000_HZ; - break; - case 32000: - result = SAMPLE_RATE_32000_HZ; - break; - case 64000: - result = SAMPLE_RATE_64000_HZ; - break; - case 128000: - result = SAMPLE_RATE_128000_HZ; - break; - case 256000: - result = SAMPLE_RATE_256000_HZ; - break; - case 512000: - result = SAMPLE_RATE_512000_HZ; - break; - case 11025: - result = SAMPLE_RATE_11025_HZ; - break; - case 22050: - result = SAMPLE_RATE_22050_HZ; - break; - case 44100: - result = SAMPLE_RATE_44100_HZ; - break; - case 88200: - result = SAMPLE_RATE_88200_HZ; - break; - case 176400: - result = SAMPLE_RATE_176400_HZ; - break; - case 352800: - result = SAMPLE_RATE_352800_HZ; - break; - case 705600: - result = SAMPLE_RATE_705600_HZ; - break; - } - return result; - } - - const char* toString(IPAddress adr) { - static char str[11] = {0}; - snprintf(str, 11, "%d.%d.%d.%d", adr[0], adr[1], adr[2], adr[3]); - return str; - } - - /** - * @brief VBAN adjusts the number of samples per packet according to sample - *rate. Assuming 16-bit PCM mono, sample rates 11025, 22050, 44100, and 88200 - *yield packets containing 64, 128, 256, and 256 samples per packet, - *respectively. The even-thousands sample rates below 48000 yield - *non-power-of-2 lengths. For example, sample rate 24000 yields 139 samples - *per packet. This VBAN->DMA->DAC method seems to require the dma buffer - *length be set equal to the number of samples in each VBAN packet. ESP32 - *I2S/DMA does not seem to handle non-power-of-2 buffer lengths well. Sample - *rate 24000 doesn't work reliably at all. Sample rate 32000 is stable but - *stutters. Recommend selecting from sample rates 11025, 22050, 44100, and - *above And set samplesPerPacket to 64 for 11025, 128 for 22050, or 256 for - *all else. 
- **/ - - void receive_udp(AsyncUDPPacket& packet) { - uint16_t vban_rx_data_bytes, vban_rx_sample_count; - int16_t* vban_rx_data; - uint32_t* vban_rx_pkt_nbr; - uint16_t outBuf[VBAN_PACKET_MAX_SAMPLES + 1]; - size_t bytesOut; - - int len = packet.length(); - if (len > 0) { - LOGD("receive_udp %d", len); - uint8_t* udpIncomingPacket = packet.data(); - - // receive incoming UDP packet - // Check if packet length meets VBAN specification: - if (len < VBAN_PACKET_HEADER_BYTES) { - LOGE("Too short to be VBAN (%u bytes)", len); - return; - } - - // Check if preamble matches VBAN format: - if (strncmp("VBAN", (const char*)udpIncomingPacket, 4) != 0) { - LOGE("Unrecognized preamble %.4s", udpIncomingPacket); - return; - } - - uint8_t protocol = udpIncomingPacket[4] & VBAN_PROTOCOL_MASK; - - if (protocol == VBAN_PROTOCOL_SERVICE) { - // Allow up to ~1024 bytes for service packets like Ping0 - if (len > 1024) { - LOGE("Service packet length invalid: %u bytes", len); - return; - } - } else { - // Audio, serial, etc - if (len <= (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES) || len > VBAN_PACKET_MAX_LEN_BYTES) { - LOGE("Audio/other packet length invalid: %u bytes", len); - rx_buffer.reset(); - return; - } - } - - //LOGI("VBAN format byte: 0x%02X", udpIncomingPacket[7]); - //LOGD("VBAN protocol mask applied: 0x%02X", udpIncomingPacket[7] & VBAN_PROTOCOL_MASK); - //Serial.printf("Header[7] = 0x%02X\n", udpIncomingPacket[7]); - - - //------------------------------------------------------------------------- - //SUPPORT PING REQUEST - if ( protocol == VBAN_PROTOCOL_SERVICE ) { - - uint8_t service_type = udpIncomingPacket[5]; - uint8_t service_fnct = udpIncomingPacket[6]; - - if (service_type == VBAN_SERVICE_IDENTIFICATION) { - bool isReply = (service_fnct & VBAN_SERVICE_FNCT_REPLY) != 0; - uint8_t function = service_fnct & 0x7F; - - if (!isReply && function == 0) { - LOGI("Received VBAN PING0 request"); - sendVbanPing0Reply(packet); - } - } - return; - } - //-------------------------------------------------------------------------- - - vban_rx_data_bytes = - len - (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES); - vban_rx_pkt_nbr = (uint32_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES]; - vban_rx_data = (int16_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES + - VBAN_PACKET_COUNTER_BYTES]; - vban_rx_sample_count = vban_rx_data_bytes / (cfg.bits_per_sample / 8); - uint8_t vbanSampleRateIdx = udpIncomingPacket[4] & VBAN_SR_MASK; - uint8_t vbchannels = udpIncomingPacket[6] + 1; - uint8_t vbframes = udpIncomingPacket[5] + 1; - uint8_t vbformat = udpIncomingPacket[7] & VBAN_PROTOCOL_MASK; - uint8_t vbformat_bits = udpIncomingPacket[7] & VBAN_BIT_RESOLUTION_MASK; - uint32_t vbanSampleRate = VBanSRList[vbanSampleRateIdx]; - - //LOGD("sample_count: %d - frames: %d", vban_rx_sample_count, vbframes); - //assert (vban_rx_sample_count == vbframes*vbchannels); - - // E.g. do not process any text - if (vbformat != cfg.format){ - LOGE("Format ignored: 0x%x", vbformat); - return; - } - - // Currently we support only 16 bits. 
- if (vbformat_bits != VBAN_BITFMT_16_INT){ - LOGE("Format only 16 bits supported"); - return; - } - - // Just to be safe, re-check sample count against max sample count to - // avoid overrunning outBuf later - if (vban_rx_sample_count > VBAN_PACKET_MAX_SAMPLES) { - LOGE("unexpected packet size: %u", vban_rx_sample_count); - return; - } - - // update sample rate - if (cfg.sample_rate != vbanSampleRate || cfg.channels != vbchannels) { - // update audio info - cfg.sample_rate = vbanSampleRate; - cfg.channels = vbchannels; - setAudioInfo(cfg); - // remove any buffered data - rx_buffer.reset(); - available_active = false; - } - - if (p_out!=nullptr){ - int size_written = p_out->write((uint8_t*)vban_rx_data, vban_rx_data_bytes); - if (size_written != vban_rx_data_bytes) { - LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); - } - return; - } - - // write data to buffer - int size_written = rx_buffer.writeArray((uint8_t*)vban_rx_data, vban_rx_data_bytes); - if (size_written != vban_rx_data_bytes) { - LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written); - } - - // report available bytes only when buffer is 50% full - if (!available_active) { - bytes_received += vban_rx_data_bytes; - if (bytes_received >= cfg.rx_buffer_count * DEFAULT_BUFFER_SIZE * 0.75){ - available_active = true; - LOGI("Activating vban"); - } - } - } - } -//------------------------------------------------------------------------------------- - //implement ping reply based on VBAN standard - void sendVbanPing0Reply(AsyncUDPPacket& sourcePacket) { - - // Prepare VBAN 28-byte service header - uint8_t header[28]; - memset(header, 0, sizeof(header)); - memcpy(header, "VBAN", 4); - header[4] = VBAN_PROTOCOL_SERVICE; - header[5] = VBAN_SERVICE_FNCT_PING0 | VBAN_SERVICE_FNCT_REPLY; // Service function + reply bit - header[6] = 0x00; // must be zero - // Copy incoming stream name from discovery packet - const uint8_t* data = sourcePacket.data(); - memcpy(&header[8], &data[8], 16); - // Copy frame number (little endian) - - uint32_t frameNumber = (uint32_t)((data[24] & 0xFF) | ((data[25] & 0xFF) << 8) | ((data[26] & 0xFF) << 16) | ((data[27] & 0xFF) << 24)); - memcpy(&header[24], &frameNumber, 4); - - // Construct the PING0 payload using the struct - VBAN_PING0 ping0; - memset(&ping0, 0, sizeof(ping0)); - - // Fill fields with your config data and fixed values - ping0.bitType = cfg.device_flags; - ping0.bitfeature = cfg.bitfeature; - ping0.bitfeatureEx = 0x00000000; - ping0.PreferedRate = 44100; - ping0.MinRate = 8000; - ping0.MaxRate = 96000; - ping0.color_rgb = cfg.device_color; - - // Version string, 8 bytes total (zero padded) - memcpy(ping0.nVersion, "v1.0", 4); - - // GPS_Position left empty (all zero), so no need to set - // USER_Position 8 bytes - memcpy(ping0.USER_Position, "USRPOS", 6); - // LangCode_ascii 8 bytes ("EN" + padding) - memset(ping0.LangCode_ascii, 0, sizeof(ping0.LangCode_ascii)); - memcpy(ping0.LangCode_ascii, "EN", 2); - // reserved_ascii and reservedEx are zeroed by memset - // IP as string, max 32 bytes - - char ipStr[16]; // Enough for "255.255.255.255\0" - sprintf(ipStr, "%d.%d.%d.%d", WiFi.localIP()[0], WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]); - safe_strncpy(ping0.DistantIP_ascii, ipStr, sizeof(ping0.DistantIP_ascii)); - // Ports (network byte order) - - ping0.DistantPort = cfg.udp_port; //returs port I am listening for VBAN - more useful then UDP ephemeral port - ping0.DistantReserved = 0; - - // Device name (64 bytes) - if (cfg.device_name && 
cfg.device_name[0] != '\0') {
-    safe_strncpy(ping0.DeviceName_ascii, cfg.device_name, sizeof(ping0.DeviceName_ascii));
-  } else {
-    uint8_t mac[6];
-    WiFi.macAddress(mac);
-    char macStr[64];
-    snprintf(macStr, sizeof(macStr), "%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
-    safe_strncpy(ping0.DeviceName_ascii, macStr, sizeof(ping0.DeviceName_ascii));
-  }
-
-  // Manufacturer name (64 bytes)
-  safe_strncpy(ping0.ManufacturerName_ascii, cfg.manufacturer_name, sizeof(ping0.ManufacturerName_ascii));
-  // Application name (64 bytes)
-  safe_strncpy(ping0.ApplicationName_ascii, cfg.application_name, sizeof(ping0.ApplicationName_ascii));
-  // Host name (64 bytes)
-  const char* hostName = cfg.host_name;
-  if (!hostName || hostName[0] == '\0') {
-    hostName = WiFi.getHostname();
-    if (!hostName) hostName = "ESP32";
-  }
-  safe_strncpy(ping0.HostName_ascii, hostName, sizeof(ping0.HostName_ascii));
-
-  // UserName_utf8
-  safe_strncpy(ping0.UserName_utf8, cfg.user_name, sizeof(ping0.UserName_utf8));
-  //UserComment_utf8
-  safe_strncpy(ping0.UserComment_utf8, cfg.user_comment, sizeof(ping0.UserComment_utf8));
-
-  // Prepare final packet: header + payload
-  uint8_t packet[28 + sizeof(VBAN_PING0)];
-  memcpy(packet, header, 28);
-  memcpy(packet + 28, &ping0, sizeof(VBAN_PING0));
-
-  // Send UDP packet
-  udp.writeTo(packet, sizeof(packet), sourcePacket.remoteIP(), sourcePacket.remotePort());
-}
-
-  // Safely copy a C-string with guaranteed null termination
-  void safe_strncpy(char* dest, const char* src, size_t dest_size) {
-    if (dest_size == 0) return;
-    strncpy(dest, src, dest_size - 1);
-    dest[dest_size - 1] = '\0';
-  }
-  //-----------------------------------------------------------------------------------
-};
-
-}  // namespace audio_tools
\ No newline at end of file
+#pragma once
+#warning("Obsolete - use AudioTools/Communication/VBANStream.h")
+#include "AudioTools/Communication/VBANStream.h"
\ No newline at end of file
diff --git a/src/AudioTools/Communication/HLSStream.h b/src/AudioTools/Communication/HLSStream.h
new file mode 100644
index 0000000000..50aff75668
--- /dev/null
+++ b/src/AudioTools/Communication/HLSStream.h
@@ -0,0 +1,781 @@
+#pragma once
+#include "AudioTools/AudioCodecs/AudioEncoded.h"
+#include "AudioTools/CoreAudio/AudioBasic/Str.h"
+#include "AudioTools/CoreAudio/AudioHttp/URLStream.h"
+#include "AudioTools/CoreAudio/StreamCopy.h"
+#include "AudioToolsConfig.h"
+
+#define MAX_HLS_LINE 512
+#define START_URLS_LIMIT 4
+#define HLS_BUFFER_COUNT 2
+#define HLS_MAX_NO_READ 2
+#define HLS_MAX_URL_LEN 256
+#define HLS_TIMEOUT 5000
+#define HLS_UNDER_OVERFLOW_WAIT_TIME 10
+
+/// hide the hls implementation in its own namespace
+
+namespace audio_tools_hls {
+
+/***
+ * @brief We feed the URLLoaderHLS with some url strings. The data of the
+ * related segments are provided via the readBytes() method.
+ * @author Phil Schatzmann + * @copyright GPLv3 + */ + +template +class URLLoaderHLS { + public: + URLLoaderHLS() = default; + + ~URLLoaderHLS() { end(); } + + bool begin() { + TRACED(); + buffer.resize(buffer_size * buffer_count); + + active = true; + return true; + } + + void end() { + TRACED(); + url_stream.end(); + buffer.clear(); + active = false; + } + + /// Adds the next url to be played in sequence + void addUrl(const char *url) { + LOGI("Adding %s", url); + StrView url_str(url); + char *str = new char[url_str.length() + 1]; + memcpy(str, url_str.c_str(), url_str.length() + 1); + urls.push_back((const char *)str); + } + + /// Provides the number of open urls which can be played. Refills them, when + /// min limit is reached. + int urlCount() { return urls.size(); } + + /// Available bytes of the audio stream + int available() { + if (!active) return 0; + TRACED(); + bufferRefill(); + + return buffer.available(); + } + + /// Provides data from the audio stream + size_t readBytes(uint8_t *data, size_t len) { + if (!active) return 0; + TRACED(); + bufferRefill(); + + if (buffer.available() < len) LOGW("Buffer underflow"); + return buffer.readArray(data, len); + } + + const char *contentType() { + return url_stream.httpRequest().reply().get(CONTENT_TYPE); + } + + int contentLength() { return url_stream.contentLength(); } + + void setBufferSize(int size, int count) { + buffer_size = size; + buffer_count = count; + // support call after begin()! + if (buffer.size() != 0) { + buffer.resize(buffer_size * buffer_count); + } + } + + void setCACert(const char *cert) { url_stream.setCACert(cert); } + + protected: + Vector urls{10}; + RingBuffer buffer{0}; + bool active = false; + int buffer_size = DEFAULT_BUFFER_SIZE; + int buffer_count = HLS_BUFFER_COUNT; + URLStream url_stream; + const char *url_to_play = nullptr; + + /// try to keep the buffer filled + void bufferRefill() { + TRACED(); + // we have nothing to do + if (urls.empty()) { + LOGD("urls empty"); + delay(HLS_UNDER_OVERFLOW_WAIT_TIME); + return; + } + if (buffer.availableForWrite() == 0) { + LOGD("buffer full"); + delay(HLS_UNDER_OVERFLOW_WAIT_TIME); + return; + } + + // switch current stream if we have no more data + if (!url_stream && !urls.empty()) { + LOGD("Refilling"); + if (url_to_play != nullptr) { + delete url_to_play; + } + url_to_play = urls[0]; + LOGI("playing %s", url_to_play); + url_stream.end(); + url_stream.setConnectionClose(true); + url_stream.setTimeout(HLS_TIMEOUT); + url_stream.begin(url_to_play); + url_stream.waitForData(HLS_TIMEOUT); + urls.pop_front(); + // assert(urls[0]!=url); + + LOGI("Playing %s of %d", url_stream.urlStr(), (int)urls.size()); + } + + int total = 0; + int failed = 0; + int to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); + // try to keep the buffer filled + while (to_write > 0) { + uint8_t tmp[to_write]; + memset(tmp, 0, to_write); + int read = url_stream.readBytes(tmp, to_write); + total += read; + if (read > 0) { + failed = 0; + buffer.writeArray(tmp, read); + LOGD("buffer add %d -> %d:", read, buffer.available()); + + to_write = min(buffer.availableForWrite(), DEFAULT_BUFFER_SIZE); + } else { + delay(10); + } + // After we processed all data we close the stream to get a new url + if (url_stream.totalRead() == url_stream.contentLength()) { + LOGI("Closing stream because all bytes were processed: available: %d", + url_stream.available()); + url_stream.end(); + break; + } + LOGD("Refilled with %d now %d available to write", total, + buffer.availableForWrite()); + } + } +}; 
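+
+// Usage sketch (illustrative only, not part of the API): the HLSParser below feeds
+// this loader with segment urls, after which it acts as a buffered data source.
+// The variable names and the url are examples:
+//   URLLoaderHLS<URLStream> loader;                  // templated on the URL stream class
+//   loader.begin();
+//   loader.addUrl("http://example.com/seg0.ts");     // normally done by the parser
+//   uint8_t buf[512];
+//   size_t n = loader.readBytes(buf, sizeof(buf));   // refills from the url internally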
+ +/** + * Prevent that the same url is loaded twice. We limit the history to + * 20 entries. + */ +class URLHistory { + public: + bool add(const char *url) { + if (url == nullptr) return true; + bool found = false; + StrView url_str(url); + for (int j = 0; j < history.size(); j++) { + if (url_str.equals(history[j])) { + found = true; + break; + } + } + if (!found) { + char *str = new char[url_str.length() + 1]; + memcpy(str, url, url_str.length() + 1); + history.push_back((const char *)str); + if (history.size() > 20) { + delete (history[0]); + history.pop_front(); + } + } + return !found; + } + + void clear() { history.clear(); } + + int size() { return history.size(); } + + protected: + Vector history; +}; + +/** + * @brief Simple Parser for HLS data. + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class HLSParser { + public: + // loads the index url + bool begin(const char *urlStr) { + index_url_str = urlStr; + return begin(); + } + + bool begin() { + TRACEI(); + segments_url_str = ""; + bandwidth = 0; + total_read = 0; + + if (!parseIndex()) { + TRACEE(); + return false; + } + + // in some exceptional cases the index provided segement info + if (url_loader.urlCount() == 0) { + if (!parseSegments()) { + TRACEE(); + return false; + } + } else { + segments_url_str = index_url_str; + segmentsActivate(); + } + + if (!url_loader.begin()) { + TRACEE(); + return false; + } + + return true; + } + + int available() { + TRACED(); + int result = 0; + reloadSegments(); + + if (active) result = url_loader.available(); + return result; + } + + size_t readBytes(uint8_t *data, size_t len) { + TRACED(); + size_t result = 0; + reloadSegments(); + + if (active) result = url_loader.readBytes(data, len); + total_read += result; + return result; + } + + const char *indexUrl() { return index_url_str; } + + const char *segmentsUrl() { return segments_url_str.c_str(); } + + /// Provides the codec + const char *getCodec() { return codec.c_str(); } + + /// Provides the content type of the audio data + const char *contentType() { return url_loader.contentType(); } + + /// Provides the http content lengh + int contentLength() { return url_loader.contentLength(); } + + /// Closes the processing + void end() { + TRACEI(); + codec.clear(); + segments_url_str.clear(); + url_stream.end(); + url_loader.end(); + url_history.clear(); + active = false; + } + + /// Defines the number of urls that are preloaded in the URLLoaderHLS + void setUrlCount(int count) { url_count = count; } + + /// Redefines the buffer size + void setBufferSize(int size, int count) { + url_loader.setBufferSize(size, count); + } + + void setCACert(const char *cert) { + url_stream.setCACert(cert); + url_loader.setCACert(cert); + } + + void setPowerSave(bool flag) { url_stream.setPowerSave(flag); } + + void setURLResolver(const char *(*cb)(const char *segment, + const char *reqURL)) { + resolve_url = cb; + } + /// Provides the hls url as string + const char *urlStr() { return url_str.c_str(); } + + /// Povides the number of bytes read + size_t totalRead() { return total_read; }; + + protected: + enum class URLType { Undefined, Index, Segment }; + URLType next_url_type = URLType::Undefined; + int bandwidth = 0; + int url_count = 5; + size_t total_read = 0; + bool url_active = false; + bool is_extm3u = false; + Str codec; + Str segments_url_str; + Str url_str; + const char *index_url_str = nullptr; + URLStream url_stream; + URLLoaderHLS url_loader; + URLHistory url_history; + bool active = false; + bool parse_segments_active = false; + 
int media_sequence = 0; + int segment_count = 0; + uint64_t next_sement_load_time_planned = 0; + float play_time = 0; + uint64_t next_sement_load_time = 0; + const char *(*resolve_url)(const char *segment, + const char *reqURL) = resolveURL; + + /// Default implementation for url resolver: determine absolue url from + /// relative url + static const char *resolveURL(const char *segment, const char *reqURL) { + // avoid dynamic memory allocation + static char result[HLS_MAX_URL_LEN] = {0}; + StrView result_str(result, HLS_MAX_URL_LEN); + StrView index_url(reqURL); + // Use prefix up to ? or laast / + int end = index_url.lastIndexOf("?"); + if (end >= 0) { + result_str.substring(reqURL, 0, end); + } else { + end = index_url.lastIndexOf("/"); + if (end >= 0) { + result_str.substring(reqURL, 0, end); + } + } + // Use the full url + if (result_str.isEmpty()) { + result_str = reqURL; + } + // add trailing / + if (!result_str.endsWith("/")) { + result_str.add("/"); + } + // add relative segment + result_str.add(segment); + LOGI(">> relative addr: %s for %s", segment, reqURL); + LOGD(">> -> %s", result); + return result; + } + + /// trigger the reloading of segments if the limit is underflowing + void reloadSegments() { + TRACED(); + // get new urls + if (!segments_url_str.isEmpty()) { + parseSegments(); + } + } + + /// parse the index file and the segments + bool parseIndex() { + TRACED(); + url_stream.end(); + url_stream.setTimeout(HLS_TIMEOUT); + url_stream.setConnectionClose(true); + if (!url_stream.begin(index_url_str)) return false; + url_active = true; + return parseIndexLines(); + } + + /// parse the index file + bool parseIndexLines() { + TRACEI(); + char tmp[MAX_HLS_LINE]; + bool result = true; + is_extm3u = false; + + // parse lines + memset(tmp, 0, MAX_HLS_LINE); + while (true) { + memset(tmp, 0, MAX_HLS_LINE); + size_t len = + url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); + // stop when there is no more data + if (len == 0 && url_stream.available() == 0) break; + StrView str(tmp); + + // check header + if (str.startsWith("#EXTM3U")) { + is_extm3u = true; + // reset timings + resetTimings(); + } + + if (is_extm3u) { + if (!parseIndexLine(str)) { + return false; + } + } + } + return result; + } + + /// Determine codec for min bandwidth + bool parseIndexLine(StrView &str) { + TRACED(); + LOGI("> %s", str.c_str()); + parseIndexLineMetaData(str); + // in some exceptional cases the index provided segement info + parseSegmentLineMetaData(str); + parseLineURL(str); + return true; + } + + bool parseIndexLineMetaData(StrView &str) { + int tmp_bandwidth; + if (str.startsWith("#")) { + if (str.indexOf("EXT-X-STREAM-INF") >= 0) { + next_url_type = URLType::Index; + // determine min bandwidth + int pos = str.indexOf("BANDWIDTH="); + if (pos > 0) { + StrView num(str.c_str() + pos + 10); + tmp_bandwidth = num.toInt(); + url_active = (tmp_bandwidth < bandwidth || bandwidth == 0); + if (url_active) { + bandwidth = tmp_bandwidth; + LOGD("-> bandwith: %d", bandwidth); + } + } + + pos = str.indexOf("CODECS="); + if (pos > 0) { + int start = pos + 8; + int end = str.indexOf('"', pos + 10); + codec.substring(str, start, end); + LOGI("-> codec: %s", codec.c_str()); + } + } + } + return true; + } + + void resetTimings() { + next_sement_load_time_planned = millis(); + play_time = 0; + next_sement_load_time = 0xFFFFFFFFFFFFFFFF; + } + + /// parse the segment url provided by the index + bool parseSegments() { + TRACED(); + if (parse_segments_active) { + return false; + } + + // make sure that 
we load at relevant schedule + if (millis() < next_sement_load_time && url_loader.urlCount() > 1) { + delay(1); + return false; + } + parse_segments_active = true; + + LOGI("Available urls: %d", url_loader.urlCount()); + + if (url_stream) url_stream.clear(); + LOGI("parsing %s", segments_url_str.c_str()); + + if (segments_url_str.isEmpty()) { + TRACEE(); + parse_segments_active = false; + return false; + } + + if (!url_stream.begin(segments_url_str.c_str())) { + TRACEE(); + parse_segments_active = false; + return false; + } + + segment_count = 0; + if (!parseSegmentLines()) { + TRACEE(); + parse_segments_active = false; + // do not display as error + return true; + } + + segmentsActivate(); + return true; + } + + void segmentsActivate() { + LOGI("Reloading in %f sec", play_time / 1000.0); + if (play_time > 0) { + next_sement_load_time = next_sement_load_time_planned + play_time; + } + + // we request a minimum of collected urls to play before we start + if (url_history.size() > START_URLS_LIMIT) active = true; + parse_segments_active = false; + } + + /// parse the segments + bool parseSegmentLines() { + TRACEI(); + char tmp[MAX_HLS_LINE]; + bool result = true; + is_extm3u = false; + + // parse lines + memset(tmp, 0, MAX_HLS_LINE); + while (true) { + memset(tmp, 0, MAX_HLS_LINE); + size_t len = + url_stream.httpRequest().readBytesUntil('\n', tmp, MAX_HLS_LINE); + if (len == 0 && url_stream.available() == 0) break; + StrView str(tmp); + + // check header + if (str.startsWith("#EXTM3U")) { + is_extm3u = true; + resetTimings(); + } + + if (is_extm3u) { + if (!parseSegmentLine(str)) { + return false; + } + } + } + return result; + } + + /// Add all segments to queue + bool parseSegmentLine(StrView &str) { + TRACED(); + LOGI("> %s", str.c_str()); + if (!parseSegmentLineMetaData(str)) return false; + parseLineURL(str); + return true; + } + + bool parseSegmentLineMetaData(StrView &str) { + if (str.startsWith("#")) { + if (str.startsWith("#EXT-X-MEDIA-SEQUENCE:")) { + int new_media_sequence = atoi(str.c_str() + 22); + LOGI("media_sequence: %d", new_media_sequence); + if (new_media_sequence == media_sequence) { + LOGW("MEDIA-SEQUENCE already loaded: %d", media_sequence); + return false; + } + media_sequence = new_media_sequence; + } + + // add play time to next_sement_load_time_planned + if (str.startsWith("#EXTINF")) { + next_url_type = URLType::Segment; + StrView sec_str(str.c_str() + 8); + float sec = sec_str.toFloat(); + LOGI("adding play time: %f sec", sec); + play_time += (sec * 1000.0); + } + } + return true; + } + + bool parseLineURL(StrView &str) { + if (!str.startsWith("#")) { + switch (next_url_type) { + case URLType::Undefined: + // we should not get here + assert(false); + break; + case URLType::Index: + if (str.startsWith("http")) { + segments_url_str.set(str); + } else { + segments_url_str.set(resolve_url(str.c_str(), index_url_str)); + } + LOGD("segments_url_str = %s", segments_url_str.c_str()); + break; + case URLType::Segment: + segment_count++; + if (url_history.add(str.c_str())) { + // provide audio urls to the url_loader + if (str.startsWith("http")) { + url_str = str; + } else { + // we create the complete url + url_str = resolve_url(str.c_str(), index_url_str); + } + url_loader.addUrl(url_str.c_str()); + } else { + LOGD("Duplicate ignored: %s", str.c_str()); + } + } + // clear url type + next_url_type = URLType::Undefined; + } + return true; + } +}; + +} // namespace audio_tools_hls + +namespace audio_tools { +/** + * @brief HTTP Live Streaming using HLS: The resulting .ts 
data is provided + * via readBytes() that dynamically reload new Segments. Please note that + * this reloading adds a considerable delay: So if you want to play back the + * audio, you should buffer the content in a seaparate task. + * + * @author Phil Schatzmann + * @ingroup http *@copyright GPLv3 + */ + +template +class HLSStreamT : public AbstractURLStream { + public: + /// Empty constructor + HLSStreamT() = default; + + /// Convenience constructor which logs in to the WiFi + HLSStreamT(const char *ssid, const char *password) { + setSSID(ssid); + setPassword(password); + } + + /// Open an HLS url + bool begin(const char *urlStr) { + TRACEI(); + login(); + // parse the url to the HLS + bool rc = parser.begin(urlStr); + return rc; + } + + /// Reopens the last url + bool begin() override { + TRACEI(); + login(); + bool rc = parser.begin(); + return rc; + } + + /// ends the request + void end() override { parser.end(); } + + /// Sets the ssid that will be used for logging in (when calling begin) + void setSSID(const char *ssid) override { this->ssid = ssid; } + + /// Sets the password that will be used for logging in (when calling begin) + void setPassword(const char *password) override { this->password = password; } + + /// Returns the string representation of the codec of the audio stream + const char *codec() { return parser.getCodec(); } + + /// Provides the content type from the http reply + const char *contentType() { return parser.contentType(); } + + /// Provides the content length of the actual .ts Segment + int contentLength() override { return parser.contentLength(); } + + /// Provides number of available bytes in the read buffer + int available() override { + TRACED(); + return parser.available(); + } + + /// Provides the data fro the next .ts Segment + size_t readBytes(uint8_t *data, size_t len) override { + TRACED(); + return parser.readBytes(data, len); + } + + /// Redefines the read buffer size + void setBufferSize(int size, int count) { parser.setBufferSize(size, count); } + + /// Defines the certificate + void setCACert(const char *cert) override { parser.setCACert(cert); } + + /// Changes the Wifi to power saving mode + void setPowerSave(bool flag) override { parser.setPowerSave(flag); } + + /// Custom logic to provide the codec as Content-Type to support the + /// MultiCodec + const char *getReplyHeader(const char *header) override { + const char *codec = parser.getCodec(); + const char *result = nullptr; + if (StrView(header).equalsIgnoreCase(CONTENT_TYPE)) { + result = parser.contentType(); + } + if (result) LOGI("-> Format: %s", result); + return result; + } + + /// The resolving of relative addresses can be quite tricky: you can provide + /// your custom resolver implementation + void setURLResolver(const char *(*cb)(const char *segment, + const char *reqURL)) { + parser.setURLResolver(cb); + } + + const char *urlStr() override { return parser.urlStr(); } + + size_t totalRead() override { return parser.totalRead(); }; + /// not implemented + void setConnectionClose(bool flag) override {}; + /// not implemented + bool waitForData(int timeout) override { return false; } + + protected: + audio_tools_hls::HLSParser parser; + const char *ssid = nullptr; + const char *password = nullptr; + + void login() { +#ifdef USE_WIFI + if (ssid != nullptr && password != nullptr && + WiFi.status() != WL_CONNECTED) { + TRACED(); + delay(1000); + WiFi.begin(ssid, password); + while (WiFi.status() != WL_CONNECTED) { + Serial.print("."); + delay(500); + } + } +#else + LOGW("login not 
supported"); +#endif + } + + /// Added to comply with AbstractURLStream + bool begin(const char *urlStr, const char *acceptMime, MethodID action = GET, + const char *reqMime = "", const char *reqData = "") override { + return begin(urlStr); + } + + HttpRequest &httpRequest() override { + static HttpRequest dummy; + return dummy; + } + + /// Not implemented: potential future improvement + void setClient(Client &clientPar) override {} + + /// Not implemented + void addRequestHeader(const char *header, const char *value) override {} +}; + +using HLSStream = HLSStreamT; + +} // namespace audio_tools diff --git a/src/AudioTools/AudioLibs/HLSStreamESP32.h b/src/AudioTools/Communication/HLSStreamESP32.h similarity index 100% rename from src/AudioTools/AudioLibs/HLSStreamESP32.h rename to src/AudioTools/Communication/HLSStreamESP32.h diff --git a/src/AudioTools/Communication/README.md b/src/AudioTools/Communication/README.md index 97e4b99ed1..d677dd4146 100644 --- a/src/AudioTools/Communication/README.md +++ b/src/AudioTools/Communication/README.md @@ -1,2 +1,2 @@ -Different classes to send and receive audio over the wire \ No newline at end of file +Different optional classes to send and receive audio over the wire or air \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP.h b/src/AudioTools/Communication/RTSP.h index a0eb7c2cf6..3c6833dcfe 100644 --- a/src/AudioTools/Communication/RTSP.h +++ b/src/AudioTools/Communication/RTSP.h @@ -1,5 +1,13 @@ #pragma once +/** + * @defgroup rtsp RTSP Streaming + * @ingroup communications + * @file RTSP.h + * @author Phil Schatzmann + * @copyright GPLv3 + */ + #include "AudioTools/CoreAudio/AudioPlayer.h" #include "AudioTools/CoreAudio/AudioStreams.h" #include "RTSP/IAudioSource.h" @@ -8,6 +16,8 @@ #include "RTSP/RTSPFormat.h" #include "RTSP/RTSPOutput.h" #include "RTSP/RTSPAudioStreamer.h" +#include "RTSP/RTSPClient.h" #ifdef ESP32 #include "RTSP/RTSPPlatformWiFi.h" +#include "RTSP/RTSPClientWiFi.h" #endif \ No newline at end of file diff --git a/src/AudioTools/Communication/RTSP/IAudioSource.h b/src/AudioTools/Communication/RTSP/IAudioSource.h index 5c2d6080f3..c48896d9b7 100644 --- a/src/AudioTools/Communication/RTSP/IAudioSource.h +++ b/src/AudioTools/Communication/RTSP/IAudioSource.h @@ -18,7 +18,6 @@ namespace audio_tools { /** * @brief Audio Source Interface - Contract for Audio Data Providers * - * @version 0.1.1 */ class IAudioSource { public: diff --git a/src/AudioTools/Communication/RTSPClient555.h b/src/AudioTools/Communication/RTSPClient555.h new file mode 100644 index 0000000000..27517807a1 --- /dev/null +++ b/src/AudioTools/Communication/RTSPClient555.h @@ -0,0 +1,721 @@ + +#pragma once + +/** +This library is free software; you can redistribute it and/or modify it under +the terms of the GNU Lesser General Public License as published by the +Free Software Foundation; either version 3 of the License, or (at your +option) any later version. (See .) + +This library is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for +more details. + +You should have received a copy of the GNU Lesser General Public License +along with this library; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +**/ + +// Copyright (c) 1996-2023, Live Networks, Inc. 
All rights reserved +// A demo application, showing how to create and run a RTSP client (that can +// potentially receive multiple streams concurrently). +// + +#include "AudioLogger.h" +#include "Print.h" // Arduino Print +// include live555 +#include "BasicUsageEnvironment.hh" +//#include "liveMedia.hh" +#include "RTSPClient.hh" + +// By default, we request that the server stream its data using RTP/UDP. +// If, instead, you want to request that the server stream via RTP-over-TCP, +// change the following to True: +#define REQUEST_STREAMING_OVER_TCP false + +// by default, print verbose output from each "RTSPClient" +#define RTSP_CLIENT_VERBOSITY_LEVEL 1 +// Even though we're not going to be doing anything with the incoming data, we +// still need to receive it. Define the size of the buffer that we'll use: +#define RTSP_SINK_BUFFER_SIZE 1024 + +// If you don't want to see debugging output for each received frame, then +// comment out the following line: +#undef DEBUG_PRINT_EACH_RECEIVED_FRAME +#define DEBUG_PRINT_EACH_RECEIVED_FRAME 0 + +/// @brief AudioTools internal: rtsp +namespace audiotools_rtsp { + +class OurRTSPClient; +// The main streaming routine (or each "rtsp://" URL): +OurRTSPClient * openURL(UsageEnvironment& env, char const* progName, char const* rtspURL); +// Counts how many streams (i.e., "RTSPClient"s) are currently in use. +static unsigned rtspClientCount = 0; +static char rtspEventLoopWatchVariable = 0; +static Print* rtspOutput = nullptr; +static uint32_t rtspSinkReceiveBufferSize = 0; +static bool rtspUseTCP = REQUEST_STREAMING_OVER_TCP; + +} // namespace audiotools_rtsp + +namespace audio_tools { + +/** + * @brief A simple RTSPClient using https://github.com/pschatzmann/arduino-live555 + * @ingroup communications + * @author Phil Schatzmann + * @copyright GPLv3 +*/ +class AudioClientRTSP { + public: + AudioClientRTSP(uint32_t receiveBufferSize = RTSP_SINK_BUFFER_SIZE, bool useTCP=REQUEST_STREAMING_OVER_TCP, bool blocking = false) { + setBufferSize(receiveBufferSize); + useTCP ? setTCP() : setUDP(); + setBlocking(blocking); + } + + void setBufferSize(int size){ + audiotools_rtsp::rtspSinkReceiveBufferSize = size; + } + + void setTCP(){ + audiotools_rtsp::rtspUseTCP = true; + } + + void setUDP(){ + audiotools_rtsp::rtspUseTCP = false; + } + + void setBlocking(bool flag){ + is_blocking = flag; + } + + /// login to wifi: optional convinience method. You can also just start Wifi the normal way + void setLogin(const char* ssid, const char* password){ + this->ssid = ssid; + this->password = password; + } + + /// Starts the processing + bool begin(const char* url, Print &out) { + audiotools_rtsp::rtspOutput = &out; + if (url==nullptr) { + return false; + } + if (!login()){ + LOGE("wifi down"); + return false; + } + // Begin by setting up our usage environment: + scheduler = BasicTaskScheduler::createNew(); + env = BasicUsageEnvironment::createNew(*scheduler); + + // There are argc-1 URLs: argv[1] through argv[argc-1]. Open and start + // streaming each one: + rtsp_client = audiotools_rtsp::openURL(*env, "RTSPClient", url); + + // All subsequent activity takes place within the event loop: + if (is_blocking) env->taskScheduler().doEventLoop(&audiotools_rtsp::rtspEventLoopWatchVariable); + // This function call does not return, unless, at some point in time, + // "rtspEventLoopWatchVariable" gets set to something non-zero. 
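+    // Illustrative sketch: with blocking == false (the default), begin() returns right
+    // away and the Arduino sketch has to drive the live555 scheduler itself; the url and
+    // output object below are placeholders:
+    //   AudioClientRTSP rtsp(1024, false, false);    // buffer size, UDP, non-blocking
+    //   void setup() { rtsp.begin("rtsp://server:8554/stream", out); }
+    //   void loop()  { rtsp.loop(); }                // single-steps the scheduler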
+ + return true; + } + + /// to be called in Arduino loop when blocking = false + void loop() { + if (audiotools_rtsp::rtspEventLoopWatchVariable==0) scheduler->SingleStep(); + } + + void end() { + audiotools_rtsp::rtspEventLoopWatchVariable = 1; + env->reclaim(); + env = NULL; + delete scheduler; + scheduler = NULL; + bool is_blocking = false; + } + + audiotools_rtsp::OurRTSPClient *client() { + return rtsp_client; + } + + protected: + audiotools_rtsp::OurRTSPClient* rtsp_client; + UsageEnvironment* env=nullptr; + BasicTaskScheduler* scheduler=nullptr; + const char* ssid=nullptr; + const char* password = nullptr; + bool is_blocking = false; + + /// login to wifi: optional convinience method. You can also just start Wifi the normal way + bool login(){ + if(WiFi.status() != WL_CONNECTED && ssid!=nullptr && password!=nullptr){ + WiFi.mode(WIFI_STA); + WiFi.begin(ssid, password); + while(WiFi.status() != WL_CONNECTED){ + Serial.print("."); + delay(100); + } + Serial.println(); + Serial.print("Local Address: "); + Serial.println(WiFi.localIP()); + } + return WiFi.status() == WL_CONNECTED; + } + + +}; + +} // namespace audio_tools + +namespace audiotools_rtsp { +// Define a class to hold per-stream state that we maintain throughout each +// stream's lifetime: + +// Forward function definitions: + +// RTSP 'response handlers': +void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, + char* resultString); +void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, + char* resultString); +void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, + char* resultString); + +// Other event handler functions: +void subsessionAfterPlaying( + void* clientData); // called when a stream's subsession (e.g., audio or + // video substream) ends +void subsessionByeHandler(void* clientData, char const* reason); +// called when a RTCP "BYE" is received for a subsession +void streamTimerHandler(void* clientData); +// called at the end of a stream's expected duration (if the stream has not +// already signaled its end using a RTCP "BYE") + +// Used to iterate through each stream's 'subsessions', setting up each one: +void setupNextSubsession(RTSPClient* rtspClient); + +// Used to shut down and close a stream (including its "RTSPClient" object): +void shutdownStream(RTSPClient* rtspClient, int exitCode = 1); + +// A function that outputs a string that identifies each stream (for debugging +// output). Modify this if you wish: +UsageEnvironment& operator<<(UsageEnvironment& env, + const RTSPClient& rtspClient) { + return env << "[URL:\"" << rtspClient.url() << "\"]: "; +} + +// A function that outputs a string that identifies each subsession (for +// debugging output). Modify this if you wish: +UsageEnvironment& operator<<(UsageEnvironment& env, + const MediaSubsession& subsession) { + return env << subsession.mediumName() << "/" << subsession.codecName(); +} + +class StreamClientState { + public: + StreamClientState(); + virtual ~StreamClientState(); + + public: + MediaSubsessionIterator* iter; + MediaSession* session; + MediaSubsession* subsession; + TaskToken streamTimerTask; + double duration; +}; + +// If you're streaming just a single stream (i.e., just from a single URL, +// once), then you can define and use just a single "StreamClientState" +// structure, as a global variable in your application. However, because - in +// this demo application - we're showing how to play multiple streams, +// concurrently, we can't do that. 
Instead, we have to have a separate +// "StreamClientState" structure for each "RTSPClient". To do this, we subclass +// "RTSPClient", and add a "StreamClientState" field to the subclass: + +class OurRTSPClient : public RTSPClient { + public: + static OurRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL, + int verbosityLevel = 0, + char const* applicationName = NULL, + portNumBits tunnelOverHTTPPortNum = 0); + + protected: + OurRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel, + char const* applicationName, portNumBits tunnelOverHTTPPortNum); + // called only by createNew(); + virtual ~OurRTSPClient(); + + public: + StreamClientState scs; +}; + +// Define a data sink (a subclass of "MediaSink") to receive the data for each +// subsession (i.e., each audio or video 'substream'). In practice, this might +// be a class (or a chain of classes) that decodes and then renders the incoming +// audio or video. Or it might be a "FileSink", for outputting the received data +// into a file (as is done by the "openRTSP" application). In this example code, +// however, we define a simple 'dummy' sink that receives incoming data, but +// does nothing with it. + +class OurSink : public MediaSink { + public: + static OurSink* createNew( + UsageEnvironment& env, + MediaSubsession& + subsession, // identifies the kind of data that's being received + char const* streamId = NULL); // identifies the stream itself (optional) + + private: + OurSink(UsageEnvironment& env, MediaSubsession& subsession, + char const* streamId); + // called only by "createNew()" + virtual ~OurSink(); + + static void afterGettingFrame(void* clientData, unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds); + void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds); + + private: + // redefined virtual functions: + virtual Boolean continuePlaying(); + + private: + u_int8_t* fReceiveBuffer; + MediaSubsession& fSubsession; + char* fStreamId; +}; + +OurRTSPClient* openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) { + // Begin by creating a "RTSPClient" object. Note that there is a separate + // "RTSPClient" object for each stream that we wish to receive (even if more + // than stream uses the same "rtsp://" URL). + OurRTSPClient* rtspClient = OurRTSPClient::createNew( + env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName); + if (rtspClient == NULL) { + env << "Failed to create a RTSP client for URL \"" << rtspURL + << "\": " << env.getResultMsg() << "\n"; + return nullptr; + } + + ++rtspClientCount; + + // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the + // stream. Note that this command - like all RTSP commands - is sent + // asynchronously; we do not block, waiting for a response. 
Instead, the + // following function call returns immediately, and we handle the RTSP + // response later, from within the event loop: + rtspClient->sendDescribeCommand(continueAfterDESCRIBE); + return rtspClient; +} + +// Implementation of the RTSP 'response handlers': + +void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, + char* resultString) { + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to get a SDP description: " << resultString + << "\n"; + delete[] resultString; + break; + } + + char* const sdpDescription = resultString; + env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n"; + + // Create a media session object from this SDP description: + scs.session = MediaSession::createNew(env, sdpDescription); + delete[] sdpDescription; // because we don't need it anymore + if (scs.session == NULL) { + env << *rtspClient + << "Failed to create a MediaSession object from the SDP description: " + << env.getResultMsg() << "\n"; + break; + } else if (!scs.session->hasSubsessions()) { + env << *rtspClient + << "This session has no media subsessions (i.e., no \"m=\" lines)\n"; + break; + } + + // Then, create and set up our data source objects for the session. We do + // this by iterating over the session's 'subsessions', calling + // "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, + // on each one. (Each 'subsession' will have its own data source.) + scs.iter = new MediaSubsessionIterator(*scs.session); + setupNextSubsession(rtspClient); + return; + } while (0); + + // An unrecoverable error occurred with this stream. + shutdownStream(rtspClient); +} + +void setupNextSubsession(RTSPClient* rtspClient) { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + scs.subsession = scs.iter->next(); + if (scs.subsession != NULL) { + if (!scs.subsession->initiate()) { + env << *rtspClient << "Failed to initiate the \"" << *scs.subsession + << "\" subsession: " << env.getResultMsg() << "\n"; + setupNextSubsession( + rtspClient); // give up on this subsession; go to the next one + } else { + env << *rtspClient << "Initiated the \"" << *scs.subsession + << "\" subsession ("; + if (scs.subsession->rtcpIsMuxed()) { + env << "client port " << scs.subsession->clientPortNum(); + } else { + env << "client ports " << scs.subsession->clientPortNum() << "-" + << scs.subsession->clientPortNum() + 1; + } + env << ")\n"; + + // Continue setting up this subsession, by sending a RTSP "SETUP" command: + rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, + rtspUseTCP); + } + return; + } + + // We've finished setting up all of the subsessions. 
Now, send a RTSP "PLAY" + // command to start the streaming: + if (scs.session->absStartTime() != NULL) { + // Special case: The stream is indexed by 'absolute' time, so send an + // appropriate "PLAY" command: + rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, + scs.session->absStartTime(), + scs.session->absEndTime()); + } else { + scs.duration = scs.session->playEndTime() - scs.session->playStartTime(); + rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY); + } +} + +void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, + char* resultString) { + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to set up the \"" << *scs.subsession + << "\" subsession: " << resultString << "\n"; + break; + } + + env << *rtspClient << "Set up the \"" << *scs.subsession + << "\" subsession ("; + if (scs.subsession->rtcpIsMuxed()) { + env << "client port " << scs.subsession->clientPortNum(); + } else { + env << "client ports " << scs.subsession->clientPortNum() << "-" + << scs.subsession->clientPortNum() + 1; + } + env << ")\n"; + + // Having successfully setup the subsession, create a data sink for it, and + // call "startPlaying()" on it. (This will prepare the data sink to receive + // data; the actual flow of data from the client won't start happening until + // later, after we've sent a RTSP "PLAY" command.) + + scs.subsession->sink = + OurSink::createNew(env, *scs.subsession, rtspClient->url()); + // perhaps use your own custom "MediaSink" subclass instead + if (scs.subsession->sink == NULL) { + env << *rtspClient << "Failed to create a data sink for the \"" + << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n"; + break; + } + + env << *rtspClient << "Created a data sink for the \"" << *scs.subsession + << "\" subsession\n"; + scs.subsession->miscPtr = + rtspClient; // a hack to let subsession handler functions get the + // "RTSPClient" from the subsession + scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), + subsessionAfterPlaying, scs.subsession); + // Also set a handler to be called if a RTCP "BYE" arrives for this + // subsession: + if (scs.subsession->rtcpInstance() != NULL) { + scs.subsession->rtcpInstance()->setByeWithReasonHandler( + subsessionByeHandler, scs.subsession); + } + } while (0); + delete[] resultString; + + // Set up the next subsession, if any: + setupNextSubsession(rtspClient); +} + +void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, + char* resultString) { + Boolean success = False; + + do { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + if (resultCode != 0) { + env << *rtspClient << "Failed to start playing session: " << resultString + << "\n"; + break; + } + + // Set a timer to be handled at the end of the stream's expected duration + // (if the stream does not already signal its end using a RTCP "BYE"). This + // is optional. If, instead, you want to keep the stream active - e.g., so + // you can later 'seek' back within it and do another RTSP "PLAY" - then you + // can omit this code. (Alternatively, if you don't want to receive the + // entire stream, you could set this timer for some shorter value.) + if (scs.duration > 0) { + unsigned const delaySlop = + 2; // number of seconds extra to delay, after the stream's expected + // duration. (This is optional.) 
+ scs.duration += delaySlop; + unsigned uSecsToDelay = (unsigned)(scs.duration * 1000000); + scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask( + uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient); + } + + env << *rtspClient << "Started playing session"; + if (scs.duration > 0) { + env << " (for up to " << scs.duration << " seconds)"; + } + env << "...\n"; + + success = True; + } while (0); + delete[] resultString; + + if (!success) { + // An unrecoverable error occurred with this stream. + shutdownStream(rtspClient); + } +} + +// Implementation of the other event handlers: + +void subsessionAfterPlaying(void* clientData) { + MediaSubsession* subsession = (MediaSubsession*)clientData; + RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); + + // Begin by closing this subsession's stream: + Medium::close(subsession->sink); + subsession->sink = NULL; + + // Next, check whether *all* subsessions' streams have now been closed: + MediaSession& session = subsession->parentSession(); + MediaSubsessionIterator iter(session); + while ((subsession = iter.next()) != NULL) { + if (subsession->sink != NULL) return; // this subsession is still active + } + + // All subsessions' streams have now been closed, so shutdown the client: + shutdownStream(rtspClient); +} + +void subsessionByeHandler(void* clientData, char const* reason) { + MediaSubsession* subsession = (MediaSubsession*)clientData; + RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr; + UsageEnvironment& env = rtspClient->envir(); // alias + + env << *rtspClient << "Received RTCP \"BYE\""; + if (reason != NULL) { + env << " (reason:\"" << reason << "\")"; + delete[] (char*)reason; + } + env << " on \"" << *subsession << "\" subsession\n"; + + // Now act as if the subsession had closed: + subsessionAfterPlaying(subsession); +} + +void streamTimerHandler(void* clientData) { + OurRTSPClient* rtspClient = (OurRTSPClient*)clientData; + StreamClientState& scs = rtspClient->scs; // alias + + scs.streamTimerTask = NULL; + + // Shut down the stream: + shutdownStream(rtspClient); +} + +void shutdownStream(RTSPClient* rtspClient, int exitCode) { + UsageEnvironment& env = rtspClient->envir(); // alias + StreamClientState& scs = ((OurRTSPClient*)rtspClient)->scs; // alias + + // First, check whether any subsessions have still to be closed: + if (scs.session != NULL) { + Boolean someSubsessionsWereActive = False; + MediaSubsessionIterator iter(*scs.session); + MediaSubsession* subsession; + + while ((subsession = iter.next()) != NULL) { + if (subsession->sink != NULL) { + Medium::close(subsession->sink); + subsession->sink = NULL; + + if (subsession->rtcpInstance() != NULL) { + subsession->rtcpInstance()->setByeHandler( + NULL, NULL); // in case the server sends a RTCP "BYE" while + // handling "TEARDOWN" + } + + someSubsessionsWereActive = True; + } + } + + if (someSubsessionsWereActive) { + // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the + // stream. Don't bother handling the response to the "TEARDOWN". + rtspClient->sendTeardownCommand(*scs.session, NULL); + } + } + + env << *rtspClient << "Closing the stream.\n"; + Medium::close(rtspClient); + // Note that this will also cause this stream's "StreamClientState" structure + // to get reclaimed. + + if (--rtspClientCount == 0) { + // The final stream has ended, so exit the application now. 
+ // (Of course, if you're embedding this code into your own application, you + // might want to comment this out, and replace it with + // "rtspEventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, + // and continue running "main()".) + // exit(exitCode); + rtspEventLoopWatchVariable = 1; + return; + } +} + +// Implementation of "OurRTSPClient": + +OurRTSPClient* OurRTSPClient::createNew(UsageEnvironment& env, + char const* rtspURL, int verbosityLevel, + char const* applicationName, + portNumBits tunnelOverHTTPPortNum) { + return new OurRTSPClient(env, rtspURL, verbosityLevel, applicationName, + tunnelOverHTTPPortNum); +} + +OurRTSPClient::OurRTSPClient(UsageEnvironment& env, char const* rtspURL, + int verbosityLevel, char const* applicationName, + portNumBits tunnelOverHTTPPortNum) + : RTSPClient(env, rtspURL, verbosityLevel, applicationName, + tunnelOverHTTPPortNum, -1) {} + +OurRTSPClient::~OurRTSPClient() {} + +// Implementation of "StreamClientState": + +StreamClientState::StreamClientState() + : iter(NULL), + session(NULL), + subsession(NULL), + streamTimerTask(NULL), + duration(0.0) {} + +StreamClientState::~StreamClientState() { + delete iter; + if (session != NULL) { + // We also need to delete "session", and unschedule "streamTimerTask" (if + // set) + UsageEnvironment& env = session->envir(); // alias + + env.taskScheduler().unscheduleDelayedTask(streamTimerTask); + Medium::close(session); + } +} + +// Implementation of "OurSink": + +OurSink* OurSink::createNew(UsageEnvironment& env, + MediaSubsession& subsession, + char const* streamId) { + return new OurSink(env, subsession, streamId); +} + +OurSink::OurSink(UsageEnvironment& env, MediaSubsession& subsession, + char const* streamId) + : MediaSink(env), fSubsession(subsession) { + fStreamId = strDup(streamId); + fReceiveBuffer = new u_int8_t[rtspSinkReceiveBufferSize]; +} + +OurSink::~OurSink() { + delete[] fReceiveBuffer; + delete[] fStreamId; +} + +void OurSink::afterGettingFrame(void* clientData, unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned durationInMicroseconds) { + OurSink* sink = (OurSink*)clientData; + sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, + durationInMicroseconds); +} + +void OurSink::afterGettingFrame(unsigned frameSize, + unsigned numTruncatedBytes, + struct timeval presentationTime, + unsigned /*durationInMicroseconds*/) { + // We've just received a frame of data. (Optionally) print out information + // about it: +#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME + if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; "; + envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() + << ":\tReceived " << frameSize << " bytes"; + if (numTruncatedBytes > 0) + envir() << " (with " << numTruncatedBytes << " bytes truncated)"; + char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the + // presentation time + snprintf(uSecsStr,7 , "%06u", (unsigned)presentationTime.tv_usec); + envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." 
+        << uSecsStr;
+  if (fSubsession.rtpSource() != NULL &&
+      !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
+    envir() << "!";  // mark the debugging output to indicate that this
+                     // presentation time is not RTCP-synchronized
+  }
+#ifdef DEBUG_PRINT_NPT
+  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
+#endif
+  envir() << "\n";
+#endif
+
+  // Decode the data
+  if (rtspOutput) {
+    size_t writtenSize = rtspOutput->write(fReceiveBuffer, frameSize);
+    assert(writtenSize == frameSize);
+  }
+
+  // Then continue, to request the next frame of data:
+  continuePlaying();
+}
+
+Boolean OurSink::continuePlaying() {
+  if (fSource == NULL) return False;  // sanity check (should not happen)
+
+  // Request the next frame of data from our input source.
+  // "afterGettingFrame()" will get called later, when it arrives:
+  fSource->getNextFrame(fReceiveBuffer, rtspSinkReceiveBufferSize,
+                        afterGettingFrame, this, onSourceClosure, this);
+  return True;
+}
+
+}  // namespace audiotools_rtsp
\ No newline at end of file
diff --git a/src/AudioTools/AudioLibs/vban/vban.h b/src/AudioTools/Communication/VBAN/vban.h
similarity index 100%
rename from src/AudioTools/AudioLibs/vban/vban.h
rename to src/AudioTools/Communication/VBAN/vban.h
diff --git a/src/AudioTools/Communication/VBANStream.h b/src/AudioTools/Communication/VBANStream.h
new file mode 100644
index 0000000000..b1637d022e
--- /dev/null
+++ b/src/AudioTools/Communication/VBANStream.h
@@ -0,0 +1,592 @@
+
+#include <WiFi.h>
+#include <AsyncUDP.h>
+
+#include "AudioTools/Communication/VBAN/vban.h"
+#include "AudioTools/CoreAudio/AudioStreams.h"
+#include "AudioTools/Concurrency/RTOS/BufferRTOS.h"
+
+namespace audio_tools {
+
+class VBANConfig : public AudioInfo {
+ public:
+  VBANConfig() {
+    sample_rate = 11025;
+    channels = 1;
+    bits_per_sample = 16;
+  }
+  RxTxMode mode;
+  /// name of the stream
+  const char* stream_name = "Stream1";
+  /// default port is 6980
+  uint16_t udp_port = 6980;
+  /// Use {0,0,0,0} as broadcast address
+  IPAddress target_ip{0, 0, 0, 0};
+  /// ssid for wifi connection
+  const char* ssid = nullptr;
+  /// password for wifi connection
+  const char* password = nullptr;
+  int rx_buffer_count = 30;
+  // set to true if samples are generated faster than the sample rate
+  bool throttle_active = false;
+  // correction in microseconds applied to the calculated wait time to
+  // fine-tune overloads and underruns (may be negative)
+  int throttle_correction_us = 0;
+  // defines the max write size
+  int max_write_size =
+      DEFAULT_BUFFER_SIZE * 2;  // just good enough for 44100 stereo
+  uint8_t format = 0;
+
+  // reply values for the discovery (PING0) packet
+  uint32_t device_flags = 0x00000001;  // default: receiver only
+  uint32_t bitfeature = 0x00000001;    // default: audio only
+  uint32_t device_color = 0x00FF00;    // green default
+  //const char* stream_name_reply = "VBAN SPOT PING";
+  const char* device_name = nullptr;  // nullptr means use MAC by default
+  const char* manufacturer_name = "ESP32 AudioTools";
+  const char* application_name = "VBAN Streamer";
+  const char* host_name = nullptr;  // will fall back to WiFi.getHostname()
+  const char* user_name = "User";
+  const char* user_comment = "ESP32 VBAN Audio Device";
+};
+
+/**
+ * @brief VBAN Audio Source and Sink for the ESP32. For further details please
+ * see https://vb-audio.com/Voicemeeter/vban.htm .
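+ * A minimal transmitter sketch is shown below; it is only an illustration
+ * that mirrors the streams-generator-vban example, and the SSID, password and
+ * target IP are placeholders that must be replaced:
+ * @code
+ * AudioInfo info(44100, 2, 16);
+ * SineWaveGenerator<int16_t> sineWave(32000);
+ * GeneratedSoundStream<int16_t> sound(sineWave);
+ * VBANStream out;
+ * StreamCopy copier(out, sound);
+ *
+ * void setup() {
+ *   auto cfg = out.defaultConfig(TX_MODE);
+ *   cfg.copyFrom(info);          // 44100 Hz / 2 channels / 16 bits
+ *   cfg.ssid = "ssid";           // placeholder
+ *   cfg.password = "password";   // placeholder
+ *   cfg.target_ip = IPAddress{192, 168, 1, 37};  // placeholder receiver
+ *   cfg.throttle_active = true;  // generator produces data faster than real time
+ *   out.begin(cfg);
+ *   sineWave.begin(info, N_B4);
+ * }
+ *
+ * void loop() { copier.copy(); }
+ * @endcode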
+ * Inspired by https://github.com/rkinnett/ESP32-VBAN-Audio-Source/tree/master
+ * and https://github.com/rkinnett/ESP32-VBAN-Network-Audio-Player
+ * @ingroup communications
+ * @author Phil Schatzmann
+ * @copyright GPLv3
+ */
+
+class VBANStream : public AudioStream {
+ public:
+  VBANConfig defaultConfig(RxTxMode mode = TX_MODE) {
+    VBANConfig def;
+    def.mode = mode;
+    return def;
+  }
+
+  void setOutput(Print &out){
+    p_out = &out;
+  }
+
+  void setAudioInfo(AudioInfo info) override {
+    cfg.copyFrom(info);
+    AudioStream::setAudioInfo(info);
+    auto thc = throttle.defaultConfig();
+    thc.copyFrom(info);
+    thc.correction_us = cfg.throttle_correction_us;
+    throttle.begin(thc);
+    if (cfg.mode == TX_MODE) {
+      configure_tx();
+    }
+  }
+
+  bool begin(VBANConfig cfg) {
+    this->cfg = cfg;
+    setAudioInfo(cfg);
+    return begin();
+  }
+
+  bool begin() {
+    if (cfg.mode == TX_MODE) {
+      if (cfg.bits_per_sample != 16) {
+        LOGE("Only 16 bits supported")
+        return false;
+      }
+      tx_buffer.resize(VBAN_PACKET_NUM_SAMPLES);
+      return begin_tx();
+    } else {
+#ifdef ESP32
+      rx_buffer.resize(DEFAULT_BUFFER_SIZE * cfg.rx_buffer_count);
+      rx_buffer.setReadMaxWait(10);
+#else
+      rx_buffer.resize(DEFAULT_BUFFER_SIZE, cfg.rx_buffer_count);
+#endif
+      return begin_rx();
+    }
+  }
+
+  size_t write(const uint8_t* data, size_t len) override {
+    if (!udp_connected) return 0;
+
+    int16_t* adc_data = (int16_t*)data;
+    size_t samples = len / (cfg.bits_per_sample/8);
+
+    // limit output speed
+    if (cfg.throttle_active) {
+      throttle.delayFrames(samples / cfg.channels);
+    }
+
+    for (int j = 0; j < samples; j++) {
+      tx_buffer.write(adc_data[j]);
+      if (tx_buffer.availableForWrite() == 0) {
+        memcpy(vban.data_frame, tx_buffer.data(), vban.packet_data_bytes);
+        *vban.packet_counter = packet_counter;  // write the current frame counter into the packet
+        // Send packet
+        if (cfg.target_ip == broadcast_address) {
+          udp.broadcastTo((uint8_t*)&vban.packet, vban.packet_total_bytes,
+                          cfg.udp_port);
+        } else {
+          udp.writeTo((uint8_t*)&vban.packet, vban.packet_total_bytes,
+                      cfg.target_ip, cfg.udp_port);
+        }
+        // advance the frame counter and reset the tx buffer
+        packet_counter++;
+        tx_buffer.reset();
+      }
+    }
+    return len;
+  }
+
+  int availableForWrite() { return cfg.max_write_size; }
+
+  size_t readBytes(uint8_t* data, size_t len) override {
+    TRACED();
+    size_t samples = len / (cfg.bits_per_sample/8);
+    if (cfg.throttle_active) {
+      throttle.delayFrames(samples / cfg.channels);
+    }
+    return rx_buffer.readArray(data, len);
+  }
+
+  int available() { return available_active ?
 rx_buffer.available() : 0; }
+
+ protected:
+  const IPAddress broadcast_address{0, 0, 0, 0};
+  AsyncUDP udp;
+  VBan vban;
+  VBANConfig cfg;
+  SingleBuffer<int16_t> tx_buffer{0};
+ #ifdef ESP32
+  BufferRTOS<uint8_t> rx_buffer{0};
+ #else
+  NBuffer<uint8_t> rx_buffer{DEFAULT_BUFFER_SIZE, 0};
+ #endif
+  bool udp_connected = false;
+  uint32_t packet_counter = 0;
+  Throttle throttle;
+  size_t bytes_received = 0;
+  bool available_active = false;
+  Print *p_out = nullptr;
+
+  bool begin_tx() {
+    if (!configure_tx()) {
+      return false;
+    }
+    start_wifi();
+    if (WiFi.status() != WL_CONNECTED) {
+      LOGE("Wifi not connected");
+      return false;
+    }
+    WiFi.setSleep(false);
+    IPAddress myIP = WiFi.localIP();
+    udp_connected = udp.connect(myIP, cfg.udp_port);
+    return udp_connected;
+  }
+
+  bool begin_rx() {
+    start_wifi();
+    if (WiFi.status() != WL_CONNECTED) {
+      LOGE("Wifi not connected");
+      return false;
+    }
+    WiFi.setSleep(false);
+    bytes_received = 0;
+    this->available_active = false;
+    // start listening for incoming packets
+    if (!udp.listen(cfg.udp_port)) {
+      LOGE("Could not listen on '%s:%d'", toString(cfg.target_ip),
+           cfg.udp_port);
+    }
+    // handle data
+    udp.onPacket([this](AsyncUDPPacket packet) { receive_udp(packet); });
+
+    return true;
+  }
+
+  bool configure_tx() {
+    int rate = vban_sample_rate();
+    if (rate < 0) {
+      LOGE("Invalid sample rate: %d", cfg.sample_rate);
+      return false;
+    }
+    configure_vban((VBanSampleRates)rate);
+    return true;
+  }
+
+  void start_wifi() {
+    if (cfg.ssid == nullptr) return;
+    if (cfg.password == nullptr) return;
+    LOGI("ssid %s", cfg.ssid);
+    // Setup Wifi:
+    WiFi.begin(cfg.ssid, cfg.password);  // Connect to your WiFi router
+    while (WiFi.status() != WL_CONNECTED) {  // Wait for connection
+      delay(500);
+      Serial.print(".");
+    }
+    Serial.println();
+
+    LOGI("Wifi connected to IP (%d.%d.%d.%d)", WiFi.localIP()[0],
+         WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]);
+  }
+
+  void configure_vban(VBanSampleRates rate) {
+    // Set vban packet header, counter, and data frame pointers to respective
+    // parts of packet:
+    vban.hdr = (VBanHeader*)&vban.packet[0];
+    vban.packet_counter = (uint32_t*)&vban.packet[VBAN_PACKET_HEADER_BYTES];
+    vban.data_frame =
+        (uint8_t*)&vban
+            .packet[VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES];
+
+    // Setup the packet header:
+    strncpy(vban.hdr->preamble, "VBAN", 4);
+    vban.hdr->sample_rate =
+        static_cast<int>(VBAN_PROTOCOL_AUDIO) |
+        rate;  // 11025 Hz, which matches default sample rate for soundmodem
+    vban.hdr->num_samples =
+        (VBAN_PACKET_NUM_SAMPLES / cfg.channels) - 1;  // 255 = 256 samples
+    vban.hdr->num_channels = cfg.channels - 1;  // 0 = 1 channel
+    vban.hdr->sample_format =
+        static_cast<int>(VBAN_BITFMT_16_INT) | VBAN_CODEC_PCM;  // int16 PCM
+    strncpy(vban.hdr->stream_name, cfg.stream_name,
+            min((int)strlen(cfg.stream_name), VBAN_STREAM_NAME_SIZE));
+
+    vban.packet_data_bytes =
+        (vban.hdr->num_samples + 1) * (vban.hdr->num_channels + 1) *
+        ((vban.hdr->sample_format & VBAN_BIT_RESOLUTION_MASK) + 1);
+    vban.packet_total_bytes = vban.packet_data_bytes +
+                              VBAN_PACKET_HEADER_BYTES +
+                              VBAN_PACKET_COUNTER_BYTES;
+  }
+
+  int vban_sample_rate() {
+    int result = -1;
+    switch (cfg.sample_rate) {
+      case 6000:
+        result = SAMPLE_RATE_6000_HZ;
+        break;
+      case 12000:
+        result = SAMPLE_RATE_12000_HZ;
+        break;
+      case 24000:
+        result = SAMPLE_RATE_24000_HZ;
+        break;
+      case 48000:
+        result = SAMPLE_RATE_48000_HZ;
+        break;
+      case 96000:
+        result = SAMPLE_RATE_96000_HZ;
+        break;
+      case 192000:
+        result = SAMPLE_RATE_192000_HZ;
+        break;
+      case 384000:
+        result =
 SAMPLE_RATE_384000_HZ;
+        break;
+      case 8000:
+        result = SAMPLE_RATE_8000_HZ;
+        break;
+      case 16000:
+        result = SAMPLE_RATE_16000_HZ;
+        break;
+      case 32000:
+        result = SAMPLE_RATE_32000_HZ;
+        break;
+      case 64000:
+        result = SAMPLE_RATE_64000_HZ;
+        break;
+      case 128000:
+        result = SAMPLE_RATE_128000_HZ;
+        break;
+      case 256000:
+        result = SAMPLE_RATE_256000_HZ;
+        break;
+      case 512000:
+        result = SAMPLE_RATE_512000_HZ;
+        break;
+      case 11025:
+        result = SAMPLE_RATE_11025_HZ;
+        break;
+      case 22050:
+        result = SAMPLE_RATE_22050_HZ;
+        break;
+      case 44100:
+        result = SAMPLE_RATE_44100_HZ;
+        break;
+      case 88200:
+        result = SAMPLE_RATE_88200_HZ;
+        break;
+      case 176400:
+        result = SAMPLE_RATE_176400_HZ;
+        break;
+      case 352800:
+        result = SAMPLE_RATE_352800_HZ;
+        break;
+      case 705600:
+        result = SAMPLE_RATE_705600_HZ;
+        break;
+    }
+    return result;
+  }
+
+  const char* toString(IPAddress adr) {
+    // 16 bytes hold "255.255.255.255" plus the terminating null
+    static char str[16] = {0};
+    snprintf(str, sizeof(str), "%d.%d.%d.%d", adr[0], adr[1], adr[2], adr[3]);
+    return str;
+  }
+
+  /**
+   * @brief VBAN adjusts the number of samples per packet according to sample
+   * rate. Assuming 16-bit PCM mono, sample rates 11025, 22050, 44100, and 88200
+   * yield packets containing 64, 128, 256, and 256 samples per packet,
+   * respectively. The even-thousands sample rates below 48000 yield
+   * non-power-of-2 lengths. For example, sample rate 24000 yields 139 samples
+   * per packet. This VBAN->DMA->DAC method seems to require the dma buffer
+   * length be set equal to the number of samples in each VBAN packet. ESP32
+   * I2S/DMA does not seem to handle non-power-of-2 buffer lengths well. Sample
+   * rate 24000 doesn't work reliably at all. Sample rate 32000 is stable but
+   * stutters. Recommend selecting from sample rates 11025, 22050, 44100, and
+   * above, and set samplesPerPacket to 64 for 11025, 128 for 22050, or 256 for
+   * all else.
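+   * As a rough worked example (illustrative arithmetic based on
+   * configure_vban() above): the data portion of a packet is
+   * (num_samples + 1) * (num_channels + 1) * bytes_per_sample. For 256 samples
+   * of 16-bit mono PCM this is 256 * 1 * 2 = 512 data bytes, plus the 28 bytes
+   * of VBAN header and frame counter (VBAN_PACKET_HEADER_BYTES +
+   * VBAN_PACKET_COUNTER_BYTES), i.e. 540 bytes per UDP datagram.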
+ **/ + + void receive_udp(AsyncUDPPacket& packet) { + uint16_t vban_rx_data_bytes, vban_rx_sample_count; + int16_t* vban_rx_data; + uint32_t* vban_rx_pkt_nbr; + uint16_t outBuf[VBAN_PACKET_MAX_SAMPLES + 1]; + size_t bytesOut; + + int len = packet.length(); + if (len > 0) { + LOGD("receive_udp %d", len); + uint8_t* udpIncomingPacket = packet.data(); + + // receive incoming UDP packet + // Check if packet length meets VBAN specification: + if (len < VBAN_PACKET_HEADER_BYTES) { + LOGE("Too short to be VBAN (%u bytes)", len); + return; + } + + // Check if preamble matches VBAN format: + if (strncmp("VBAN", (const char*)udpIncomingPacket, 4) != 0) { + LOGE("Unrecognized preamble %.4s", udpIncomingPacket); + return; + } + + uint8_t protocol = udpIncomingPacket[4] & VBAN_PROTOCOL_MASK; + + if (protocol == VBAN_PROTOCOL_SERVICE) { + // Allow up to ~1024 bytes for service packets like Ping0 + if (len > 1024) { + LOGE("Service packet length invalid: %u bytes", len); + return; + } + } else { + // Audio, serial, etc + if (len <= (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES) || len > VBAN_PACKET_MAX_LEN_BYTES) { + LOGE("Audio/other packet length invalid: %u bytes", len); + rx_buffer.reset(); + return; + } + } + + //LOGI("VBAN format byte: 0x%02X", udpIncomingPacket[7]); + //LOGD("VBAN protocol mask applied: 0x%02X", udpIncomingPacket[7] & VBAN_PROTOCOL_MASK); + //Serial.printf("Header[7] = 0x%02X\n", udpIncomingPacket[7]); + + + //------------------------------------------------------------------------- + //SUPPORT PING REQUEST + if ( protocol == VBAN_PROTOCOL_SERVICE ) { + + uint8_t service_type = udpIncomingPacket[5]; + uint8_t service_fnct = udpIncomingPacket[6]; + + if (service_type == VBAN_SERVICE_IDENTIFICATION) { + bool isReply = (service_fnct & VBAN_SERVICE_FNCT_REPLY) != 0; + uint8_t function = service_fnct & 0x7F; + + if (!isReply && function == 0) { + LOGI("Received VBAN PING0 request"); + sendVbanPing0Reply(packet); + } + } + return; + } + //-------------------------------------------------------------------------- + + vban_rx_data_bytes = + len - (VBAN_PACKET_HEADER_BYTES + VBAN_PACKET_COUNTER_BYTES); + vban_rx_pkt_nbr = (uint32_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES]; + vban_rx_data = (int16_t*)&udpIncomingPacket[VBAN_PACKET_HEADER_BYTES + + VBAN_PACKET_COUNTER_BYTES]; + vban_rx_sample_count = vban_rx_data_bytes / (cfg.bits_per_sample / 8); + uint8_t vbanSampleRateIdx = udpIncomingPacket[4] & VBAN_SR_MASK; + uint8_t vbchannels = udpIncomingPacket[6] + 1; + uint8_t vbframes = udpIncomingPacket[5] + 1; + uint8_t vbformat = udpIncomingPacket[7] & VBAN_PROTOCOL_MASK; + uint8_t vbformat_bits = udpIncomingPacket[7] & VBAN_BIT_RESOLUTION_MASK; + uint32_t vbanSampleRate = VBanSRList[vbanSampleRateIdx]; + + //LOGD("sample_count: %d - frames: %d", vban_rx_sample_count, vbframes); + //assert (vban_rx_sample_count == vbframes*vbchannels); + + // E.g. do not process any text + if (vbformat != cfg.format){ + LOGE("Format ignored: 0x%x", vbformat); + return; + } + + // Currently we support only 16 bits. 
+      if (vbformat_bits != VBAN_BITFMT_16_INT){
+        LOGE("Only 16-bit samples are supported");
+        return;
+      }
+
+      // Just to be safe, re-check sample count against max sample count to
+      // avoid overrunning outBuf later
+      if (vban_rx_sample_count > VBAN_PACKET_MAX_SAMPLES) {
+        LOGE("unexpected packet size: %u", vban_rx_sample_count);
+        return;
+      }
+
+      // update sample rate and channels when they change
+      if (cfg.sample_rate != vbanSampleRate || cfg.channels != vbchannels) {
+        // update audio info
+        cfg.sample_rate = vbanSampleRate;
+        cfg.channels = vbchannels;
+        setAudioInfo(cfg);
+        // remove any buffered data
+        rx_buffer.reset();
+        available_active = false;
+      }
+
+      if (p_out!=nullptr){
+        int size_written = p_out->write((uint8_t*)vban_rx_data, vban_rx_data_bytes);
+        if (size_written != vban_rx_data_bytes) {
+          LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written);
+        }
+        return;
+      }
+
+      // write data to buffer
+      int size_written = rx_buffer.writeArray((uint8_t*)vban_rx_data, vban_rx_data_bytes);
+      if (size_written != vban_rx_data_bytes) {
+        LOGE("buffer overflow %d -> %d", vban_rx_data_bytes, size_written);
+      }
+
+      // report data as available only once the buffer is 75% full
+      if (!available_active) {
+        bytes_received += vban_rx_data_bytes;
+        if (bytes_received >= cfg.rx_buffer_count * DEFAULT_BUFFER_SIZE * 0.75){
+          available_active = true;
+          LOGI("Activating vban");
+        }
+      }
+    }
+  }
+//-------------------------------------------------------------------------------------
+  // implement the ping reply based on the VBAN standard
+  void sendVbanPing0Reply(AsyncUDPPacket& sourcePacket) {
+
+    // Prepare VBAN 28-byte service header
+    uint8_t header[28];
+    memset(header, 0, sizeof(header));
+    memcpy(header, "VBAN", 4);
+    header[4] = VBAN_PROTOCOL_SERVICE;
+    header[5] = VBAN_SERVICE_FNCT_PING0 | VBAN_SERVICE_FNCT_REPLY;  // Service function + reply bit
+    header[6] = 0x00;  // must be zero
+    // Copy incoming stream name from discovery packet
+    const uint8_t* data = sourcePacket.data();
+    memcpy(&header[8], &data[8], 16);
+    // Copy frame number (little endian)
+
+    uint32_t frameNumber = (uint32_t)((data[24] & 0xFF) | ((data[25] & 0xFF) << 8) | ((data[26] & 0xFF) << 16) | ((data[27] & 0xFF) << 24));
+    memcpy(&header[24], &frameNumber, 4);
+
+    // Construct the PING0 payload using the struct
+    VBAN_PING0 ping0;
+    memset(&ping0, 0, sizeof(ping0));
+
+    // Fill fields with the config data and fixed values
+    ping0.bitType = cfg.device_flags;
+    ping0.bitfeature = cfg.bitfeature;
+    ping0.bitfeatureEx = 0x00000000;
+    ping0.PreferedRate = 44100;
+    ping0.MinRate = 8000;
+    ping0.MaxRate = 96000;
+    ping0.color_rgb = cfg.device_color;
+
+    // Version string, 8 bytes total (zero padded)
+    memcpy(ping0.nVersion, "v1.0", 4);
+
+    // GPS_Position left empty (all zero), so no need to set
+    // USER_Position 8 bytes
+    memcpy(ping0.USER_Position, "USRPOS", 6);
+    // LangCode_ascii 8 bytes ("EN" + padding)
+    memset(ping0.LangCode_ascii, 0, sizeof(ping0.LangCode_ascii));
+    memcpy(ping0.LangCode_ascii, "EN", 2);
+    // reserved_ascii and reservedEx are zeroed by memset
+    // IP as string, max 32 bytes
+
+    char ipStr[16];  // Enough for "255.255.255.255\0"
+    sprintf(ipStr, "%d.%d.%d.%d", WiFi.localIP()[0], WiFi.localIP()[1], WiFi.localIP()[2], WiFi.localIP()[3]);
+    safe_strncpy(ping0.DistantIP_ascii, ipStr, sizeof(ping0.DistantIP_ascii));
+    // Ports
+
+    ping0.DistantPort = cfg.udp_port;  // returns the port we are listening on for VBAN - more useful than the UDP ephemeral port
+    ping0.DistantReserved = 0;
+
+    // Device name (64 bytes)
+    if (cfg.device_name &&
cfg.device_name[0] != '\0') { + safe_strncpy(ping0.DeviceName_ascii, cfg.device_name, sizeof(ping0.DeviceName_ascii)); + } else { + uint8_t mac[6]; + WiFi.macAddress(mac); + char macStr[64]; + snprintf(macStr, sizeof(macStr), "%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + safe_strncpy(ping0.DeviceName_ascii, macStr, sizeof(ping0.DeviceName_ascii)); + } + + // Manufacturer name (64 bytes) + safe_strncpy(ping0.ManufacturerName_ascii, cfg.manufacturer_name, sizeof(ping0.ManufacturerName_ascii)); + // Application name (64 bytes) + safe_strncpy(ping0.ApplicationName_ascii, cfg.application_name, sizeof(ping0.ApplicationName_ascii)); + // Host name (64 bytes) + const char* hostName = cfg.host_name; + if (!hostName || hostName[0] == '\0') { + hostName = WiFi.getHostname(); + if (!hostName) hostName = "ESP32"; + } + safe_strncpy(ping0.HostName_ascii, hostName, sizeof(ping0.HostName_ascii)); + + // UserName_utf8 + safe_strncpy(ping0.UserName_utf8, cfg.user_name, sizeof(ping0.UserName_utf8)); + //UserComment_utf8 + safe_strncpy(ping0.UserComment_utf8, cfg.user_comment, sizeof(ping0.UserComment_utf8)); + + // Prepare final packet: header + payload + uint8_t packet[28 + sizeof(VBAN_PING0)]; + memcpy(packet, header, 28); + memcpy(packet + 28, &ping0, sizeof(VBAN_PING0)); + + // Send UDP packet + udp.writeTo(packet, sizeof(packet), sourcePacket.remoteIP(), sourcePacket.remotePort()); +} + + // Safely copy a C-string with guaranteed null termination + void safe_strncpy(char* dest, const char* src, size_t dest_size) { + if (dest_size == 0) return; + strncpy(dest, src, dest_size - 1); + dest[dest_size - 1] = '\0'; + } + //----------------------------------------------------------------------------------- +}; + +} // namespace audio_tools \ No newline at end of file From b18a14bf3b3a0da1821f3651abbb54633b497ca2 Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 06:50:33 +0200 Subject: [PATCH 02/15] bump to 1.2.0 --- library.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library.properties b/library.properties index b844d89390..afc250c77b 100644 --- a/library.properties +++ b/library.properties @@ -1,5 +1,5 @@ name=audio-tools -version=1.1.3 +version=1.2.0 author=Phil Schatzmann maintainer=Phil Schatzmann sentence=Some useful audio processing classes From 4a38e989de99c3d590d5eb2f190b00819094a9ad Mon Sep 17 00:00:00 2001 From: Phil Schatzmann Date: Tue, 23 Sep 2025 07:02:21 +0200 Subject: [PATCH 03/15] move HTTP to v1.2.0 (#2176) * move HTTP to Communication * RTSP corrections * Move to Communication from AudioLibs * bump to 1.2.0 --- examples/build-examples-log.txt | 6 +- .../streams-audiokit-multioutput-server.ino | 1 + .../player-url-i2s/player-url-i2s.ino | 1 + .../player-url_icy-audiokit.ino | 1 + .../player-url_icy-i2s/player-url_icy-i2s.ino | 1 + .../AudioSourceIcyUrl.h | 2 +- .../player-url_subclass-i2s.ino | 1 + .../streams-eth_url_mp3_helix-i2s.ino | 1 + .../streams-http_post/streams-http_post.ino | 1 + .../streams-url-file/streams-url-file.ino | 1 + .../streams-url-measuring.ino | 1 + .../streams-url_aac-audiokit.ino | 1 + .../streams-url_aac-i2s.ino | 1 + .../streams-url_flac-i2s.ino | 1 + .../streams-url_flac_foxen-i2s.ino | 1 + .../streams-url_mp3-analog.ino | 1 + .../streams-url_mp3-audiokit.ino | 1 + .../streams-url_mp3-metadata.ino | 1 + .../streams-url_mp3-metadata2.ino | 1 + .../streams-url_mp3-pwm.ino | 1 + .../streams-url_mp3_helix-i2s.ino | 1 + .../streams-url_mp3_helix-i2s_32bit.ino | 1 + 
.../streams-url_mp3_mad-i2s.ino | 1 + .../streams-url_post/streams-url_post.ino | 1 + .../streams-url_raw-i2s.ino | 1 + .../streams-url_raw-serial.ino | 1 + .../streams-url_vorbis_i2s.ino | 1 + .../player-sd-webserverex_mp3.ino | 2 +- .../streams-audiokit-webserver_aac.ino | 1 + .../streams-audiokit-webserver_mp3.ino | 1 + .../streams-audiokit-webserver_wav.ino | 1 + .../streams-effect-webserver_wav.ino | 1 + .../streams-flite-webserver_wav.ino | 1 + .../streams-generator-webserver_aac.ino | 1 + .../streams-generator-webserver_mp3.ino | 1 + .../streams-generator-webserver_ogg.ino | 1 + .../streams-generator-webserver_wav.ino | 1 + .../streams-generator-webserverex_wav.ino | 2 +- .../streams-generator-webserverex_wav1.ino | 2 +- .../streams-i2s-webserver_wav.ino | 1 + .../streams-sam-webserver_wav.ino | 1 + .../streams-tts-webserver_wav.ino | 1 + .../communication-ip-send.ino | 3 +- .../serial/mp3-custom/send-mp3/send-mp3.ino | 1 + .../serial/mp3-xon-xoff/send-mp3/send-mp3.ino | 1 + .../serial/mp3/send-mp3/send-mp3.ino | 1 + .../streams-sdfat_mp3-metadata.ino | 1 + .../streams-azure_tts-i2s.ino | 1 + .../streams-google-audiokit.ino | 1 + .../streams-url_wav-i2s.ino | 1 + .../streams-url_mp3-vs1053.ino | 1 + .../test-container-avi/test-container-avi.ino | 1 + .../test-codec-aac-fdk-dec.ino | 1 + .../test-memory-helix/test-memory-helix.ino | 1 + .../test-streaming-adapter.ino | 1 + .../tests/etc/test-ads1015/test-ads1015.ino | 39 --- examples/tests/performance/wifi/wifi.ino | 1 + .../tests/player/test-player/test-player.ino | 1 + src/AudioTools/AudioCodecs/AudioEncoded.h | 3 +- src/AudioTools/AudioCodecs/CodecCopy.h | 5 + src/AudioTools/AudioCodecs/CodecMTS.h | 2 + src/AudioTools/AudioCodecs/ContainerAVI.h | 2 + src/AudioTools/AudioCodecs/HeaderParserMP3.h | 4 +- src/AudioTools/AudioCodecs/M4ACommonDemuxer.h | 2 + src/AudioTools/AudioCodecs/MultiDecoder.h | 2 +- src/AudioTools/AudioLibs/AudioServerEx.h | 184 +------------ src/AudioTools/AudioLibs/Desktop/File.h | 5 + .../AudioLibs/Desktop/JupyterAudio.h | 4 + src/AudioTools/AudioLibs/SPDIFOutput.h | 2 + src/AudioTools/Communication/AudioHttp.h | 15 ++ src/AudioTools/Communication/AudioServerEx.h | 183 +++++++++++++ src/AudioTools/Communication/HLSStreamESP32.h | 3 + .../HTTP}/AbstractURLStream.h | 0 .../HTTP}/AudioClient.h | 0 .../Communication/HTTP/AudioEncodedServerT.h | 222 +++++++++++++++ src/AudioTools/Communication/HTTP/AudioHttp.h | 25 ++ .../Communication/HTTP/AudioServer.h | 4 + .../Communication/HTTP/AudioServerEthernet.h | 45 ++++ .../HTTP/AudioServerT.h} | 253 ++---------------- .../Communication/HTTP/AudioServerWiFi.h | 43 +++ .../HTTP}/HttpChunkReader.h | 0 .../HTTP}/HttpHeader.h | 0 .../HTTP}/HttpLineReader.h | 0 .../HTTP}/HttpRequest.h | 0 .../HTTP}/HttpTypes.h | 0 src/AudioTools/Communication/HTTP/ICYStream.h | 16 ++ .../HTTP}/ICYStreamT.h | 2 +- src/AudioTools/Communication/HTTP/README.md | 4 + .../HTTP}/URLStream.h | 53 +--- .../HTTP}/URLStreamBufferedT.h | 2 +- .../HTTP}/URLStreamESP32.h | 15 +- .../AudioHttp => Communication/HTTP}/Url.h | 0 .../Communication/HTTP/WiFiInclude.h | 20 ++ .../Concurrency/RP2040/BufferRP2040.h | 2 + .../Concurrency/RP2040/MutexRP2040.h | 2 + src/AudioTools/Concurrency/RTOS/BufferRTOS.h | 2 + src/AudioTools/Concurrency/RTOS/MutexRTOS.h | 2 + .../RTOS/SynchronizedNBufferRTOS.h | 5 + src/AudioTools/CoreAudio.h | 1 - src/AudioTools/CoreAudio/AudioHttp.h | 3 - .../CoreAudio/AudioHttp/AudioHttp.h | 13 - src/AudioTools/CoreAudio/AudioHttp/README.md | 2 - .../CoreAudio/AudioMetaData/MetaData.h | 5 +- 
.../CoreAudio/AudioMetaData/MetaDataICY.h | 2 +- .../CoreAudio/AudioPWM/PWMDriverAVR.h | 2 + src/AudioTools/CoreAudio/AudioPlayer.h | 1 - src/AudioTools/CoreAudio/AudioStreams.h | 5 + .../CoreAudio/AudioTimer/AudioTimerDesktop.h | 7 +- src/AudioTools/CoreAudio/AudioTypes.h | 5 +- src/AudioTools/Disk/AudioSourceURL.h | 2 +- src/AudioTools/PlatformConfig/avr.h | 5 +- src/AudioTools/PlatformConfig/giga.h | 1 - src/AudioTools/PlatformConfig/portenta.h | 1 - src/AudioTools/PlatformConfig/samd.h | 2 +- src/AudioTools/PlatformConfig/stm32.h | 7 +- src/AudioTools/PlatformConfig/unor4.h | 2 +- tests-cmake/codec/CMakeLists.txt | 2 +- tests-cmake/codec/aac-faad/aac-faad.cpp | 2 - .../codec/aac-fdk-encode/aac-fdk-encode.cpp | 1 - tests-cmake/codec/aac-fdk/aac-fdk.cpp | 2 - tests-cmake/codec/aac-helix/aac-helix.cpp | 2 - tests-cmake/codec/mp3-helix/mp3-helix.cpp | 2 - tests-cmake/codec/mp3-lame/mp3-lame.cpp | 2 - tests-cmake/codec/mp3-mad/mp3-mad.cpp | 2 - .../codec/mp3-metadata/mp3-metadata.cpp | 3 - tests-cmake/effects/effects.cpp | 2 - tests-cmake/url-test/url-test.cpp | 3 +- 127 files changed, 746 insertions(+), 583 deletions(-) delete mode 100644 examples/tests/etc/test-ads1015/test-ads1015.ino create mode 100644 src/AudioTools/Communication/AudioHttp.h create mode 100644 src/AudioTools/Communication/AudioServerEx.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/AbstractURLStream.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/AudioClient.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/AudioEncodedServerT.h create mode 100644 src/AudioTools/Communication/HTTP/AudioHttp.h create mode 100644 src/AudioTools/Communication/HTTP/AudioServer.h create mode 100644 src/AudioTools/Communication/HTTP/AudioServerEthernet.h rename src/AudioTools/{CoreAudio/AudioHttp/AudioServer.h => Communication/HTTP/AudioServerT.h} (55%) create mode 100644 src/AudioTools/Communication/HTTP/AudioServerWiFi.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpChunkReader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpHeader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpLineReader.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpRequest.h (100%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/HttpTypes.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/ICYStream.h rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/ICYStreamT.h (99%) create mode 100644 src/AudioTools/Communication/HTTP/README.md rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStream.h (91%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStreamBufferedT.h (99%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/URLStreamESP32.h (95%) rename src/AudioTools/{CoreAudio/AudioHttp => Communication/HTTP}/Url.h (100%) create mode 100644 src/AudioTools/Communication/HTTP/WiFiInclude.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h delete mode 100644 src/AudioTools/CoreAudio/AudioHttp/README.md diff --git a/examples/build-examples-log.txt b/examples/build-examples-log.txt index 8f87fcd7ae..59ec0a0a2e 100644 --- a/examples/build-examples-log.txt +++ b/examples/build-examples-log.txt @@ -58,6 +58,7 @@ ../examples/examples-audiokit/streams-audiokit-fft -> rc=0 ../examples/examples-audiokit/streams-audiokit-fft-led -> rc=0 
../examples/examples-audiokit/streams-audiokit-filter-audiokit -> rc=0 +../examples/examples-audiokit/streams-audiokit-goertzel -> rc=0 ../examples/examples-audiokit/streams-audiokit-multioutput -> rc=0 ../examples/examples-audiokit/streams-audiokit-multioutput-server -> rc=0 ../examples/examples-audiokit/streams-audiokit-ram-audiokit -> rc=0 @@ -183,8 +184,10 @@ ../examples/examples-communication/vban/streams-generator-vban -> rc=0 ../examples/examples-communication/vban/streams-vban-audiokit -> rc=0 ../examples/examples-communication/rtsp/communication-audiokit-rtsp -> rc=0 -../examples/examples-communication/rtsp/communication-codec-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-codec-rtsp -> rc=1 ../examples/examples-communication/rtsp/communication-generator-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-player_mp3-rtsp -> rc=0 +../examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm -> rc=0 ../examples/examples-communication/rtsp/communication-rtsp-audiokit -> rc=0 ../examples/examples-communication/rtsp/communication-rtsp-i2s -> rc=0 ../examples/examples-communication/serial/mp3 -> rc=1 @@ -310,7 +313,6 @@ ../examples/tests/effects/pitch-shift-180 -> rc=0 ../examples/tests/effects/pitch-shift-simple -> rc=0 ../examples/tests/etc/callback-write -> rc=0 -../examples/tests/etc/test-ads1015 -> rc=0 ../examples/tests/etc/test-audiolibs -> rc=0 ../examples/tests/etc/test-mulit-compilation-units -> rc=0 ../examples/tests/etc/test-pins -> rc=0 diff --git a/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino b/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino index 653992abfc..95e0fa210e 100644 --- a/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino +++ b/examples/examples-audiokit/streams-audiokit-multioutput-server/streams-audiokit-multioutput-server.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" const int buffer_count = 10; const int buffer_size = 1024; diff --git a/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino b/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino index ee1f014ef9..d4dcb4267f 100644 --- a/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino +++ b/examples/examples-communication/http-client/player-url-i2s/player-url-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino b/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino index 2f510092bf..7b6e6450ce 100644 --- a/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino +++ b/examples/examples-communication/http-client/player-url_icy-audiokit/player-url_icy-audiokit.ino @@ -10,6 +10,7 @@ #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { "http://stream.srg-ssr.ch/m/rsj/mp3_128", diff --git 
a/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino b/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino index 53b193d7e8..82aeb05ba5 100644 --- a/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino +++ b/examples/examples-communication/http-client/player-url_icy-i2s/player-url_icy-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h b/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h index acb5fffaf5..7b88d088d0 100644 --- a/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h +++ b/examples/examples-communication/http-client/player-url_subclass-i2s/AudioSourceIcyUrl.h @@ -1,6 +1,6 @@ #pragma once #include "AudioTools.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" +#include "AudioTools/Communication/HTTP/URLStream.h" #include "AudioTools/Disk/AudioSourceURL.h" namespace audio_tools { diff --git a/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino b/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino index 1c354c6074..a2b5bb3996 100644 --- a/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino +++ b/examples/examples-communication/http-client/player-url_subclass-i2s/player-url_subclass-i2s.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioSourceIcyUrl.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino b/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino index 68857587c0..46e9ff8f5a 100644 --- a/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino +++ b/examples/examples-communication/http-client/streams-eth_url_mp3_helix-i2s/streams-eth_url_mp3_helix-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" #include #include diff --git a/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino b/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino index 54186a978b..11ec3a7c78 100644 --- a/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino +++ b/examples/examples-communication/http-client/streams-http_post/streams-http_post.ino @@ -8,6 +8,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" const char *ssid = "your SSID"; const char *password = "your PASSWORD"; diff --git a/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino b/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino index b4b8a1d2d8..fa3b92c376 100644 --- a/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino +++ b/examples/examples-communication/http-client/streams-url-file/streams-url-file.ino @@ -11,6 +11,7 @@ */ #include "SD.h" #include 
"AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #define PIN_AUDIO_KIT_SD_CARD_CS 13 #define PIN_AUDIO_KIT_SD_CARD_MISO 2 diff --git a/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino b/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino index 0e72aa05f3..bfc11eb00b 100644 --- a/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino +++ b/examples/examples-communication/http-client/streams-url-measuring/streams-url-measuring.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata MeasuringStream out(50, &Serial); // final output of decoded stream diff --git a/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino b/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino index 3ba8b078e4..942814f38d 100644 --- a/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino +++ b/examples/examples-communication/http-client/streams-url_aac-audiokit/streams-url_aac-audiokit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACHelix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata diff --git a/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino b/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino index 2d409b9ca7..e3118b234f 100644 --- a/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_aac-i2s/streams-url_aac-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACHelix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino b/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino index cd153ab24e..6e026661ce 100644 --- a/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_flac-i2s/streams-url_flac-i2s.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecFLAC.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git a/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino b/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino index 0bb8b9faa7..e7e7f9a726 100644 --- a/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_flac_foxen-i2s/streams-url_flac_foxen-i2s.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecFLACFoxen.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git 
a/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino b/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino index bad4e6ffef..631734e146 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-analog/streams-url_mp3-analog.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino b/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino index c3a80676f2..69a5d59a25 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-audiokit/streams-url_mp3-audiokit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata diff --git a/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino b/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino index 7d6e67c08f..c53d0e8235 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-metadata/streams-url_mp3-metadata.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" ICYStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino b/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino index 399da89a92..583ea5c0ee 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-metadata2/streams-url_mp3-metadata2.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" // -> EncodedAudioStream -> I2SStream // URLStream -> MultiOutput -| diff --git a/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino b/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino index 15b96d25b5..71989a9496 100644 --- a/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino +++ b/examples/examples-communication/http-client/streams-url_mp3-pwm/streams-url_mp3-pwm.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino index 18cfef0a60..f2e82420fa 100644 --- 
a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s/streams-url_mp3_helix-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino index 705d8bb202..d0b71811aa 100644 --- a/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_helix-i2s_32bit/streams-url_mp3_helix-i2s_32bit.ino @@ -13,6 +13,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino b/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino index bf3d81445d..b4c64d764b 100644 --- a/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_mp3_mad-i2s/streams-url_mp3_mad-i2s.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3MAD.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino b/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino index 721f6a506e..07d9e2d5f3 100644 --- a/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino +++ b/examples/examples-communication/http-client/streams-url_post/streams-url_post.ino @@ -7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" AudioInfo info(44100, 2, 16); SineWaveGenerator sineWave(32000); diff --git a/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino b/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino index b000e4ea17..ed0bfcc1a1 100644 --- a/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino +++ b/examples/examples-communication/http-client/streams-url_raw-i2s/streams-url_raw-i2s.ino @@ -8,6 +8,7 @@ #include "WiFi.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream music; // Music Stream I2SStream i2s;// I2S as Stream diff --git a/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino b/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino index bab43889fe..df53e4ddf1 100644 --- a/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino +++ b/examples/examples-communication/http-client/streams-url_raw-serial/streams-url_raw-serial.ino @@ -8,6 +8,7 @@ #include "WiFi.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" diff --git a/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino 
b/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino index 5e25d3d127..ab835255cb 100644 --- a/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino +++ b/examples/examples-communication/http-client/streams-url_vorbis_i2s/streams-url_vorbis_i2s.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecVorbis.h" +#include "AudioTools/Communication/AudioHttp.h" const char* ssid = "ssid"; const char* pwd = "password"; diff --git a/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino b/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino index eebf181aca..662397f256 100644 --- a/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino +++ b/examples/examples-communication/http-server/player-sd-webserverex_mp3/player-sd-webserverex_mp3.ino @@ -9,7 +9,7 @@ #include "AudioTools.h" #include "AudioTools/Disk/AudioSourceSD.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" #include "AudioTools/AudioCodecs/CodecCopy.h" #define PIN_AUDIO_KIT_SD_CARD_CS 13 diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino index f5cf208039..2163e3eb4a 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_aac/streams-audiokit-webserver_aac.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino index 24025fb94a..a14092f9cd 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_mp3/streams-audiokit-webserver_mp3.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/AudioCodecs/CodecMP3LAME.h" +#include "AudioTools/Communication/AudioHttp.h" // Set static IP address and stuff (optional) IPAddress IPA_address(192, 168, 0, 222); diff --git a/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino b/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino index 0bc0a7a35d..7ab77b37bc 100644 --- a/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-audiokit-webserver_wav/streams-audiokit-webserver_wav.ino @@ -9,6 +9,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" AudioEncoderServer server(new WAVEncoder(),"ssid","password"); AudioBoardStream kit(AudioKitEs8388V1); diff --git 
a/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino b/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino index 98d0863888..567c1d9cd4 100644 --- a/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-effect-webserver_wav/streams-effect-webserver_wav.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino b/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino index f4f03cd596..d589e760b8 100644 --- a/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-flite-webserver_wav/streams-flite-webserver_wav.ino @@ -8,6 +8,7 @@ #include "flite_arduino.h" #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino b/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino index 730acf2324..adcb117fc7 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_aac/streams-generator-webserver_aac.ino @@ -9,6 +9,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI diff --git a/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino b/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino index 6ec100a8d6..03620425a2 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_mp3/streams-generator-webserver_mp3.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3LAME.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino b/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino index 1ef880b9bf..c7e1adcc66 100644 --- a/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_ogg/streams-generator-webserver_ogg.ino @@ -16,6 +16,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecOpusOgg.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino b/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino index 8a921dd8e8..fc5a24679d 100644 --- 
a/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-generator-webserver_wav/streams-generator-webserver_wav.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // WIFI const char *ssid = "ssid"; diff --git a/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino b/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino index a2dafe1c4f..dc13cc57d8 100644 --- a/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino +++ b/examples/examples-communication/http-server/streams-generator-webserverex_wav/streams-generator-webserverex_wav.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" // WIFI const char *ssid = "SSID"; diff --git a/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino b/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino index a9c374db5c..3a0b289492 100644 --- a/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino +++ b/examples/examples-communication/http-server/streams-generator-webserverex_wav1/streams-generator-webserverex_wav1.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/AudioServerEx.h" +#include "AudioTools/Communication/AudioServerEx.h" // WIFI const char *ssid = "SSID"; diff --git a/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino b/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino index 7e96b79117..b82b938c1b 100644 --- a/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-i2s-webserver_wav/streams-i2s-webserver_wav.ino @@ -8,6 +8,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" //AudioEncodedServer server(new WAVEncoder(),"ssid","password"); AudioWAVServer server("ssid","password"); // the same a above diff --git a/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino b/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino index 325f55e4b0..12413b90cd 100644 --- a/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-sam-webserver_wav/streams-sam-webserver_wav.ino @@ -6,6 +6,7 @@ * */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #include "sam_arduino.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino b/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino index 24ce8af2d0..b852055eaa 100644 --- a/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino +++ b/examples/examples-communication/http-server/streams-tts-webserver_wav/streams-tts-webserver_wav.ino @@ 
-7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" #include "TTS.h" AudioWAVServer server("ssid","password"); diff --git a/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino b/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino index 43d1c4c28e..1a3b91ae59 100644 --- a/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino +++ b/examples/examples-communication/ip/communication-ip-send/communication-ip-send.ino @@ -33,8 +33,9 @@ void connectWifi() { Serial.println(WiFi. localIP()); // Performance Hack + //esp_wifi_set_ps(WIFI_PS_NONE); + WiFi.setSleep(WIFI_PS_NONE); client.setNoDelay(true); - esp_wifi_set_ps(WIFI_PS_NONE); } void connectIP() { diff --git a/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino index 66a096ce00..da0d8a2c5f 100644 --- a/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3-custom/send-mp3/send-mp3.ino @@ -8,6 +8,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata StreamCopy copier(Serial1, url); // copy url to decoder diff --git a/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino index 709249ae1c..a76f572cb3 100644 --- a/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3-xon-xoff/send-mp3/send-mp3.ino @@ -7,6 +7,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata StreamCopy copier(Serial1, url); // copy url to decoder diff --git a/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino b/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino index ae0223f8fd..e2fc39e0b2 100644 --- a/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino +++ b/examples/examples-communication/serial/mp3/send-mp3/send-mp3.ino @@ -10,6 +10,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid", "password"); // or replace with ICYStream to get metadata HardwareSerial MP3Serial(1); // define a Serial for UART1 diff --git a/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino b/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino index 953146039b..549bc3917b 100644 --- a/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino +++ b/examples/examples-stream/streams-sdfat_mp3-metadata/streams-sdfat_mp3-metadata.ino @@ -15,6 +15,7 @@ #include #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" // -> EncodedAudioStream -> I2SStream diff --git a/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino b/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino index e265410f69..ebc0cc3bc1 100644 --- a/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino +++ b/examples/examples-tts/streams-azure_tts-i2s/streams-azure_tts-i2s.ino @@ -6,6 +6,7 @@ */ #include 
"AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" String speechKey = "...."; // deploy a Speech Service in Azure and get both the key and the region. info here: https://azure.microsoft.com/en-us/products/cognitive-services/text-to-speech/ String spechregion = "...."; diff --git a/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino b/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino index 591c3003a7..c70c1b08b4 100644 --- a/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino +++ b/examples/examples-tts/streams-google-audiokit/streams-google-audiokit.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); diff --git a/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino b/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino index 62e439e0ec..f28ae528a3 100644 --- a/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino +++ b/examples/examples-tts/streams-url_wav-i2s/streams-url_wav-i2s.ino @@ -9,6 +9,7 @@ */ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" // UrlStream -copy-> EncodedAudioStream -> I2S diff --git a/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino b/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino index 6a113c9da6..2e61e38ce4 100644 --- a/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino +++ b/examples/examples-vs1053/streams-url_mp3-vs1053/streams-url_mp3-vs1053.ino @@ -12,6 +12,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/VS1053Stream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata VS1053Stream vs1053; // final output diff --git a/examples/sandbox/test-container-avi/test-container-avi.ino b/examples/sandbox/test-container-avi/test-container-avi.ino index 76d37ac0a1..9c29ac16de 100644 --- a/examples/sandbox/test-container-avi/test-container-avi.ino +++ b/examples/sandbox/test-container-avi/test-container-avi.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/ContainerAVI.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // input AudioBoardStream out(AudioKitEs8388V1); diff --git a/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino b/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino index 6f509af461..c96330c26b 100644 --- a/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino +++ b/examples/tests/codecs/test-codec-aac-fdk-dec/test-codec-aac-fdk-dec.ino @@ -3,6 +3,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecAACFDK.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" SET_LOOP_TASK_STACK_SIZE(50 * 1024); diff --git a/examples/tests/codecs/test-memory-helix/test-memory-helix.ino b/examples/tests/codecs/test-memory-helix/test-memory-helix.ino index c30a6ae3d9..09d493c821 100644 --- a/examples/tests/codecs/test-memory-helix/test-memory-helix.ino +++ b/examples/tests/codecs/test-memory-helix/test-memory-helix.ino @@ -1,6 +1,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" +#include "AudioTools/Communication/AudioHttp.h" /** * 
@brief Sketch to test the memory usage with libhelix with an ESP32 diff --git a/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino b/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino index 3e3136f376..52c5cbffdd 100644 --- a/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino +++ b/examples/tests/codecs/test-streaming-adapter/test-streaming-adapter.ino @@ -1,6 +1,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); // or replace with ICYStream to get metadata AudioBoardStream i2s(AudioKitEs8388V1); // final output of decoded stream diff --git a/examples/tests/etc/test-ads1015/test-ads1015.ino b/examples/tests/etc/test-ads1015/test-ads1015.ino deleted file mode 100644 index 0b877bbc46..0000000000 --- a/examples/tests/etc/test-ads1015/test-ads1015.ino +++ /dev/null @@ -1,39 +0,0 @@ - -#include -#include "ADS1X15.h" // https://github.com/pschatzmann/ADS1X15.git - -ADS1115 ads1015(0x48); // ads1015 device - - -void list(bool print){ - int count; - unsigned long end = millis()+1000*10; - while(end>millis()) { - int16_t value = ads1015.getValue();; - if (print) Serial.println(value); - count++; - } - Serial.print("Samples per second: "); - Serial.println(count/10); -} - -void setup(){ - Serial.begin(119200); - - // setup gain for ads1015 - Wire.setClock(400000); - ads1015.begin(); - if(!ads1015.isConnected()) Serial.println("ads1015 NOT CONNECTED!"); - ads1015.setGain(4); // 6.144 volt - ads1015.setDataRate(4); // 0 = slow 4 = medium 7 = fast (7 = fails ) - ads1015.setMode(0); - ads1015.requestADC_Differential_0_1(); - - list(false); - //list(true); -} - - -void loop(){ - -} \ No newline at end of file diff --git a/examples/tests/performance/wifi/wifi.ino b/examples/tests/performance/wifi/wifi.ino index 3e1d207761..50315e4a66 100644 --- a/examples/tests/performance/wifi/wifi.ino +++ b/examples/tests/performance/wifi/wifi.ino @@ -1,4 +1,5 @@ #include "AudioTools.h" +#include "AudioTools/Communication/AudioHttp.h" URLStream url("SSID","PASSWORD"); // or replace with ICYStream to get metadata MeasuringStream out(50, &Serial); // final output of decoded stream diff --git a/examples/tests/player/test-player/test-player.ino b/examples/tests/player/test-player/test-player.ino index 68f1c19422..8450bfad62 100644 --- a/examples/tests/player/test-player/test-player.ino +++ b/examples/tests/player/test-player/test-player.ino @@ -14,6 +14,7 @@ #include "AudioTools.h" #include "AudioTools/Disk/AudioSourceSDFAT.h" #include "AudioTools/Disk/AudioSourceURL.h" +#include "AudioTools/Communication/AudioHttp.h" const char *urls[] = { diff --git a/src/AudioTools/AudioCodecs/AudioEncoded.h b/src/AudioTools/AudioCodecs/AudioEncoded.h index 025ea2d819..f951085e62 100644 --- a/src/AudioTools/AudioCodecs/AudioEncoded.h +++ b/src/AudioTools/AudioCodecs/AudioEncoded.h @@ -257,7 +257,8 @@ class EncodedAudioOutput : public ModifyingOutput { int frame_size = DEFAULT_BUFFER_SIZE; }; -// legacy name +/// @brief Legacy alias for EncodedAudioOutput +/// @ingroup codecs using EncodedAudioPrint = EncodedAudioOutput; /** diff --git a/src/AudioTools/AudioCodecs/CodecCopy.h b/src/AudioTools/AudioCodecs/CodecCopy.h index 4c7b89ab96..55ab76d27b 100644 --- a/src/AudioTools/AudioCodecs/CodecCopy.h +++ b/src/AudioTools/AudioCodecs/CodecCopy.h @@ -101,7 +101,12 @@ class CopyEncoder : public AudioEncoder { const char 
*mime_type = "audio/pcm"; }; +/// @brief Alias for CopyEncoder to handle PCM audio encoding (no actual encoding) +/// @ingroup codecs using PCMEncoder = CopyEncoder; + +/// @brief Alias for CopyDecoder to handle PCM audio decoding (no actual decoding) +/// @ingroup codecs using PCMDecoder = CopyDecoder; } // namespace audio_tools diff --git a/src/AudioTools/AudioCodecs/CodecMTS.h b/src/AudioTools/AudioCodecs/CodecMTS.h index 8b6fd3f417..1add2564f1 100644 --- a/src/AudioTools/AudioCodecs/CodecMTS.h +++ b/src/AudioTools/AudioCodecs/CodecMTS.h @@ -485,6 +485,8 @@ class MTSDecoder : public AudioDecoder { } }; +/// @brief Legacy alias for MPEG Transport Stream decoder +/// @ingroup codecs using MPEG_TSDecoder = MTSDecoder; } // namespace audio_tools diff --git a/src/AudioTools/AudioCodecs/ContainerAVI.h b/src/AudioTools/AudioCodecs/ContainerAVI.h index c7f443cffa..e9d84a1b9b 100644 --- a/src/AudioTools/AudioCodecs/ContainerAVI.h +++ b/src/AudioTools/AudioCodecs/ContainerAVI.h @@ -58,6 +58,8 @@ class ParseBuffer { size_t available_byte_count = 0; }; +/// @brief Four-character code identifier for AVI format +/// @ingroup codecs using FOURCC = char[4]; struct AVIMainHeader { diff --git a/src/AudioTools/AudioCodecs/HeaderParserMP3.h b/src/AudioTools/AudioCodecs/HeaderParserMP3.h index 292c7a8b0b..4d209db680 100644 --- a/src/AudioTools/AudioCodecs/HeaderParserMP3.h +++ b/src/AudioTools/AudioCodecs/HeaderParserMP3.h @@ -219,7 +219,7 @@ class HeaderParserMP3 { return false; } - memset(&header, 0, sizeof(header)); + header = FrameHeader{}; int valid_frames_found = 0; int consecutive_frames = 0; const int MIN_FRAMES_TO_VALIDATE = 3; // Require at least 3 consecutive valid frames @@ -484,7 +484,7 @@ class HeaderParserMP3 { void reset() { buffer.reset(); frame_header_valid = false; - memset(&header, 0, sizeof(header)); + header = FrameHeader{}; } /// Finds the mp3/aac sync word diff --git a/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h b/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h index 840a9989a1..857b4cdef2 100644 --- a/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h +++ b/src/AudioTools/AudioCodecs/M4ACommonDemuxer.h @@ -14,6 +14,8 @@ namespace audio_tools { /// we expect that the sample size is usually aound 1 - 2k, so uint16_t /// should be more then sufficient! Microcontolles only have a limited /// amount of RAM, so this makes a big difference! 
+/// @brief Sample size type optimized for microcontrollers +/// @ingroup codecs using stsz_sample_size_t = uint16_t; /** diff --git a/src/AudioTools/AudioCodecs/MultiDecoder.h b/src/AudioTools/AudioCodecs/MultiDecoder.h index 70dee46538..7f6f731f91 100644 --- a/src/AudioTools/AudioCodecs/MultiDecoder.h +++ b/src/AudioTools/AudioCodecs/MultiDecoder.h @@ -3,7 +3,7 @@ #include "AudioTools/AudioCodecs/AudioCodecsBase.h" #include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MimeDetector.h" #include "AudioTools/AudioCodecs/StreamingDecoder.h" diff --git a/src/AudioTools/AudioLibs/AudioServerEx.h b/src/AudioTools/AudioLibs/AudioServerEx.h index a5f3fa5a56..36019b52b9 100644 --- a/src/AudioTools/AudioLibs/AudioServerEx.h +++ b/src/AudioTools/AudioLibs/AudioServerEx.h @@ -1,183 +1,3 @@ #pragma once - -#include "AudioToolsConfig.h" -#include "AudioTools/CoreAudio/AudioOutput.h" -#include "AudioTools/AudioCodecs/CodecWAV.h" -#include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "HttpServer.h" -#include "HttpExtensions.h" - -namespace audio_tools { - -/** - * @brief Config information for AudioServerEx - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -struct AudioServerExConfig : public AudioInfo { - const char* mime = nullptr; - const char* ssid = nullptr; - const char* password = nullptr; - const char* path = "/"; - // optional input; if not used use write methods to push data - Stream *input=nullptr; - int port = 80; -}; - -/** - * @brief A powerfull Web server which is based on - * https://github.com/pschatzmann/TinyHttp. - * It supports multiple concurrent clients. You can e.g. use it to write mp3 data and make - * it available in multiple clients. - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ - -class AudioServerEx : public AudioOutput { - public: - // Default Constructor - AudioServerEx() = default; - - /// To be compatible with legacy API - AudioServerEx(const char *ssid, const char* pwd){ - info.ssid = ssid; - info.password = pwd; - } - - virtual AudioServerExConfig defaultConfig() { - AudioServerExConfig cfg; - return cfg; - } - - virtual bool begin(AudioServerExConfig cfg) { - info = cfg; - return begin(); - } - - virtual bool begin(Stream &in, const char* contentType) { - info.input = ∈ - info.mime = contentType; - return begin(); - } - - virtual bool begin() { - end(); // we (re) start with a clean state - - if (info.input==nullptr){ - p_stream = new ExtensionStream(info.path,tinyhttp::T_GET, info.mime ); - } else { - p_stream = new ExtensionStream(info.path, info.mime, *info.input); - } - p_stream->setReplyHeader(*getReplyHeader()); - p_server = new tinyhttp::HttpServer(wifi); - - // handling of WAV - p_server->addExtension(*p_stream); - return p_server->begin(info.port, info.ssid, info.password); - } - - virtual void end() { - if (p_stream!=nullptr) { - delete p_stream; - p_stream = nullptr; - } - if (p_server!=nullptr) { - delete p_server; - p_server = nullptr; - } - } - - /// Web server supports write so that we can e.g. use is as destination for the audio player. 
- size_t write(const uint8_t* data, size_t len) override { - if (p_stream==nullptr) return 0; - return p_stream->write((uint8_t*)data, len); - } - - int availableForWrite() override { - if (p_stream==nullptr) return 0; - return p_stream->availableForWrite(); - } - - /// Needs to be called if the data was provided as input Stream in the AudioServerExConfig - virtual void copy() { - if (p_server!=nullptr){ - p_server->copy(); - } - } - - protected: - AudioServerExConfig info; - WiFiServer wifi; - HttpServer *p_server; - ExtensionStream *p_stream=nullptr; - - virtual tinyhttp::StrView* getReplyHeader() { - return nullptr; - } - -}; - -/** - * @brief A powerfull WAV Web server which is based on - * https://github.com/pschatzmann/TinyHttp. - * It supports multiple concurrent clients - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - * - */ -class AudioWAVServerEx : public AudioServerEx { - public: - // Default Constructor - AudioWAVServerEx() = default; - - /// To be compatible with legacy API - AudioWAVServerEx(const char *ssid, const char* pwd):AudioServerEx(ssid, pwd){} - - AudioServerExConfig defaultConfig() override { - AudioServerExConfig cfg; - cfg.mime = "audio/wav"; - return cfg; - } - - /// Legacy API support - bool begin(Stream &in, int sample_rate, int channels, int bits_per_sample=16) { - info.input = ∈ - info.sample_rate = sample_rate; - info.channels = channels; - info. bits_per_sample = bits_per_sample; - info.mime = "audio/wav"; - return AudioServerEx::begin(); - } - - bool begin(AudioServerExConfig cfg) override{ - return AudioServerEx::begin(cfg); - } - - protected: - // Dynamic memory - tinyhttp::Str header; - - // wav files start with a 44 bytes header - virtual tinyhttp::StrView* getReplyHeader() { - header.allocate(44); - MemoryOutput mp{(uint8_t*)header.c_str(), 44}; - WAVHeader enc; - WAVAudioInfo wi; - wi.format = AudioFormat::PCM; - wi.sample_rate = info.sample_rate; - wi.bits_per_sample = info.bits_per_sample; - wi.channels = info.channels; - enc.setAudioInfo(wi); - // fill header with data - enc.writeHeader(&mp); - // make sure that the length is 44 - assert(header.length() == 44); - - return &header; - } -}; - -} \ No newline at end of file +#warning("obsolete: use AudioTools/Communication/AudioServerEx.h") +#include "AudioTools/Communication/AudioServerEx.h" \ No newline at end of file diff --git a/src/AudioTools/AudioLibs/Desktop/File.h b/src/AudioTools/AudioLibs/Desktop/File.h index f1fc1f468f..800ec28469 100644 --- a/src/AudioTools/AudioLibs/Desktop/File.h +++ b/src/AudioTools/AudioLibs/Desktop/File.h @@ -7,7 +7,12 @@ namespace audio_tools { +/// @brief Desktop file system compatibility alias +/// @ingroup io using File = VFSFile; + +/// @brief Desktop file system compatibility alias +/// @ingroup io using FS = VFS; static FS SD; // global object for compatibility with Arduino code diff --git a/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h b/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h index 7d5810c285..0abc953aea 100644 --- a/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h +++ b/src/AudioTools/AudioLibs/Desktop/JupyterAudio.h @@ -101,6 +101,8 @@ class ChartT { } }; +/// @brief Default chart type for Jupyter integration +/// @ingroup io using Chart = ChartT; /** @@ -193,6 +195,8 @@ class JupyterAudioT : public AudioStream { size_t buffer_count=0; }; +/// @brief Default Jupyter audio output with 16-bit samples +/// @ingroup io using JupyterAudio = JupyterAudioT; } // namespace audio_tools diff --git a/src/AudioTools/AudioLibs/SPDIFOutput.h 
b/src/AudioTools/AudioLibs/SPDIFOutput.h index b0cd488bb1..b83adf4a96 100644 --- a/src/AudioTools/AudioLibs/SPDIFOutput.h +++ b/src/AudioTools/AudioLibs/SPDIFOutput.h @@ -282,6 +282,8 @@ class SPDIFOutput : public AudioStream { } }; +/// @brief Alias for SPDIFOutput for backward compatibility +/// @ingroup io using SPDIFStream = SPDIFOutput; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Communication/AudioHttp.h b/src/AudioTools/Communication/AudioHttp.h new file mode 100644 index 0000000000..7a817763da --- /dev/null +++ b/src/AudioTools/Communication/AudioHttp.h @@ -0,0 +1,15 @@ +#pragma once + +/** + * @file AudioHttp.h + * @brief Convenience header to include all networking functionality for AudioTools + * + * This header includes WiFi-based URLStream and AudioServer implementations. + * Include this file when you want to use networking features with the AudioTools library. + * + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +#include "AudioTools/Communication/HTTP/AudioHttp.h" + diff --git a/src/AudioTools/Communication/AudioServerEx.h b/src/AudioTools/Communication/AudioServerEx.h new file mode 100644 index 0000000000..a5f3fa5a56 --- /dev/null +++ b/src/AudioTools/Communication/AudioServerEx.h @@ -0,0 +1,183 @@ +#pragma once + +#include "AudioToolsConfig.h" +#include "AudioTools/CoreAudio/AudioOutput.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" +#include "AudioTools/CoreAudio/AudioBasic/StrView.h" +#include "HttpServer.h" +#include "HttpExtensions.h" + +namespace audio_tools { + +/** + * @brief Config information for AudioServerEx + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +struct AudioServerExConfig : public AudioInfo { + const char* mime = nullptr; + const char* ssid = nullptr; + const char* password = nullptr; + const char* path = "/"; + // optional input; if not set, use the write methods to push data + Stream *input=nullptr; + int port = 80; +}; + +/** + * @brief A powerful Web server which is based on + * https://github.com/pschatzmann/TinyHttp. + * It supports multiple concurrent clients. You can e.g. use it to write mp3 data and make + * it available to multiple clients. + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ + +class AudioServerEx : public AudioOutput { + public: + // Default Constructor + AudioServerEx() = default; + + /// To be compatible with legacy API + AudioServerEx(const char *ssid, const char* pwd){ + info.ssid = ssid; + info.password = pwd; + } + + virtual AudioServerExConfig defaultConfig() { + AudioServerExConfig cfg; + return cfg; + } + + virtual bool begin(AudioServerExConfig cfg) { + info = cfg; + return begin(); + } + + virtual bool begin(Stream &in, const char* contentType) { + info.input = &in; + info.mime = contentType; + return begin(); + } + + virtual bool begin() { + end(); // we (re) start with a clean state + + if (info.input==nullptr){ + p_stream = new ExtensionStream(info.path,tinyhttp::T_GET, info.mime ); + } else { + p_stream = new ExtensionStream(info.path, info.mime, *info.input); + } + p_stream->setReplyHeader(*getReplyHeader()); + p_server = new tinyhttp::HttpServer(wifi); + + // handling of WAV + p_server->addExtension(*p_stream); + return p_server->begin(info.port, info.ssid, info.password); + } + + virtual void end() { + if (p_stream!=nullptr) { + delete p_stream; + p_stream = nullptr; + } + if (p_server!=nullptr) { + delete p_server; + p_server = nullptr; + } + } + + /// Web server supports write so that we can e.g.
use it as a destination for the audio player. + size_t write(const uint8_t* data, size_t len) override { + if (p_stream==nullptr) return 0; + return p_stream->write((uint8_t*)data, len); + } + + int availableForWrite() override { + if (p_stream==nullptr) return 0; + return p_stream->availableForWrite(); + } + + /// Needs to be called if the data was provided as input Stream in the AudioServerExConfig + virtual void copy() { + if (p_server!=nullptr){ + p_server->copy(); + } + } + + protected: + AudioServerExConfig info; + WiFiServer wifi; + HttpServer *p_server; + ExtensionStream *p_stream=nullptr; + + virtual tinyhttp::StrView* getReplyHeader() { + return nullptr; + } + +}; + +/** + * @brief A powerful WAV Web server which is based on + * https://github.com/pschatzmann/TinyHttp. + * It supports multiple concurrent clients + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + * + */ +class AudioWAVServerEx : public AudioServerEx { + public: + // Default Constructor + AudioWAVServerEx() = default; + + /// To be compatible with legacy API + AudioWAVServerEx(const char *ssid, const char* pwd):AudioServerEx(ssid, pwd){} + + AudioServerExConfig defaultConfig() override { + AudioServerExConfig cfg; + cfg.mime = "audio/wav"; + return cfg; + } + + /// Legacy API support + bool begin(Stream &in, int sample_rate, int channels, int bits_per_sample=16) { + info.input = &in; + info.sample_rate = sample_rate; + info.channels = channels; + info. bits_per_sample = bits_per_sample; + info.mime = "audio/wav"; + return AudioServerEx::begin(); + } + + bool begin(AudioServerExConfig cfg) override{ + return AudioServerEx::begin(cfg); + } + + protected: + // Dynamic memory + tinyhttp::Str header; + + // wav files start with a 44 bytes header + virtual tinyhttp::StrView* getReplyHeader() { + header.allocate(44); + MemoryOutput mp{(uint8_t*)header.c_str(), 44}; + WAVHeader enc; + WAVAudioInfo wi; + wi.format = AudioFormat::PCM; + wi.sample_rate = info.sample_rate; + wi.bits_per_sample = info.bits_per_sample; + wi.channels = info.channels; + enc.setAudioInfo(wi); + // fill header with data + enc.writeHeader(&mp); + // make sure that the length is 44 + assert(header.length() == 44); + + return &header; + } +}; + +} \ No newline at end of file diff --git a/src/AudioTools/Communication/HLSStreamESP32.h b/src/AudioTools/Communication/HLSStreamESP32.h index ea83435e59..348ff16747 100644 --- a/src/AudioTools/Communication/HLSStreamESP32.h +++ b/src/AudioTools/Communication/HLSStreamESP32.h @@ -1,9 +1,12 @@ #pragma once +#include "AudioTools/Communication/HTTP/URLStreamESP32.h" #include "AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h" #include "HLSStream.h" namespace audio_tools { +/// @brief HLS Stream implementation using URLStreamESP32 for ESP32-specific HTTP requests +/// @ingroup http using HLSStreamESP32 = HLSStreamT; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h b/src/AudioTools/Communication/HTTP/AbstractURLStream.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h rename to src/AudioTools/Communication/HTTP/AbstractURLStream.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioClient.h b/src/AudioTools/Communication/HTTP/AudioClient.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/AudioClient.h rename to src/AudioTools/Communication/HTTP/AudioClient.h diff --git a/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h
b/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h new file mode 100644 index 0000000000..6961607c0d --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioEncodedServerT.h @@ -0,0 +1,222 @@ +#pragma once + +#include "AudioServerT.h" + +namespace audio_tools { + +/** + * @brief A simple Arduino Webserver which streams the audio using the indicated + * encoder.. This class is based on the WiFiServer class. All you need to do is + * to provide the data with a callback method or from a Stream. + * + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class AudioEncoderServerT : public AudioServerT { + public: + /** + * @brief Construct a new Audio Server object that supports an AudioEncoder + * We assume that the WiFi is already connected + */ + AudioEncoderServerT(AudioEncoder *encoder, int port = 80) : AudioServerT(port) { + this->encoder = encoder; + } + + /** + * @brief Construct a new Audio Server object + * + * @param network + * @param password + */ + AudioEncoderServerT(AudioEncoder *encoder, const char *network, + const char *password, int port = 80) + : AudioServerT(network, password, port) { + this->encoder = encoder; + } + + /** + * @brief Destructor release the memory + **/ + virtual ~AudioEncoderServerT() = default; + + /** + * @brief Start the server. You need to be connected to WiFI before calling + * this method + * + * @param in + * @param sample_rate + * @param channels + */ + bool begin(Stream &in, int sample_rate, int channels, + int bits_per_sample = 16, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + AudioServerT::setConverter(converter); + audio_info.sample_rate = sample_rate; + audio_info.channels = channels; + audio_info.bits_per_sample = bits_per_sample; + encoder->setAudioInfo(audio_info); + // encoded_stream.begin(&client_obj, encoder); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(audio_info); + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. You need to be connected to WiFI before calling + * this method + * + * @param in + * @param info + * @param converter + */ + bool begin(Stream &in, AudioInfo info, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + this->audio_info = info; + AudioServerT::setConverter(converter); + encoder->setAudioInfo(audio_info); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + if (!encoded_stream.begin(audio_info)) { + LOGE("encoder begin failed"); + // stop(); + } + + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. You need to be connected to WiFI before calling + * this method + * + * @param in + * @param converter + */ + bool begin(AudioStream &in, BaseConverter *converter = nullptr) { + TRACED(); + this->in = ∈ + this->audio_info = in.audioInfo(); + AudioServerT::setConverter(converter); + encoder->setAudioInfo(audio_info); + encoded_stream.setOutput(&this->client_obj); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(audio_info); + + return AudioServerT::begin(in, encoder->mime()); + } + + /** + * @brief Start the server. 
The data must be provided by a callback method + * + * @param cb + * @param sample_rate + * @param channels + */ + bool begin(AudioServerDataCallback cb, int sample_rate, int channels, + int bits_per_sample = 16) { + TRACED(); + audio_info.sample_rate = sample_rate; + audio_info.channels = channels; + audio_info.bits_per_sample = bits_per_sample; + encoder->setAudioInfo(audio_info); + + return AudioServerT::begin(cb, encoder->mime()); + } + + // provides a pointer to the encoder + AudioEncoder *audioEncoder() { return encoder; } + + protected: + // Sound Generation - use EncodedAudioOutput with is more efficient then + // EncodedAudioStream + EncodedAudioOutput encoded_stream; + AudioInfo audio_info; + AudioEncoder *encoder = nullptr; + + // moved to be part of reply content to avoid timeout issues in Chrome + void sendReplyHeader() override {} + + void sendReplyContent() override { + TRACED(); + // restart encoder + if (encoder) { + encoder->end(); + encoder->begin(); + } + + if (this->callback != nullptr) { + // encoded_stream.begin(out_ptr(), encoder); + encoded_stream.setOutput(this->out_ptr()); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(); + + // provide data via Callback to encoded_stream + LOGI("sendReply - calling callback"); + // Send delayed header + AudioServerT::sendReplyHeader(); + this->callback(&encoded_stream); + this->client_obj.stop(); + } else if (this->in != nullptr) { + // provide data for stream: in -copy> encoded_stream -> out + LOGI("sendReply - Returning encoded stream..."); + // encoded_stream.begin(out_ptr(), encoder); + encoded_stream.setOutput(this->out_ptr()); + encoded_stream.setEncoder(encoder); + encoded_stream.begin(); + + this->copier.begin(encoded_stream, *this->in); + if (!this->client_obj.connected()) { + LOGE("connection was closed"); + } + // Send delayed header + AudioServerT::sendReplyHeader(); + } + } +}; + +/** + * @brief A simple Arduino Webserver which streams the audio as WAV data. + * This class is based on the AudioEncodedServer class. All you need to do is to + * provide the data with a callback method or from a Stream. 
+ * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ +template +class AudioWAVServerT : public AudioEncoderServerT { + public: + /** + * @brief Construct a new Audio WAV Server object + * We assume that the WiFi is already connected + */ + AudioWAVServerT(int port = 80) : AudioEncoderServerT(new WAVEncoder(), port) {} + + /** + * @brief Construct a new Audio WAV Server object + * + * @param network + * @param password + */ + AudioWAVServerT(const char *network, const char *password, int port = 80) + : AudioEncoderServerT(new WAVEncoder(), network, password, port) {} + + /// Destructor: release the allocated encoder + ~AudioWAVServerT() { + AudioEncoder *encoder = AudioEncoderServerT::audioEncoder(); + if (encoder != nullptr) { + delete encoder; + } + } + + // provides a pointer to the encoder + WAVEncoder &wavEncoder() { return *static_cast(AudioEncoderServerT::encoder); } +}; + + + +} // namespace audio_tools + diff --git a/src/AudioTools/Communication/HTTP/AudioHttp.h b/src/AudioTools/Communication/HTTP/AudioHttp.h new file mode 100644 index 0000000000..ea025757f6 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioHttp.h @@ -0,0 +1,25 @@ +#pragma once +/** + * @defgroup http Http + * @ingroup communications + * @brief Http client & server +**/ + +// Include abstract base classes and utilities +#include "AbstractURLStream.h" +#include "HttpRequest.h" +#include "HttpHeader.h" +#include "HttpTypes.h" +#include "ICYStreamT.h" +#include "URLStreamBufferedT.h" +#include "Url.h" + +// For backward compatibility, include stub implementations +#include "URLStream.h" +#include "ICYStream.h" +#include "AudioServer.h" + +#if ((defined(ESP32) && defined(USE_URL_ARDUINO)) || defined(ESP32_CMAKE)) +# include "URLStreamESP32.h" +#endif + diff --git a/src/AudioTools/Communication/HTTP/AudioServer.h b/src/AudioTools/Communication/HTTP/AudioServer.h new file mode 100644 index 0000000000..7fa6fb4fdc --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServer.h @@ -0,0 +1,4 @@ +#pragma once + +#include "AudioServerWiFi.h" +#include "AudioServerEthernet.h" \ No newline at end of file diff --git a/src/AudioTools/Communication/HTTP/AudioServerEthernet.h b/src/AudioTools/Communication/HTTP/AudioServerEthernet.h new file mode 100644 index 0000000000..b6c121b560 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServerEthernet.h @@ -0,0 +1,45 @@ +#pragma once + +#include "AudioToolsConfig.h" + +#ifdef USE_ETHERNET +# include +#endif + +#include "AudioServerT.h" +#include "AudioEncodedServerT.h" +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" + +namespace audio_tools { + +#ifdef USE_ETHERNET +/// @brief Ethernet audio server for streaming audio content over Ethernet +/// @ingroup http +using AudioServerEthernet = AudioServerT; + +/// @brief Ethernet audio server with encoder support for streaming encoded audio +/// @ingroup http +using AudioEncoderServerEthernet = AudioEncoderServerT; + +/// @brief Ethernet audio server specifically for streaming WAV audio +/// @ingroup http +using AudioWAVServerEthernet = AudioWAVServerT; + +#ifndef USE_WIFI +/// @brief Basic audio server (defaults to Ethernet when USE_WIFI is not defined) +/// @ingroup http +using AudioServer = AudioServerT; + +/// @brief Basic audio server with encoder support (defaults to Ethernet when USE_WIFI is not defined) +/// @ingroup http +using AudioEncoderServer = AudioEncoderServerEthernet; + +/// @brief Basic WAV audio server (defaults to Ethernet when USE_WIFI is not defined) +/// 
@ingroup http +using AudioWAVServer = AudioWAVServerEthernet; +#endif +#endif + + +} // namespace audio_tools diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioServer.h b/src/AudioTools/Communication/HTTP/AudioServerT.h similarity index 55% rename from src/AudioTools/CoreAudio/AudioHttp/AudioServer.h rename to src/AudioTools/Communication/HTTP/AudioServerT.h index 6fda68a5f1..8834df6da8 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/AudioServer.h +++ b/src/AudioTools/Communication/HTTP/AudioServerT.h @@ -1,24 +1,25 @@ #pragma once +#include "AudioTools/AudioCodecs/CodecWAV.h" +#include "AudioTools/CoreAudio/AudioStreams.h" +#include "AudioTools/CoreAudio/StreamCopy.h" #include "AudioToolsConfig.h" -#if defined(USE_AUDIO_SERVER) && (defined(USE_ETHERNET) || defined(USE_WIFI)) -#ifdef USE_WIFI -#ifdef ESP8266 -#include -#else -#include -#endif -#endif - -#ifdef USE_ETHERNET -#include -#endif +namespace audio_tools { -#include "AudioTools.h" -#include "AudioTools/AudioCodecs/CodecWAV.h" +/// Calback which writes the sound data to the stream +typedef void (*AudioServerDataCallback)(Print *out); -namespace audio_tools { +/** + * @brief A simple Arduino Webserver template which streams the result + * This template class can work with different Client and Server types. + * All you need to do is to provide the data with a callback method or + * from an Arduino Stream: in -copy> client + * + * @ingroup http + * @author Phil Schatzmann + * @copyright GPLv3 + */ /// Calback which writes the sound data to the stream typedef void (*AudioServerDataCallback)(Print *out); @@ -302,226 +303,4 @@ class AudioServerT { } }; -#ifdef USE_WIFI -using AudioServer = AudioServerT; -using AudioServerWiFi = AudioServerT; -#endif - -#ifdef USE_ETHERNET -using AudioServer = AudioServerT; -using AudioServerEthernet = AudioServerT; -#endif - -/** - * @brief A simple Arduino Webserver which streams the audio using the indicated - * encoder.. This class is based on the WiFiServer class. All you need to do is - * to provide the data with a callback method or from a Stream. - * - * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class AudioEncoderServer : public AudioServer { - public: - /** - * @brief Construct a new Audio Server object that supports an AudioEncoder - * We assume that the WiFi is already connected - */ - AudioEncoderServer(AudioEncoder *encoder, int port = 80) : AudioServer(port) { - this->encoder = encoder; - } - - /** - * @brief Construct a new Audio Server object - * - * @param network - * @param password - */ - AudioEncoderServer(AudioEncoder *encoder, const char *network, - const char *password, int port = 80) - : AudioServer(network, password, port) { - this->encoder = encoder; - } - - /** - * @brief Destructor release the memory - **/ - ~AudioEncoderServer() {} - - /** - * @brief Start the server. 
You need to be connected to WiFI before calling - * this method - * - * @param in - * @param sample_rate - * @param channels - */ - bool begin(Stream &in, int sample_rate, int channels, - int bits_per_sample = 16, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - setConverter(converter); - audio_info.sample_rate = sample_rate; - audio_info.channels = channels; - audio_info.bits_per_sample = bits_per_sample; - encoder->setAudioInfo(audio_info); - // encoded_stream.begin(&client_obj, encoder); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(audio_info); - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. You need to be connected to WiFI before calling - * this method - * - * @param in - * @param info - * @param converter - */ - bool begin(Stream &in, AudioInfo info, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - this->audio_info = info; - setConverter(converter); - encoder->setAudioInfo(audio_info); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - if (!encoded_stream.begin(audio_info)) { - LOGE("encoder begin failed"); - stop(); - } - - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. You need to be connected to WiFI before calling - * this method - * - * @param in - * @param converter - */ - bool begin(AudioStream &in, BaseConverter *converter = nullptr) { - TRACED(); - this->in = ∈ - this->audio_info = in.audioInfo(); - setConverter(converter); - encoder->setAudioInfo(audio_info); - encoded_stream.setOutput(&client_obj); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(audio_info); - - return AudioServer::begin(in, encoder->mime()); - } - - /** - * @brief Start the server. 
The data must be provided by a callback method - * - * @param cb - * @param sample_rate - * @param channels - */ - bool begin(AudioServerDataCallback cb, int sample_rate, int channels, - int bits_per_sample = 16) { - TRACED(); - audio_info.sample_rate = sample_rate; - audio_info.channels = channels; - audio_info.bits_per_sample = bits_per_sample; - encoder->setAudioInfo(audio_info); - - return AudioServer::begin(cb, encoder->mime()); - } - - // provides a pointer to the encoder - AudioEncoder *audioEncoder() { return encoder; } - - protected: - // Sound Generation - use EncodedAudioOutput with is more efficient then - // EncodedAudioStream - EncodedAudioOutput encoded_stream; - AudioInfo audio_info; - AudioEncoder *encoder = nullptr; - - // moved to be part of reply content to avoid timeout issues in Chrome - void sendReplyHeader() override {} - - void sendReplyContent() override { - TRACED(); - // restart encoder - if (encoder) { - encoder->end(); - encoder->begin(); - } - - if (callback != nullptr) { - // encoded_stream.begin(out_ptr(), encoder); - encoded_stream.setOutput(out_ptr()); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(); - - // provide data via Callback to encoded_stream - LOGI("sendReply - calling callback"); - // Send delayed header - AudioServer::sendReplyHeader(); - callback(&encoded_stream); - client_obj.stop(); - } else if (in != nullptr) { - // provide data for stream: in -copy> encoded_stream -> out - LOGI("sendReply - Returning encoded stream..."); - // encoded_stream.begin(out_ptr(), encoder); - encoded_stream.setOutput(out_ptr()); - encoded_stream.setEncoder(encoder); - encoded_stream.begin(); - - copier.begin(encoded_stream, *in); - if (!client_obj.connected()) { - LOGE("connection was closed"); - } - // Send delayed header - AudioServer::sendReplyHeader(); - } - } -}; - -/** - * @brief A simple Arduino Webserver which streams the audio as WAV data. - * This class is based on the AudioEncodedServer class. All you need to do is to - * provide the data with a callback method or from a Stream. 
- * @ingroup http - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class AudioWAVServer : public AudioEncoderServer { - public: - /** - * @brief Construct a new Audio WAV Server object - * We assume that the WiFi is already connected - */ - AudioWAVServer(int port = 80) : AudioEncoderServer(new WAVEncoder(), port) {} - - /** - * @brief Construct a new Audio WAV Server object - * - * @param network - * @param password - */ - AudioWAVServer(const char *network, const char *password, int port = 80) - : AudioEncoderServer(new WAVEncoder(), network, password, port) {} - - /// Destructor: release the allocated encoder - ~AudioWAVServer() { - AudioEncoder *encoder = audioEncoder(); - if (encoder != nullptr) { - delete encoder; - } - } - - // provides a pointer to the encoder - WAVEncoder &wavEncoder() { return *static_cast(encoder); } -}; - } // namespace audio_tools - -#endif diff --git a/src/AudioTools/Communication/HTTP/AudioServerWiFi.h b/src/AudioTools/Communication/HTTP/AudioServerWiFi.h new file mode 100644 index 0000000000..778f328bad --- /dev/null +++ b/src/AudioTools/Communication/HTTP/AudioServerWiFi.h @@ -0,0 +1,43 @@ +#pragma once + +#include "AudioToolsConfig.h" + +#ifdef USE_WIFI +#include "WiFiInclude.h" +#endif + +#include "AudioEncodedServerT.h" +#include "AudioServerT.h" +#include "AudioTools.h" +#include "AudioTools/AudioCodecs/CodecWAV.h" + +namespace audio_tools { + +#ifdef USE_WIFI +/// @brief Basic WiFi audio server for streaming audio content +/// @ingroup http +using AudioServer = AudioServerT; + +/// @brief WiFi audio server for streaming audio content (explicit WiFi naming) +/// @ingroup http +using AudioServerWiFi = AudioServerT; + +/// @brief WiFi audio server with encoder support for streaming encoded audio +/// @ingroup http +using AudioEncoderServerWiFi = AudioEncoderServerT; + +/// @brief Basic audio server with encoder support (defaults to WiFi when +/// USE_WIFI is defined) +/// @ingroup http +using AudioEncoderServer = AudioEncoderServerT; + +/// @brief WiFi audio server specifically for streaming WAV audio +/// @ingroup http +using AudioWAVServerWiFi = AudioWAVServerT; + +/// @brief Basic WAV audio server (defaults to WiFi when USE_WIFI is defined) +/// @ingroup http +using AudioWAVServer = AudioWAVServerT; +#endif + +} // namespace audio_tools diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpChunkReader.h b/src/AudioTools/Communication/HTTP/HttpChunkReader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpChunkReader.h rename to src/AudioTools/Communication/HTTP/HttpChunkReader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpHeader.h b/src/AudioTools/Communication/HTTP/HttpHeader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpHeader.h rename to src/AudioTools/Communication/HTTP/HttpHeader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpLineReader.h b/src/AudioTools/Communication/HTTP/HttpLineReader.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpLineReader.h rename to src/AudioTools/Communication/HTTP/HttpLineReader.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpRequest.h b/src/AudioTools/Communication/HTTP/HttpRequest.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpRequest.h rename to src/AudioTools/Communication/HTTP/HttpRequest.h diff --git a/src/AudioTools/CoreAudio/AudioHttp/HttpTypes.h b/src/AudioTools/Communication/HTTP/HttpTypes.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/HttpTypes.h rename 
to src/AudioTools/Communication/HTTP/HttpTypes.h diff --git a/src/AudioTools/Communication/HTTP/ICYStream.h b/src/AudioTools/Communication/HTTP/ICYStream.h new file mode 100644 index 0000000000..bbec6bc292 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/ICYStream.h @@ -0,0 +1,16 @@ +#pragma once +#include "AudioTools/Communication/HTTP/URLStream.h" +#include "AudioTools/Communication/HTTP/ICYStreamT.h" + +namespace audio_tools { + +/// Type alias for ICYStream +using ICYStream = ICYStreamT; + +#if defined(USE_CONCURRENCY) +/// Type alias for buffered ICYStream +using ICYStreamBuffered = URLStreamBufferedT; + +#endif + +} \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h b/src/AudioTools/Communication/HTTP/ICYStreamT.h similarity index 99% rename from src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h rename to src/AudioTools/Communication/HTTP/ICYStreamT.h index b2b346e935..19708a8e00 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/ICYStreamT.h +++ b/src/AudioTools/Communication/HTTP/ICYStreamT.h @@ -1,5 +1,5 @@ #pragma once -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h" #include "AudioToolsConfig.h" diff --git a/src/AudioTools/Communication/HTTP/README.md b/src/AudioTools/Communication/HTTP/README.md new file mode 100644 index 0000000000..c96d798a51 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/README.md @@ -0,0 +1,4 @@ + +We provide our own HTTP protocol implementation which includes a simple webserver. +Initially this was part of the core functionality, but has been changed to __optional functionality__ +in order to optimize the sketch size. \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStream.h b/src/AudioTools/Communication/HTTP/URLStream.h similarity index 91% rename from src/AudioTools/CoreAudio/AudioHttp/URLStream.h rename to src/AudioTools/Communication/HTTP/URLStream.h index fdd998ccd9..e748e1403f 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStream.h +++ b/src/AudioTools/Communication/HTTP/URLStream.h @@ -1,21 +1,15 @@ #pragma once #include "AudioToolsConfig.h" -#ifdef USE_URL_ARDUINO -#if defined(ESP32) -# include -# include -# include -# include +#if defined(USE_WIFI) +# include "WiFiInclude.h" #endif #include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" -#include "AudioTools/CoreAudio/AudioHttp/ICYStreamT.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h" - +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/HttpRequest.h" +#include "AudioTools/Communication/HTTP/URLStreamBufferedT.h" namespace audio_tools { @@ -25,7 +19,7 @@ namespace audio_tools { * In this chase you can check if setting the protocol to "HTTP/1.0" improves * the situation.
* @author Phil Schatzmann - * @ingroup http + * @ingroup network * @copyright GPLv3 * */ @@ -55,18 +49,14 @@ class URLStream : public AbstractURLStream { ~URLStream() { TRACED(); end(); -#ifdef USE_WIFI_CLIENT_SECURE if (clientSecure != nullptr) { delete clientSecure; clientSecure = nullptr; } -#endif -#ifdef USE_WIFI if (clientInsecure != nullptr) { delete clientInsecure; clientInsecure = nullptr; } -#endif } /// (Re-)defines the client @@ -259,9 +249,7 @@ class URLStream : public AbstractURLStream { /// Define the Root PEM Certificate for SSL void setCACert(const char* cert) override{ - #ifdef USE_WIFI_CLIENT_SECURE if (clientSecure!=nullptr) clientSecure->setCACert(cert); - #endif } protected: @@ -281,12 +269,8 @@ class URLStream : public AbstractURLStream { const char* network = nullptr; const char* password = nullptr; Client* client = nullptr; // client defined via setClient -#ifdef USE_WIFI WiFiClient* clientInsecure = nullptr; // wifi client for http -#endif -#ifdef USE_WIFI_CLIENT_SECURE WiFiClientSecure* clientSecure = nullptr; // wifi client for https -#endif int clientTimeout = URL_CLIENT_TIMEOUT; // 60000; unsigned long handshakeTimeout = URL_HANDSHAKE_TIMEOUT; // 120000 bool is_power_save = false; @@ -300,7 +284,6 @@ class URLStream : public AbstractURLStream { // close it - if we have an active connection if (active) end(); -#ifdef USE_WIFI // optional: login if necessary if no external client is defined if (client == nullptr){ if (!login()){ @@ -308,7 +291,6 @@ class URLStream : public AbstractURLStream { return false; } } -#endif // request.reply().setAutoCreateLines(false); if (acceptMime != nullptr) { @@ -323,7 +305,7 @@ class URLStream : public AbstractURLStream { client.setTimeout(clientTimeout / 1000); request.setTimeout(clientTimeout); -#if defined(ESP32) && defined(USE_WIFI_CLIENT_SECURE) +#if defined(ESP32) // There is a bug in IDF 4! 
if (clientSecure != nullptr) { clientSecure->setHandshakeTimeout(handshakeTimeout); @@ -370,7 +352,6 @@ class URLStream : public AbstractURLStream { /// Determines the client Client& getClient(bool isSecure) { -#ifdef USE_WIFI_CLIENT_SECURE if (isSecure) { if (clientSecure == nullptr) { clientSecure = new WiFiClientSecure(); @@ -379,20 +360,11 @@ class URLStream : public AbstractURLStream { LOGI("WiFiClientSecure"); return *clientSecure; } -#endif -#ifdef USE_WIFI if (clientInsecure == nullptr) { clientInsecure = new WiFiClient(); LOGI("WiFiClient"); } return *clientInsecure; -#else - if (client == nullptr){ - LOGE("Client not set"); - stop(); - } - return *client; // to avoid compiler warning -#endif } inline void fillBuffer() { @@ -406,7 +378,6 @@ class URLStream : public AbstractURLStream { inline bool isEOS() { return read_pos >= read_size; } bool login() { -#ifdef USE_WIFI if (network != nullptr && password != nullptr && WiFi.status() != WL_CONNECTED) { TRACEI(); @@ -420,22 +391,14 @@ class URLStream : public AbstractURLStream { return WiFi.status() == WL_CONNECTED; } return WiFi.status() == WL_CONNECTED; -#else - return false; -#endif } }; -/// Type alias for ICYStream -using ICYStream = ICYStreamT; #if defined(USE_CONCURRENCY) /// Type alias for buffered URLStream using URLStreamBuffered = URLStreamBufferedT; -/// Type alias for buffered ICYStream -using ICYStreamBuffered = URLStreamBufferedT; + #endif } // namespace audio_tools - -#endif diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h b/src/AudioTools/Communication/HTTP/URLStreamBufferedT.h similarity index 99% rename from src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h rename to src/AudioTools/Communication/HTTP/URLStreamBufferedT.h index 1331a2b8af..8f47c56c6a 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h +++ b/src/AudioTools/Communication/HTTP/URLStreamBufferedT.h @@ -2,7 +2,7 @@ #include "AudioToolsConfig.h" #if defined(USE_CONCURRENCY) #include "AudioTools/AudioLibs/Concurrency.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/BaseStream.h" #ifndef URL_STREAM_CORE diff --git a/src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h b/src/AudioTools/Communication/HTTP/URLStreamESP32.h similarity index 95% rename from src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h rename to src/AudioTools/Communication/HTTP/URLStreamESP32.h index 66a7a2b4b4..e471e2907c 100644 --- a/src/AudioTools/CoreAudio/AudioHttp/URLStreamESP32.h +++ b/src/AudioTools/Communication/HTTP/URLStreamESP32.h @@ -1,9 +1,9 @@ #pragma once -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" -#include "AudioTools/CoreAudio/AudioHttp/ICYStreamT.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStreamBufferedT.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/HttpRequest.h" +#include "AudioTools/Communication/HTTP/ICYStreamT.h" +#include "AudioTools/Communication/HTTP/URLStreamBufferedT.h" #include "esp_http_client.h" #include "esp_idf_version.h" #include "esp_system.h" @@ -414,17 +414,22 @@ class URLStreamESP32 : public AbstractURLStream { } }; -/// ICYStream +/// ICYStream for ESP32 platform using ICYStreamESP32 = ICYStreamT; #if defined(USE_CONCURRENCY) +/// Buffered URLStream for ESP32 platform using URLStreamBufferedESP32 = URLStreamBufferedT; +/// Buffered ICYStream for ESP32 platform 
using ICYStreamBufferedESP32 = URLStreamBufferedT; #endif /// Support URLStream w/o Arduino #if !defined(ARDUINO) +/// URLStream alias for ESP32 (non-Arduino environments) using URLStream = URLStreamESP32; +/// Buffered URLStream alias for ESP32 (non-Arduino environments) using URLStreamBuffered = URLStreamBufferedESP32; +/// Buffered ICYStream alias for ESP32 (non-Arduino environments) using ICYStreamBuffered = ICYStreamBufferedESP32; #endif diff --git a/src/AudioTools/CoreAudio/AudioHttp/Url.h b/src/AudioTools/Communication/HTTP/Url.h similarity index 100% rename from src/AudioTools/CoreAudio/AudioHttp/Url.h rename to src/AudioTools/Communication/HTTP/Url.h diff --git a/src/AudioTools/Communication/HTTP/WiFiInclude.h b/src/AudioTools/Communication/HTTP/WiFiInclude.h new file mode 100644 index 0000000000..b2d7cf6285 --- /dev/null +++ b/src/AudioTools/Communication/HTTP/WiFiInclude.h @@ -0,0 +1,20 @@ +#pragma once +#include "AudioToolsConfig.h" + +// Different platforms have different WiFi libraries + +#if defined(USE_WIFININA) +# include +#elif defined(USE_WIFIS3) +# include +#elif defined(ESP8266) +# include +#elif defined(ESP32) +# include +# include +# include +# include +#else +# include +#endif + diff --git a/src/AudioTools/Concurrency/RP2040/BufferRP2040.h b/src/AudioTools/Concurrency/RP2040/BufferRP2040.h index 1d19f3989d..f55dedb0ff 100644 --- a/src/AudioTools/Concurrency/RP2040/BufferRP2040.h +++ b/src/AudioTools/Concurrency/RP2040/BufferRP2040.h @@ -218,6 +218,8 @@ class BufferRP2040T : public BaseBuffer { }; +/// @brief RP2040 specific buffer for audio data +/// @ingroup buffers using BufferRP2040 = BufferRP2040T; } // namespace audio_tools diff --git a/src/AudioTools/Concurrency/RP2040/MutexRP2040.h b/src/AudioTools/Concurrency/RP2040/MutexRP2040.h index 53627db614..685a56e914 100644 --- a/src/AudioTools/Concurrency/RP2040/MutexRP2040.h +++ b/src/AudioTools/Concurrency/RP2040/MutexRP2040.h @@ -55,6 +55,8 @@ class MutexRP2040 : public MutexBase { mutex_t mtx; }; +/// @brief Default Mutex implementation using RP2040 Pico SDK +/// @ingroup concurrency using Mutex = MutexRP2040; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Concurrency/RTOS/BufferRTOS.h b/src/AudioTools/Concurrency/RTOS/BufferRTOS.h index fc41b920c7..1b3d7cadcc 100644 --- a/src/AudioTools/Concurrency/RTOS/BufferRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/BufferRTOS.h @@ -208,6 +208,8 @@ class BufferRTOS : public BaseBuffer { }; // #endif // ESP_IDF_VERSION_MAJOR >= 4 +/// @brief Template alias for RTOS-based synchronized buffer +/// @ingroup concurrency template using SynchronizedBufferRTOS = BufferRTOS; diff --git a/src/AudioTools/Concurrency/RTOS/MutexRTOS.h b/src/AudioTools/Concurrency/RTOS/MutexRTOS.h index 67d72b4bd4..28419ddec0 100644 --- a/src/AudioTools/Concurrency/RTOS/MutexRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/MutexRTOS.h @@ -39,6 +39,8 @@ class MutexRTOS : public MutexBase { SemaphoreHandle_t xSemaphore = NULL; }; +/// @brief Default Mutex implementation using RTOS semaphores +/// @ingroup concurrency using Mutex = MutexRTOS; } \ No newline at end of file diff --git a/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h b/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h index d23cb4f72b..561f94a421 100644 --- a/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h +++ b/src/AudioTools/Concurrency/RTOS/SynchronizedNBufferRTOS.h @@ -118,7 +118,12 @@ class SynchronizedNBufferRTOST : public NBuffer { } }; +/// @brief RTOS synchronized buffer for 
managing multiple audio buffers +/// @ingroup buffers using SynchronizedNBufferRTOS = SynchronizedNBufferRTOST; + +/// @brief Default synchronized buffer alias +/// @ingroup buffers using SynchronizedNBuffer = SynchronizedNBufferRTOS; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/CoreAudio.h b/src/AudioTools/CoreAudio.h index 44b622ec3e..e638a952d6 100644 --- a/src/AudioTools/CoreAudio.h +++ b/src/AudioTools/CoreAudio.h @@ -23,6 +23,5 @@ #include "AudioTools/CoreAudio/AnalogAudioStream.h" #include "AudioTools/CoreAudio/AudioEffects.h" #include "AudioTools/CoreAudio/AudioMetaData.h" -#include "AudioTools/CoreAudio/AudioHttp.h" #include "AudioTools/CoreAudio/FrequencyDetector.h" #include "AudioTools/CoreAudio/GoerzelStream.h" diff --git a/src/AudioTools/CoreAudio/AudioHttp.h b/src/AudioTools/CoreAudio/AudioHttp.h deleted file mode 100644 index 570f61d65d..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp.h +++ /dev/null @@ -1,3 +0,0 @@ -#pragma once - -#include "AudioTools/CoreAudio/AudioHttp/AudioHttp.h" diff --git a/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h b/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h deleted file mode 100644 index 9d4a0c3e84..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp/AudioHttp.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once -/** - * @defgroup http Http - * @ingroup communications - * @brief Http client & server -**/ - -#include "URLStream.h" -#include "AudioServer.h" - -#if ((defined(ESP32) && defined(USE_URL_ARDUINO)) || defined(ESP32_CMAKE)) -# include "URLStreamESP32.h" -#endif \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioHttp/README.md b/src/AudioTools/CoreAudio/AudioHttp/README.md deleted file mode 100644 index e0cc926917..0000000000 --- a/src/AudioTools/CoreAudio/AudioHttp/README.md +++ /dev/null @@ -1,2 +0,0 @@ - -We provide our own HTTP protocal implementation which includes a simple webserver \ No newline at end of file diff --git a/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h b/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h index 1ce8fee0bf..f3bf6d97f3 100644 --- a/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h +++ b/src/AudioTools/CoreAudio/AudioMetaData/MetaData.h @@ -3,7 +3,7 @@ #include "AudioToolsConfig.h" #include "AudioTools/CoreAudio/AudioTypes.h" #include "AudioTools/CoreAudio/AudioStreams.h" -#include "AudioTools/CoreAudio/AudioHttp/HttpRequest.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaDataFilter.h" #include "MetaDataICY.h" #include "MetaDataID3.h" @@ -42,8 +42,6 @@ class MetaDataOutput : public AudioOutput { callback = fn; } -#ifdef USE_URL_ARDUINO - /// Starts the processing - iceMetaint is determined from the HttpRequest virtual void begin(AbstractURLStream &url) { TRACED(); @@ -52,7 +50,6 @@ class MetaDataOutput : public AudioOutput { icySetup.executeCallback(callback); begin(metaInt); } -#endif /// Starts the processing - if iceMetaint is defined we use icecast virtual void begin(int iceMetaint=0) { diff --git a/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h b/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h index f1e3811cc4..473832e73e 100644 --- a/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h +++ b/src/AudioTools/CoreAudio/AudioMetaData/MetaDataICY.h @@ -3,7 +3,7 @@ #include "AudioToolsConfig.h" #include "AbstractMetaData.h" #include "AudioTools/CoreAudio/AudioBasic/StrView.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include 
"AudioTools/Communication/HTTP/AbstractURLStream.h" namespace audio_tools { diff --git a/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h b/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h index 5f242cf926..b48188f339 100644 --- a/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h +++ b/src/AudioTools/CoreAudio/AudioPWM/PWMDriverAVR.h @@ -8,6 +8,8 @@ namespace audio_tools { class PWMDriverAVR; +/// @brief Platform-specific PWM driver alias for AVR +/// @ingroup io using PWMDriver = PWMDriverAVR; static PWMDriverAVR *accessAudioPWM = nullptr; diff --git a/src/AudioTools/CoreAudio/AudioPlayer.h b/src/AudioTools/CoreAudio/AudioPlayer.h index 9aaab248a8..684967d190 100644 --- a/src/AudioTools/CoreAudio/AudioPlayer.h +++ b/src/AudioTools/CoreAudio/AudioPlayer.h @@ -2,7 +2,6 @@ #include "AudioTools/AudioCodecs/AudioCodecs.h" #include "AudioTools/CoreAudio/AudioBasic/Debouncer.h" -#include "AudioTools/CoreAudio/AudioHttp/AudioHttp.h" #include "AudioTools/CoreAudio/AudioLogger.h" #include "AudioTools/CoreAudio/AudioMetaData/MetaData.h" #include "AudioTools/CoreAudio/AudioStreams.h" diff --git a/src/AudioTools/CoreAudio/AudioStreams.h b/src/AudioTools/CoreAudio/AudioStreams.h index e68b18e4fb..dac9d93dbd 100644 --- a/src/AudioTools/CoreAudio/AudioStreams.h +++ b/src/AudioTools/CoreAudio/AudioStreams.h @@ -1945,7 +1945,12 @@ class VolumeMeter : public ModifyingStream { }; // legacy names +/// @brief Legacy alias for VolumeMeter +/// @ingroup io using VolumePrint = VolumeMeter; + +/// @brief Legacy alias for VolumeMeter +/// @ingroup io using VolumeOutput = VolumeMeter; #ifdef USE_TIMER diff --git a/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h b/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h index d655a3705d..31fa22d197 100644 --- a/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h +++ b/src/AudioTools/CoreAudio/AudioTimer/AudioTimerDesktop.h @@ -1,12 +1,13 @@ #pragma once -#include -#include -#include #include "AudioTimerBase.h" #if defined(USE_TIMER) && defined(USE_CPP_TASK) +#include +#include +#include + namespace audio_tools { /** diff --git a/src/AudioTools/CoreAudio/AudioTypes.h b/src/AudioTools/CoreAudio/AudioTypes.h index 17db4c6a3d..6abb168c3a 100644 --- a/src/AudioTools/CoreAudio/AudioTypes.h +++ b/src/AudioTools/CoreAudio/AudioTypes.h @@ -18,6 +18,8 @@ namespace audio_tools { +/// @brief Type alias for sample rate values +/// @ingroup basic using sample_rate_t = uint32_t; /** @@ -520,7 +522,8 @@ inline void waitFor(HardwareSerial& out) { while (!out); } /// wait for flag to be active @ingroup basic inline void waitFor(bool& flag) { while (!flag); } -/// Pins @ingroup basic +/// @brief Type alias for a collection of pin numbers +/// @ingroup basic using Pins = Vector; } // namespace audio_tools \ No newline at end of file diff --git a/src/AudioTools/Disk/AudioSourceURL.h b/src/AudioTools/Disk/AudioSourceURL.h index e3df89d489..d0c02c7de8 100644 --- a/src/AudioTools/Disk/AudioSourceURL.h +++ b/src/AudioTools/Disk/AudioSourceURL.h @@ -3,7 +3,7 @@ #pragma once #include "AudioToolsConfig.h" #include "AudioSource.h" -#include "AudioTools/CoreAudio/AudioHttp/AbstractURLStream.h" +#include "AudioTools/Communication/HTTP/AbstractURLStream.h" namespace audio_tools { diff --git a/src/AudioTools/PlatformConfig/avr.h b/src/AudioTools/PlatformConfig/avr.h index 43dc86965b..fe0a87d5bd 100644 --- a/src/AudioTools/PlatformConfig/avr.h +++ b/src/AudioTools/PlatformConfig/avr.h @@ -4,9 +4,8 @@ #define USE_PWM #define USE_TIMER #define NO_INPLACE_INIT_SUPPORT -// Uncomment to activate 
network -//#include -//#define USE_URL_ARDUINO +#define USE_ETHERNET +#define USE_URL_ARDUINO #ifndef assert # define assert(T) #endif diff --git a/src/AudioTools/PlatformConfig/giga.h b/src/AudioTools/PlatformConfig/giga.h index b0c9ca9ccd..9a8830b748 100644 --- a/src/AudioTools/PlatformConfig/giga.h +++ b/src/AudioTools/PlatformConfig/giga.h @@ -2,7 +2,6 @@ #pragma once -#include #include #define IS_MBED diff --git a/src/AudioTools/PlatformConfig/portenta.h b/src/AudioTools/PlatformConfig/portenta.h index 921a2e5471..49ba6ce16f 100644 --- a/src/AudioTools/PlatformConfig/portenta.h +++ b/src/AudioTools/PlatformConfig/portenta.h @@ -2,7 +2,6 @@ #pragma once -#include #include #define IS_MBED diff --git a/src/AudioTools/PlatformConfig/samd.h b/src/AudioTools/PlatformConfig/samd.h index bca98a7f27..9f57346d93 100644 --- a/src/AudioTools/PlatformConfig/samd.h +++ b/src/AudioTools/PlatformConfig/samd.h @@ -15,7 +15,7 @@ #define PIN_CS 4 #ifdef ARDUINO_SAMD_MKRWIFI1010 -#include +#define USE_WIFI_NININA #define USE_URL_ARDUINO #define USE_AUDIO_SERVER #endif diff --git a/src/AudioTools/PlatformConfig/stm32.h b/src/AudioTools/PlatformConfig/stm32.h index a1e83ceab6..3706770967 100644 --- a/src/AudioTools/PlatformConfig/stm32.h +++ b/src/AudioTools/PlatformConfig/stm32.h @@ -25,8 +25,7 @@ #define SOFT_MUTE_VALUE 0 #define PIN_CS -1 -// Uncomment to activate networking -//#define USE_ETHERNET -//#define USE_URL_ARDUINO -//#define USE_AUDIO_SERVER +#define USE_ETHERNET +#define USE_URL_ARDUINO +#define USE_AUDIO_SERVER diff --git a/src/AudioTools/PlatformConfig/unor4.h b/src/AudioTools/PlatformConfig/unor4.h index 18f05a5085..380eb5f0cf 100644 --- a/src/AudioTools/PlatformConfig/unor4.h +++ b/src/AudioTools/PlatformConfig/unor4.h @@ -32,5 +32,5 @@ # define USE_WIFI # define USE_URL_ARDUINO # define USE_AUDIO_SERVER -# include "WiFiS3.h" +# define USE_WIFIS3 #endif diff --git a/tests-cmake/codec/CMakeLists.txt b/tests-cmake/codec/CMakeLists.txt index dfaf1b08b6..ac0bcd24ad 100644 --- a/tests-cmake/codec/CMakeLists.txt +++ b/tests-cmake/codec/CMakeLists.txt @@ -30,7 +30,7 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/hls ${CMAKE_CURRENT_BINARY_DIR}/hls add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/container-m4a ${CMAKE_CURRENT_BINARY_DIR}/container-m4a) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/m4a-extractor ${CMAKE_CURRENT_BINARY_DIR}/m4a-extractor) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp4-parser ${CMAKE_CURRENT_BINARY_DIR}/mp4-parser) -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp3-parser ${CMAKE_CURRENT_BINARY_DIR}/mp4-parser) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mp3-parser ${CMAKE_CURRENT_BINARY_DIR}/mp3-parser) #add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/container-avi-movie ${CMAKE_CURRENT_BINARY_DIR}/container-avi-movie) diff --git a/tests-cmake/codec/aac-faad/aac-faad.cpp b/tests-cmake/codec/aac-faad/aac-faad.cpp index e2882f1472..4c98baa722 100644 --- a/tests-cmake/codec/aac-faad/aac-faad.cpp +++ b/tests-cmake/codec/aac-faad/aac-faad.cpp @@ -4,8 +4,6 @@ //#include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); //PortAudioStream out; // Output of sound on desktop CsvOutput out(Serial, 2); diff --git a/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp b/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp index c72d604247..ce8f104dc1 100644 --- a/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp +++ b/tests-cmake/codec/aac-fdk-encode/aac-fdk-encode.cpp @@ 
-4,7 +4,6 @@ #include "AudioTools/AudioCodecs/CodecAACFDK.h" //#include // for rand -using namespace audio_tools; HexDumpOutput out(Serial); AACEncoderFDK aac(out); diff --git a/tests-cmake/codec/aac-fdk/aac-fdk.cpp b/tests-cmake/codec/aac-fdk/aac-fdk.cpp index c25125046d..f6ec92ee84 100644 --- a/tests-cmake/codec/aac-fdk/aac-fdk.cpp +++ b/tests-cmake/codec/aac-fdk/aac-fdk.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new AACDecoderFDK()); // aac data source diff --git a/tests-cmake/codec/aac-helix/aac-helix.cpp b/tests-cmake/codec/aac-helix/aac-helix.cpp index 68f31cb69b..37cfbdab7d 100644 --- a/tests-cmake/codec/aac-helix/aac-helix.cpp +++ b/tests-cmake/codec/aac-helix/aac-helix.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "audio.h" -using namespace audio_tools; - MemoryStream aac(gs_16b_2c_44100hz_aac, gs_16b_2c_44100hz_aac_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new AACDecoderHelix()); // aac data source diff --git a/tests-cmake/codec/mp3-helix/mp3-helix.cpp b/tests-cmake/codec/mp3-helix/mp3-helix.cpp index d6504f1034..3ede3b6b9b 100644 --- a/tests-cmake/codec/mp3-helix/mp3-helix.cpp +++ b/tests-cmake/codec/mp3-helix/mp3-helix.cpp @@ -5,8 +5,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "BabyElephantWalk60_mp3.h" -using namespace audio_tools; - MemoryStream mp3(BabyElephantWalk60_mp3, BabyElephantWalk60_mp3_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new MP3DecoderHelix()); // MP3 data source diff --git a/tests-cmake/codec/mp3-lame/mp3-lame.cpp b/tests-cmake/codec/mp3-lame/mp3-lame.cpp index d8d3e7483e..8e9f01609e 100644 --- a/tests-cmake/codec/mp3-lame/mp3-lame.cpp +++ b/tests-cmake/codec/mp3-lame/mp3-lame.cpp @@ -4,8 +4,6 @@ #include "AudioTools/AudioCodecs/CodecMP3LAME.h" //#include // for rand -using namespace audio_tools; - HexDumpOutput out(Serial); MP3EncoderLAME mp3(out); AudioInfoLAME info; diff --git a/tests-cmake/codec/mp3-mad/mp3-mad.cpp b/tests-cmake/codec/mp3-mad/mp3-mad.cpp index 66628a7f86..3dd507e65d 100644 --- a/tests-cmake/codec/mp3-mad/mp3-mad.cpp +++ b/tests-cmake/codec/mp3-mad/mp3-mad.cpp @@ -5,8 +5,6 @@ #include "AudioTools/AudioLibs/PortAudioStream.h" #include "BabyElephantWalk60_mp3.h" -using namespace audio_tools; - MemoryStream mp3(BabyElephantWalk60_mp3, BabyElephantWalk60_mp3_len); PortAudioStream portaudio_stream; // Output of sound on desktop EncodedAudioStream dec(&portaudio_stream, new MP3DecoderMAD()); // MP3 data source diff --git a/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp b/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp index 5cad3fa347..cbac6384df 100644 --- a/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp +++ b/tests-cmake/codec/mp3-metadata/mp3-metadata.cpp @@ -3,14 +3,11 @@ #include "AudioTools.h" #include "sample-12s.h" -using namespace audio_tools; - MemoryStream mp3(sample_12s_mp3, sample_12s_mp3_len); MetaDataOutput out; StreamCopy copier(out, mp3); // copy in to out bool title_printed = false; - void printMetaData(MetaDataType type, const char* str, int len){ Serial.print("==> "); Serial.print(toStr(type)); diff --git a/tests-cmake/effects/effects.cpp b/tests-cmake/effects/effects.cpp index 
d12fa388d1..ede3f28a52 100644 --- a/tests-cmake/effects/effects.cpp +++ b/tests-cmake/effects/effects.cpp @@ -3,8 +3,6 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/PortAudioStream.h" -using namespace audio_tools; - PortAudioStream out; SineWaveGenerator sine; AudioEffects> effects(sine); diff --git a/tests-cmake/url-test/url-test.cpp b/tests-cmake/url-test/url-test.cpp index 4f82a1b610..282e5ba40d 100644 --- a/tests-cmake/url-test/url-test.cpp +++ b/tests-cmake/url-test/url-test.cpp @@ -1,6 +1,5 @@ #include "AudioTools.h" - -using namespace audio_tools; +#include "AudioTools/Communication/AudioHttp.h" URLStream url("ssid","password"); NullStream null_out; // final output of decoded stream From adb3e6f6069bbbba077fdc6e09f54b9c0268345f Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 08:00:32 +0200 Subject: [PATCH 04/15] move Player Remote Control to Communication --- .../AudioPlayer}/AudioPlayerProtocol.h | 0 .../AudioPlayer}/AudioPlayerProtocolServer.h | 0 .../{AudioLibs => Communication/AudioPlayer}/KARadioProtocol.h | 0 .../AudioPlayer}/KARadioProtocolServer.h | 0 src/AudioTools/Communication/AudioPlayer/README.md | 2 ++ 5 files changed, 2 insertions(+) rename src/AudioTools/{AudioLibs => Communication/AudioPlayer}/AudioPlayerProtocol.h (100%) rename src/AudioTools/{AudioLibs => Communication/AudioPlayer}/AudioPlayerProtocolServer.h (100%) rename src/AudioTools/{AudioLibs => Communication/AudioPlayer}/KARadioProtocol.h (100%) rename src/AudioTools/{AudioLibs => Communication/AudioPlayer}/KARadioProtocolServer.h (100%) create mode 100644 src/AudioTools/Communication/AudioPlayer/README.md diff --git a/src/AudioTools/AudioLibs/AudioPlayerProtocol.h b/src/AudioTools/Communication/AudioPlayer/AudioPlayerProtocol.h similarity index 100% rename from src/AudioTools/AudioLibs/AudioPlayerProtocol.h rename to src/AudioTools/Communication/AudioPlayer/AudioPlayerProtocol.h diff --git a/src/AudioTools/AudioLibs/AudioPlayerProtocolServer.h b/src/AudioTools/Communication/AudioPlayer/AudioPlayerProtocolServer.h similarity index 100% rename from src/AudioTools/AudioLibs/AudioPlayerProtocolServer.h rename to src/AudioTools/Communication/AudioPlayer/AudioPlayerProtocolServer.h diff --git a/src/AudioTools/AudioLibs/KARadioProtocol.h b/src/AudioTools/Communication/AudioPlayer/KARadioProtocol.h similarity index 100% rename from src/AudioTools/AudioLibs/KARadioProtocol.h rename to src/AudioTools/Communication/AudioPlayer/KARadioProtocol.h diff --git a/src/AudioTools/AudioLibs/KARadioProtocolServer.h b/src/AudioTools/Communication/AudioPlayer/KARadioProtocolServer.h similarity index 100% rename from src/AudioTools/AudioLibs/KARadioProtocolServer.h rename to src/AudioTools/Communication/AudioPlayer/KARadioProtocolServer.h diff --git a/src/AudioTools/Communication/AudioPlayer/README.md b/src/AudioTools/Communication/AudioPlayer/README.md new file mode 100644 index 0000000000..8ce2910624 --- /dev/null +++ b/src/AudioTools/Communication/AudioPlayer/README.md @@ -0,0 +1,2 @@ + +Remote control for AudioPlayer \ No newline at end of file From b9cc5ca87461727b58dfa850879ed599df2bb988 Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 08:12:59 +0200 Subject: [PATCH 05/15] AudioPlayerContro.h --- src/AudioTools/Communication/AudioPlayerControl.h | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 src/AudioTools/Communication/AudioPlayerControl.h diff --git a/src/AudioTools/Communication/AudioPlayerControl.h b/src/AudioTools/Communication/AudioPlayerControl.h new file 
mode 100644 index 0000000000..bfdfdaa4bd --- /dev/null +++ b/src/AudioTools/Communication/AudioPlayerControl.h @@ -0,0 +1,6 @@ +#pragma once +// Remote Control for AudioPlayer +#include "AudioPlayer/AudioPlayerProtocol.h" +#include "AudioPlayer/AudioPlayerProtocolServer.h" +#include "AudioPlayer/KARadioProtocol.h" +#include "AudioPlayer/KARadioProtocolServer.h" \ No newline at end of file From d0e14e5e650f79e959aebf9e8a8b2faa8280d2df Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 09:11:34 +0200 Subject: [PATCH 06/15] A2DPStream to Communication --- .../basic-a2dp-audiokit.ino | 2 +- .../basic-a2dp-eq-audiokit.ino | 2 +- .../basic-audiokit-a2dp.ino | 2 +- .../basic-file_mp3-a2dp.ino | 2 +- .../basic-generator-a2dp.ino | 2 +- .../a2dp/basic-i2s-a2dp/basic-i2s-a2dp.ino | 2 +- .../basic-player-a2dp/basic-player-a2dp.ino | 2 +- .../player-sd_a2dp-audiokit.ino | 2 +- .../player-sdfat-a2dp/player-sdfat-a2dp.ino | 2 +- .../streams-a2dp-audiokit.ino | 2 +- .../streams-a2dp-serial.ino | 2 +- .../streams-generator-a2dp.ino | 2 +- .../streams-i2s-a2dp/streams-i2s-a2dp.ino | 2 +- .../control_gain-a2dp/control_gain-a2dp.ino | 2 +- .../streams-simple_tts-a2dp.ino | 2 +- .../streams-talkie-a2dp.ino | 2 +- src/AudioTools/AudioLibs/A2DPStream.h | 417 +----------------- src/AudioTools/Communication/A2DPStream.h | 416 +++++++++++++++++ 18 files changed, 434 insertions(+), 431 deletions(-) create mode 100644 src/AudioTools/Communication/A2DPStream.h diff --git a/examples/examples-communication/a2dp/basic-a2dp-audiokit/basic-a2dp-audiokit.ino b/examples/examples-communication/a2dp/basic-a2dp-audiokit/basic-a2dp-audiokit.ino index aa33497b4d..3ed0711c41 100644 --- a/examples/examples-communication/a2dp/basic-a2dp-audiokit/basic-a2dp-audiokit.ino +++ b/examples/examples-communication/a2dp/basic-a2dp-audiokit/basic-a2dp-audiokit.ino @@ -7,7 +7,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" // install https://github.com/pschatzmann/ESP32-A2DP +#include "AudioTools/Communication/A2DPStream.h" // install https://github.com/pschatzmann/ESP32-A2DP #include "AudioTools/AudioLibs/AudioBoardStream.h" // install https://github.com/pschatzmann/arduino-audio-driver diff --git a/examples/examples-communication/a2dp/basic-a2dp-eq-audiokit/basic-a2dp-eq-audiokit.ino b/examples/examples-communication/a2dp/basic-a2dp-eq-audiokit/basic-a2dp-eq-audiokit.ino index f4dac39893..8f573e6f0b 100644 --- a/examples/examples-communication/a2dp/basic-a2dp-eq-audiokit/basic-a2dp-eq-audiokit.ino +++ b/examples/examples-communication/a2dp/basic-a2dp-eq-audiokit/basic-a2dp-eq-audiokit.ino @@ -7,7 +7,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" // install https://github.com/pschatzmann/ESP32-A2DP +#include "AudioTools/Communication/A2DPStream.h" // install https://github.com/pschatzmann/ESP32-A2DP #include "AudioTools/AudioLibs/AudioBoardStream.h" // install https://github.com/pschatzmann/arduino-audio-driver diff --git a/examples/examples-communication/a2dp/basic-audiokit-a2dp/basic-audiokit-a2dp.ino b/examples/examples-communication/a2dp/basic-audiokit-a2dp/basic-audiokit-a2dp.ino index 965c0c0d75..c1f5bc8935 100644 --- a/examples/examples-communication/a2dp/basic-audiokit-a2dp/basic-audiokit-a2dp.ino +++ b/examples/examples-communication/a2dp/basic-audiokit-a2dp/basic-audiokit-a2dp.ino @@ -7,7 +7,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include 
"AudioTools/Communication/A2DPStream.h" AudioInfo info(44100, 2, 16); BluetoothA2DPSource a2dp_source; diff --git a/examples/examples-communication/a2dp/basic-file_mp3-a2dp/basic-file_mp3-a2dp.ino b/examples/examples-communication/a2dp/basic-file_mp3-a2dp/basic-file_mp3-a2dp.ino index 15408212f1..f7238a593e 100644 --- a/examples/examples-communication/a2dp/basic-file_mp3-a2dp/basic-file_mp3-a2dp.ino +++ b/examples/examples-communication/a2dp/basic-file_mp3-a2dp/basic-file_mp3-a2dp.ino @@ -3,7 +3,7 @@ #include "SPI.h" #include "SD.h" #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" //#include "AudioTools/AudioLibs/AudioBoardStream.h" // for SPI pins diff --git a/examples/examples-communication/a2dp/basic-generator-a2dp/basic-generator-a2dp.ino b/examples/examples-communication/a2dp/basic-generator-a2dp/basic-generator-a2dp.ino index a8b9e29d48..1a1587d0d2 100644 --- a/examples/examples-communication/a2dp/basic-generator-a2dp/basic-generator-a2dp.ino +++ b/examples/examples-communication/a2dp/basic-generator-a2dp/basic-generator-a2dp.ino @@ -6,7 +6,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" const char* name = "LEXON MINO L"; // Replace with your bluetooth speaker name SineWaveGenerator sineWave(15000); // subclass of SoundGenerator, set max amplitude (=volume) diff --git a/examples/examples-communication/a2dp/basic-i2s-a2dp/basic-i2s-a2dp.ino b/examples/examples-communication/a2dp/basic-i2s-a2dp/basic-i2s-a2dp.ino index dba0f2b9c5..c0ea09acca 100644 --- a/examples/examples-communication/a2dp/basic-i2s-a2dp/basic-i2s-a2dp.ino +++ b/examples/examples-communication/a2dp/basic-i2s-a2dp/basic-i2s-a2dp.ino @@ -8,7 +8,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" AudioInfo info32(44100, 2, 32); AudioInfo info16(44100, 2, 16); diff --git a/examples/examples-communication/a2dp/basic-player-a2dp/basic-player-a2dp.ino b/examples/examples-communication/a2dp/basic-player-a2dp/basic-player-a2dp.ino index 06638dacc3..6065d812a1 100644 --- a/examples/examples-communication/a2dp/basic-player-a2dp/basic-player-a2dp.ino +++ b/examples/examples-communication/a2dp/basic-player-a2dp/basic-player-a2dp.ino @@ -11,7 +11,7 @@ */ #include "AudioTools.h" - #include "AudioTools/AudioLibs/A2DPStream.h" + #include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/Disk/AudioSourceSDFAT.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" //#include "AudioTools/AudioLibs/AudioBoardStream.h" // for SD Pins diff --git a/examples/examples-communication/a2dp/player-sd_a2dp-audiokit/player-sd_a2dp-audiokit.ino b/examples/examples-communication/a2dp/player-sd_a2dp-audiokit/player-sd_a2dp-audiokit.ino index 1de620e9ad..ee91b5917c 100644 --- a/examples/examples-communication/a2dp/player-sd_a2dp-audiokit/player-sd_a2dp-audiokit.ino +++ b/examples/examples-communication/a2dp/player-sd_a2dp-audiokit/player-sd_a2dp-audiokit.ino @@ -12,7 +12,7 @@ // install https://github.com/greiman/SdFat.git #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" #include "AudioTools/Disk/AudioSourceSDFAT.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" diff --git a/examples/examples-communication/a2dp/player-sdfat-a2dp/player-sdfat-a2dp.ino 
b/examples/examples-communication/a2dp/player-sdfat-a2dp/player-sdfat-a2dp.ino index 596410438a..550366b838 100644 --- a/examples/examples-communication/a2dp/player-sdfat-a2dp/player-sdfat-a2dp.ino +++ b/examples/examples-communication/a2dp/player-sdfat-a2dp/player-sdfat-a2dp.ino @@ -10,7 +10,7 @@ #define HELIX_LOGGING_ACTIVE false #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/Disk/AudioSourceSDFAT.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" diff --git a/examples/examples-communication/a2dp/streams-a2dp-audiokit/streams-a2dp-audiokit.ino b/examples/examples-communication/a2dp/streams-a2dp-audiokit/streams-a2dp-audiokit.ino index 15daf91042..60d5d844ff 100644 --- a/examples/examples-communication/a2dp/streams-a2dp-audiokit/streams-a2dp-audiokit.ino +++ b/examples/examples-communication/a2dp/streams-a2dp-audiokit/streams-a2dp-audiokit.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" diff --git a/examples/examples-communication/a2dp/streams-a2dp-serial/streams-a2dp-serial.ino b/examples/examples-communication/a2dp/streams-a2dp-serial/streams-a2dp-serial.ino index fd040ad589..d8e2086f0c 100644 --- a/examples/examples-communication/a2dp/streams-a2dp-serial/streams-a2dp-serial.ino +++ b/examples/examples-communication/a2dp/streams-a2dp-serial/streams-a2dp-serial.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" A2DPStream in; diff --git a/examples/examples-communication/a2dp/streams-generator-a2dp/streams-generator-a2dp.ino b/examples/examples-communication/a2dp/streams-generator-a2dp/streams-generator-a2dp.ino index fc14b57d8c..5b5387ddc6 100644 --- a/examples/examples-communication/a2dp/streams-generator-a2dp/streams-generator-a2dp.ino +++ b/examples/examples-communication/a2dp/streams-generator-a2dp/streams-generator-a2dp.ino @@ -9,7 +9,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" const char* name = "LEXON MINO L"; // Replace with your device name AudioInfo info(44100, 2, 16); diff --git a/examples/examples-communication/a2dp/streams-i2s-a2dp/streams-i2s-a2dp.ino b/examples/examples-communication/a2dp/streams-i2s-a2dp/streams-i2s-a2dp.ino index fe90354e9b..9f66d55772 100644 --- a/examples/examples-communication/a2dp/streams-i2s-a2dp/streams-i2s-a2dp.ino +++ b/examples/examples-communication/a2dp/streams-i2s-a2dp/streams-i2s-a2dp.ino @@ -8,7 +8,7 @@ */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" I2SStream i2sStream; // Access I2S as stream A2DPStream a2dpStream; // access A2DP as stream diff --git a/examples/examples-dsp/examples-mozzi/control_gain-a2dp/control_gain-a2dp.ino b/examples/examples-dsp/examples-mozzi/control_gain-a2dp/control_gain-a2dp.ino index 3356bde6f0..25361f1461 100644 --- a/examples/examples-dsp/examples-mozzi/control_gain-a2dp/control_gain-a2dp.ino +++ b/examples/examples-dsp/examples-mozzi/control_gain-a2dp/control_gain-a2dp.ino @@ -5,7 +5,7 @@ * A2DP base API with a callback */ #include "AudioTools.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "AudioTools/AudioLibs/MozziStream.h" #include // oscillator template 
#include // sine table for oscillator diff --git a/examples/examples-tts/streams-simple_tts-a2dp/streams-simple_tts-a2dp.ino b/examples/examples-tts/streams-simple_tts-a2dp/streams-simple_tts-a2dp.ino index 9869597cf9..261ab9ea78 100644 --- a/examples/examples-tts/streams-simple_tts-a2dp/streams-simple_tts-a2dp.ino +++ b/examples/examples-tts/streams-simple_tts-a2dp/streams-simple_tts-a2dp.ino @@ -8,7 +8,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "SimpleTTS.h" const char* name = "LEXON MINO L"; // Replace with your device name diff --git a/examples/examples-tts/streams-talkie-a2dp/streams-talkie-a2dp.ino b/examples/examples-tts/streams-talkie-a2dp/streams-talkie-a2dp.ino index be4bcaf8d9..74e7dd9e57 100644 --- a/examples/examples-tts/streams-talkie-a2dp/streams-talkie-a2dp.ino +++ b/examples/examples-tts/streams-talkie-a2dp/streams-talkie-a2dp.ino @@ -7,7 +7,7 @@ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" -#include "AudioTools/AudioLibs/A2DPStream.h" +#include "AudioTools/Communication/A2DPStream.h" #include "TalkiePCM.h" // https://github.com/pschatzmann/TalkiePCM #include "Vocab_US_Large.h" diff --git a/src/AudioTools/AudioLibs/A2DPStream.h b/src/AudioTools/AudioLibs/A2DPStream.h index 7b0d7ec9c0..1d6eecac11 100644 --- a/src/AudioTools/AudioLibs/A2DPStream.h +++ b/src/AudioTools/AudioLibs/A2DPStream.h @@ -1,416 +1,3 @@ -/** - * @file A2DPStream.h - * @author Phil Schatzmann - * @brief A2DP Support via Arduino Streams - * @copyright GPLv3 - * - */ #pragma once - -#include "AudioToolsConfig.h" - -#include "AudioTools.h" -#include "BluetoothA2DPSink.h" -#include "BluetoothA2DPSource.h" -#include "AudioTools/CoreAudio/AudioStreams.h" -#include "AudioTools/Concurrency/RTOS/BufferRTOS.h" -#include "AudioTools/CoreAudio/AudioBasic/StrView.h" - - -namespace audio_tools { - -class A2DPStream; -static A2DPStream *A2DPStream_self=nullptr; -// buffer which is used to exchange data -static BufferRTOSa2dp_buffer{0, A2DP_BUFFER_SIZE, portMAX_DELAY, portMAX_DELAY}; -// flag to indicated that we are ready to process data -static bool is_a2dp_active = false; - -int32_t a2dp_stream_source_sound_data(Frame* data, int32_t len); -void a2dp_stream_sink_sound_data(const uint8_t* data, uint32_t len); - -/// A2DP Startup Logic -enum A2DPStartLogic {StartWhenBufferFull, StartOnConnect}; -/// A2DP Action when there is no data -enum A2DPNoData {A2DPSilence, A2DPWhoosh}; - -/** - * @brief Configuration for A2DPStream - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class A2DPConfig { - public: - /// Logic when the processing is activated (default StartWhenBufferFull) - A2DPStartLogic startup_logic = StartWhenBufferFull; - /// Action when a2dp is not active yet (default A2DPSilence) - A2DPNoData startup_nodata = A2DPSilence; - /// Mode: TX_MODE or RX_MODE (default RX_MODE) - RxTxMode mode = RX_MODE; - /// A2DP name (default A2DP) - const char* name = "A2DP"; - /// automatically reconnect if connection is lost (default false) - bool auto_reconnect = false; - int buffer_size = A2DP_BUFFER_SIZE * A2DP_BUFFER_COUNT; - /// Delay in ms which is added to each write (default 1) - int delay_ms = 1; - /// when a2dp source is active but has no data we generate silence data (default false) - bool silence_on_nodata = false; - /// write timeout in ms: -1 is blocking write (default -1) - int tx_write_timeout_ms = -1; // no timeout - /// begin should wait for 
connection to be established (default true) - bool wait_for_connection=true; -}; - - -/** - * @brief Stream support for A2DP using https://github.com/pschatzmann/ESP32-A2DP: - * begin(TX_MODE) opens a a2dp_source and begin(RX_MODE) a a2dp_sink. - * The data is in int16_t with 2 channels at 44100 hertz. - * We support only one instance of the class! - * Please note that this is a conveniance class that supports the stream api, - * however this is rather inefficient, beause quite a big buffer needs to be allocated. - * It is recommended to use the API with the callbacks. Examples can be found in the a2dp - * examples directory starting with basic. - * - * Requires: https://github.com/pschatzmann/ESP32-A2DP - * - * @ingroup io - * @ingroup communications - * @author Phil Schatzmann - * @copyright GPLv3 - */ -class A2DPStream : public AudioStream, public VolumeSupport { - - public: - A2DPStream() { - TRACED(); - // A2DPStream can only be used once - assert(A2DPStream_self==nullptr); - A2DPStream_self = this; - info.bits_per_sample = 16; - info.sample_rate = 44100; - info.channels = 2; - } - - /// Release the allocate a2dp_source or a2dp_sink - ~A2DPStream(){ - TRACED(); - if (a2dp_source!=nullptr) delete a2dp_source; - if (a2dp_sink!=nullptr) delete a2dp_sink; - A2DPStream_self = nullptr; - } - - A2DPConfig defaultConfig(RxTxMode mode=RX_MODE){ - A2DPConfig cfg; - cfg.mode = mode; - if(mode==TX_MODE){ - cfg.name="[Unknown]"; - } - return cfg; - } - - /// provides access to the BluetoothA2DPSource - BluetoothA2DPSource &source() { - if (a2dp_source==nullptr){ - a2dp = a2dp_source = new BluetoothA2DPSource(); - } - return *a2dp_source; - } - - /// provides access to the BluetoothA2DPSink - BluetoothA2DPSink &sink(){ - if (a2dp_sink==nullptr){ - a2dp = a2dp_sink = new BluetoothA2DPSink(); - } - return *a2dp_sink; - } - - /// Starts the processing - bool begin(RxTxMode mode, const char* name, bool wait_for_connection=true){ - A2DPConfig cfg; - cfg.mode = mode; - cfg.name = name; - cfg.wait_for_connection = wait_for_connection; - return begin(cfg); - } - - /// Starts the processing - bool begin(A2DPConfig cfg){ - this->config = cfg; - bool result = false; - LOGI("Connecting to %s",cfg.name); - - if (!a2dp_buffer.resize(cfg.buffer_size)){ - LOGE("a2dp_buffer resize failed"); - return false; - } - - // initialize a2dp_silence_timeout - if (config.silence_on_nodata){ - LOGI("Using StartOnConnect") - config.startup_logic = StartOnConnect; - } - - switch (cfg.mode){ - case TX_MODE: - LOGI("Starting a2dp_source..."); - source(); // allocate object - a2dp_source->set_auto_reconnect(cfg.auto_reconnect); - a2dp_source->set_volume(volume() * A2DP_MAX_VOL); - if(StrView(cfg.name).equals("[Unknown]")){ - //search next available device - a2dp_source->set_ssid_callback(detected_device); - } - a2dp_source->set_on_connection_state_changed(a2dp_state_callback, this); - a2dp_source->start_raw((char*)cfg.name, a2dp_stream_source_sound_data); - if (cfg.wait_for_connection){ - while(!a2dp_source->is_connected()){ - LOGD("waiting for connection"); - delay(1000); - } - LOGI("a2dp_source is connected..."); - notify_base_Info(44100); - //is_a2dp_active = true; - } - else{ - LOGI("a2dp_source started without connecting"); - } - result = true; - break; - - case RX_MODE: - LOGI("Starting a2dp_sink..."); - sink(); // allocate object - a2dp_sink->set_auto_reconnect(cfg.auto_reconnect); - a2dp_sink->set_stream_reader(&a2dp_stream_sink_sound_data, false); - a2dp_sink->set_volume(volume() * A2DP_MAX_VOL); - 
a2dp_sink->set_on_connection_state_changed(a2dp_state_callback, this); - a2dp_sink->set_sample_rate_callback(sample_rate_callback); - a2dp_sink->start((char*)cfg.name); - if (cfg.wait_for_connection){ - while(!a2dp_sink->is_connected()){ - LOGD("waiting for connection"); - delay(1000); - } - LOGI("a2dp_sink is connected..."); - } - else{ - LOGI("a2dp_sink started without connection"); - } - is_a2dp_active = true; - result = true; - break; - default: - LOGE("Undefined mode: %d", cfg.mode); - break; - } - - return result; - } - - void end() override { - if (a2dp != nullptr) { - a2dp->disconnect(); - } - AudioStream::end(); - } - - /// checks if we are connected - bool isConnected() { - if (a2dp_source==nullptr && a2dp_sink==nullptr) return false; - if (a2dp_source!=nullptr) return a2dp_source->is_connected(); - return a2dp_sink->is_connected(); - } - - /// is ready to process data - bool isReady() { - return is_a2dp_active; - } - - /// convert to bool - operator bool() { - return isReady(); - } - - /// Writes the data into a temporary send buffer - where it can be picked up by the callback - size_t write(const uint8_t* data, size_t len) override { - LOGD("%s: %zu", LOG_METHOD, len); - if (config.mode == TX_MODE){ - // at 80% we activate the processing - if(!is_a2dp_active - && config.startup_logic == StartWhenBufferFull - && a2dp_buffer.available() >= 0.8f * a2dp_buffer.size()){ - LOGI("set active"); - is_a2dp_active = true; - } - - // blocking write: if buffer is full we wait - int timeout = config.tx_write_timeout_ms; - int wait_time = 5; - size_t free = a2dp_buffer.availableForWrite(); - while(len > free){ - LOGD("Waiting for buffer: writing %d > available %d", (int) len, (int) free); - if (timeout > 0) { - timeout -= wait_time; - if (timeout <= 0) return 0; - } - delay(wait_time); - free = a2dp_buffer.availableForWrite(); - } - } - - // write to buffer - size_t result = a2dp_buffer.writeArray(data, len); - LOGD("write %d -> %d", len, result); - if (config.mode == TX_MODE){ - // give the callback a chance to retrieve the data - delay(config.delay_ms); - } - return result; - } - - /// Reads the data from the temporary buffer - size_t readBytes(uint8_t *data, size_t len) override { - if (!is_a2dp_active){ - LOGW( "readBytes failed because !is_a2dp_active"); - return 0; - } - LOGD("readBytes %d", len); - size_t result = a2dp_buffer.readArray(data, len); - LOGI("readBytes %d->%d", len,result); - return result; - } - - /// Provides the number of bytes available to read - int available() override { - // only supported in tx mode - if (config.mode!=RX_MODE) return 0; - return a2dp_buffer.available(); - } - - /// Provides the number of bytes available to write - int availableForWrite() override { - // only supported in tx mode - if (config.mode!=TX_MODE ) return 0; - // return infor from buffer - return a2dp_buffer.availableForWrite(); - } - - /// Define the volume (values between 0.0 and 1.0) - bool setVolume(float volume) override { - VolumeSupport::setVolume(volume); - // 128 is max volume - if (a2dp!=nullptr) a2dp->set_volume(volume * A2DP_MAX_VOL); - return true; - } - - /// Provides access to the buffer - BaseBuffer &buffer() { - return a2dp_buffer; - } - - /// Manage config.silence_on_nodata dynamically. 
- void setSilenceOnNoData(bool silence){ - config.silence_on_nodata = silence; - } - - /// Clears the buffer - void clear(){ - // set inactive if necessary - if (config.startup_logic == StartWhenBufferFull){ - is_a2dp_active = false; - } - a2dp_buffer.clear(); - } - - protected: - A2DPConfig config; - BluetoothA2DPSource *a2dp_source = nullptr; - BluetoothA2DPSink *a2dp_sink = nullptr; - BluetoothA2DPCommon *a2dp=nullptr; - const int A2DP_MAX_VOL = 128; - - // auto-detect device to send audio to (TX-Mode) - static bool detected_device(const char* ssid, esp_bd_addr_t address, int rssi){ - LOGW("found Device: %s rssi: %d", ssid, rssi); - //filter out weak signals - return (rssi > -75); - } - - static void a2dp_state_callback(esp_a2d_connection_state_t state, void *caller){ - TRACED(); - A2DPStream *self = (A2DPStream*)caller; - if (state==ESP_A2D_CONNECTION_STATE_CONNECTED && self->config.startup_logic==StartOnConnect){ - is_a2dp_active = true; - } - LOGW("==> state: %s", self->a2dp->to_str(state)); - } - - - // callback used by A2DP to provide the a2dp_source sound data - static int32_t a2dp_stream_source_sound_data(uint8_t* data, int32_t len) { - int32_t result_len = 0; - A2DPConfig config = A2DPStream_self->config; - - // at first call we start with some empty data - if (is_a2dp_active){ - // the data in the file must be in int16 with 2 channels - yield(); - result_len = a2dp_buffer.readArray((uint8_t*)data, len); - - // provide silence data - if (config.silence_on_nodata && result_len == 0){ - memset(data,0, len); - result_len = len; - } - } else { - - // prevent underflow on first call - switch (config.startup_nodata) { - case A2DPSilence: - memset(data, 0, len); - break; - case A2DPWhoosh: - int16_t *data16 = (int16_t*)data; - for (int j=0;j %d", len, result_len); - return result_len; - } - - /// callback used by A2DP to write the sound data - static void a2dp_stream_sink_sound_data(const uint8_t* data, uint32_t len) { - if (is_a2dp_active){ - uint32_t result_len = a2dp_buffer.writeArray(data, len); - LOGD("a2dp_stream_sink_sound_data %d -> %d", len, result_len); - } - } - - /// notify subscriber with AudioInfo - void notify_base_Info(int rate){ - AudioInfo info; - info.channels = 2; - info.bits_per_sample = 16; - info.sample_rate = rate; - notifyAudioChange(info); - } - - /// callback to update audio info with used a2dp sample rate - static void sample_rate_callback(uint16_t rate) { - A2DPStream_self->info.sample_rate = rate; - A2DPStream_self->notify_base_Info(rate); - } - -}; - -} +#warning("obsolete: use AudioTools/Communication/A2DPStream.h") +#include "AudioTools/Communication/A2DPStream.h" \ No newline at end of file diff --git a/src/AudioTools/Communication/A2DPStream.h b/src/AudioTools/Communication/A2DPStream.h new file mode 100644 index 0000000000..7b0d7ec9c0 --- /dev/null +++ b/src/AudioTools/Communication/A2DPStream.h @@ -0,0 +1,416 @@ +/** + * @file A2DPStream.h + * @author Phil Schatzmann + * @brief A2DP Support via Arduino Streams + * @copyright GPLv3 + * + */ +#pragma once + +#include "AudioToolsConfig.h" + +#include "AudioTools.h" +#include "BluetoothA2DPSink.h" +#include "BluetoothA2DPSource.h" +#include "AudioTools/CoreAudio/AudioStreams.h" +#include "AudioTools/Concurrency/RTOS/BufferRTOS.h" +#include "AudioTools/CoreAudio/AudioBasic/StrView.h" + + +namespace audio_tools { + +class A2DPStream; +static A2DPStream *A2DPStream_self=nullptr; +// buffer which is used to exchange data +static BufferRTOSa2dp_buffer{0, A2DP_BUFFER_SIZE, portMAX_DELAY, portMAX_DELAY}; +// 
flag to indicate that we are ready to process data +static bool is_a2dp_active = false; + +int32_t a2dp_stream_source_sound_data(Frame* data, int32_t len); +void a2dp_stream_sink_sound_data(const uint8_t* data, uint32_t len); + +/// A2DP Startup Logic +enum A2DPStartLogic {StartWhenBufferFull, StartOnConnect}; +/// A2DP Action when there is no data +enum A2DPNoData {A2DPSilence, A2DPWhoosh}; + +/** + * @brief Configuration for A2DPStream + * @author Phil Schatzmann + * @copyright GPLv3 + */ +class A2DPConfig { + public: + /// Logic when the processing is activated (default StartWhenBufferFull) + A2DPStartLogic startup_logic = StartWhenBufferFull; + /// Action when a2dp is not active yet (default A2DPSilence) + A2DPNoData startup_nodata = A2DPSilence; + /// Mode: TX_MODE or RX_MODE (default RX_MODE) + RxTxMode mode = RX_MODE; + /// A2DP name (default A2DP) + const char* name = "A2DP"; + /// automatically reconnect if connection is lost (default false) + bool auto_reconnect = false; + int buffer_size = A2DP_BUFFER_SIZE * A2DP_BUFFER_COUNT; + /// Delay in ms which is added to each write (default 1) + int delay_ms = 1; + /// when a2dp source is active but has no data we generate silence data (default false) + bool silence_on_nodata = false; + /// write timeout in ms: -1 is blocking write (default -1) + int tx_write_timeout_ms = -1; // no timeout + /// begin should wait for connection to be established (default true) + bool wait_for_connection=true; +}; + + +/** + * @brief Stream support for A2DP using https://github.com/pschatzmann/ESP32-A2DP: + * begin(TX_MODE) opens an a2dp_source and begin(RX_MODE) an a2dp_sink. + * The data is in int16_t with 2 channels at 44100 hertz. + * We support only one instance of the class! + * Please note that this is a convenience class that supports the stream API, + * however this is rather inefficient, because quite a big buffer needs to be allocated. + * It is recommended to use the API with the callbacks. Examples can be found in the a2dp + * examples directory starting with basic.
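+ *
+ * Illustrative TX-mode sketch based on the API above (the device name below is a placeholder,
+ * not part of the library):
+ *
+ *   A2DPStream a2dp;
+ *   auto cfg = a2dp.defaultConfig(TX_MODE);
+ *   cfg.name = "MySpeaker";   // name of the Bluetooth speaker to connect to
+ *   a2dp.begin(cfg);
+ *   // then write int16_t stereo samples at 44100 Hz to a2dp, e.g. via StreamCopy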
+ * + * Requires: https://github.com/pschatzmann/ESP32-A2DP + * + * @ingroup io + * @ingroup communications + * @author Phil Schatzmann + * @copyright GPLv3 + */ +class A2DPStream : public AudioStream, public VolumeSupport { + + public: + A2DPStream() { + TRACED(); + // A2DPStream can only be used once + assert(A2DPStream_self==nullptr); + A2DPStream_self = this; + info.bits_per_sample = 16; + info.sample_rate = 44100; + info.channels = 2; + } + + /// Release the allocate a2dp_source or a2dp_sink + ~A2DPStream(){ + TRACED(); + if (a2dp_source!=nullptr) delete a2dp_source; + if (a2dp_sink!=nullptr) delete a2dp_sink; + A2DPStream_self = nullptr; + } + + A2DPConfig defaultConfig(RxTxMode mode=RX_MODE){ + A2DPConfig cfg; + cfg.mode = mode; + if(mode==TX_MODE){ + cfg.name="[Unknown]"; + } + return cfg; + } + + /// provides access to the BluetoothA2DPSource + BluetoothA2DPSource &source() { + if (a2dp_source==nullptr){ + a2dp = a2dp_source = new BluetoothA2DPSource(); + } + return *a2dp_source; + } + + /// provides access to the BluetoothA2DPSink + BluetoothA2DPSink &sink(){ + if (a2dp_sink==nullptr){ + a2dp = a2dp_sink = new BluetoothA2DPSink(); + } + return *a2dp_sink; + } + + /// Starts the processing + bool begin(RxTxMode mode, const char* name, bool wait_for_connection=true){ + A2DPConfig cfg; + cfg.mode = mode; + cfg.name = name; + cfg.wait_for_connection = wait_for_connection; + return begin(cfg); + } + + /// Starts the processing + bool begin(A2DPConfig cfg){ + this->config = cfg; + bool result = false; + LOGI("Connecting to %s",cfg.name); + + if (!a2dp_buffer.resize(cfg.buffer_size)){ + LOGE("a2dp_buffer resize failed"); + return false; + } + + // initialize a2dp_silence_timeout + if (config.silence_on_nodata){ + LOGI("Using StartOnConnect") + config.startup_logic = StartOnConnect; + } + + switch (cfg.mode){ + case TX_MODE: + LOGI("Starting a2dp_source..."); + source(); // allocate object + a2dp_source->set_auto_reconnect(cfg.auto_reconnect); + a2dp_source->set_volume(volume() * A2DP_MAX_VOL); + if(StrView(cfg.name).equals("[Unknown]")){ + //search next available device + a2dp_source->set_ssid_callback(detected_device); + } + a2dp_source->set_on_connection_state_changed(a2dp_state_callback, this); + a2dp_source->start_raw((char*)cfg.name, a2dp_stream_source_sound_data); + if (cfg.wait_for_connection){ + while(!a2dp_source->is_connected()){ + LOGD("waiting for connection"); + delay(1000); + } + LOGI("a2dp_source is connected..."); + notify_base_Info(44100); + //is_a2dp_active = true; + } + else{ + LOGI("a2dp_source started without connecting"); + } + result = true; + break; + + case RX_MODE: + LOGI("Starting a2dp_sink..."); + sink(); // allocate object + a2dp_sink->set_auto_reconnect(cfg.auto_reconnect); + a2dp_sink->set_stream_reader(&a2dp_stream_sink_sound_data, false); + a2dp_sink->set_volume(volume() * A2DP_MAX_VOL); + a2dp_sink->set_on_connection_state_changed(a2dp_state_callback, this); + a2dp_sink->set_sample_rate_callback(sample_rate_callback); + a2dp_sink->start((char*)cfg.name); + if (cfg.wait_for_connection){ + while(!a2dp_sink->is_connected()){ + LOGD("waiting for connection"); + delay(1000); + } + LOGI("a2dp_sink is connected..."); + } + else{ + LOGI("a2dp_sink started without connection"); + } + is_a2dp_active = true; + result = true; + break; + default: + LOGE("Undefined mode: %d", cfg.mode); + break; + } + + return result; + } + + void end() override { + if (a2dp != nullptr) { + a2dp->disconnect(); + } + AudioStream::end(); + } + + /// checks if we are connected + bool 
isConnected() { + if (a2dp_source==nullptr && a2dp_sink==nullptr) return false; + if (a2dp_source!=nullptr) return a2dp_source->is_connected(); + return a2dp_sink->is_connected(); + } + + /// is ready to process data + bool isReady() { + return is_a2dp_active; + } + + /// convert to bool + operator bool() { + return isReady(); + } + + /// Writes the data into a temporary send buffer - where it can be picked up by the callback + size_t write(const uint8_t* data, size_t len) override { + LOGD("%s: %zu", LOG_METHOD, len); + if (config.mode == TX_MODE){ + // at 80% we activate the processing + if(!is_a2dp_active + && config.startup_logic == StartWhenBufferFull + && a2dp_buffer.available() >= 0.8f * a2dp_buffer.size()){ + LOGI("set active"); + is_a2dp_active = true; + } + + // blocking write: if buffer is full we wait + int timeout = config.tx_write_timeout_ms; + int wait_time = 5; + size_t free = a2dp_buffer.availableForWrite(); + while(len > free){ + LOGD("Waiting for buffer: writing %d > available %d", (int) len, (int) free); + if (timeout > 0) { + timeout -= wait_time; + if (timeout <= 0) return 0; + } + delay(wait_time); + free = a2dp_buffer.availableForWrite(); + } + } + + // write to buffer + size_t result = a2dp_buffer.writeArray(data, len); + LOGD("write %d -> %d", len, result); + if (config.mode == TX_MODE){ + // give the callback a chance to retrieve the data + delay(config.delay_ms); + } + return result; + } + + /// Reads the data from the temporary buffer + size_t readBytes(uint8_t *data, size_t len) override { + if (!is_a2dp_active){ + LOGW( "readBytes failed because !is_a2dp_active"); + return 0; + } + LOGD("readBytes %d", len); + size_t result = a2dp_buffer.readArray(data, len); + LOGI("readBytes %d->%d", len,result); + return result; + } + + /// Provides the number of bytes available to read + int available() override { + // only supported in tx mode + if (config.mode!=RX_MODE) return 0; + return a2dp_buffer.available(); + } + + /// Provides the number of bytes available to write + int availableForWrite() override { + // only supported in tx mode + if (config.mode!=TX_MODE ) return 0; + // return infor from buffer + return a2dp_buffer.availableForWrite(); + } + + /// Define the volume (values between 0.0 and 1.0) + bool setVolume(float volume) override { + VolumeSupport::setVolume(volume); + // 128 is max volume + if (a2dp!=nullptr) a2dp->set_volume(volume * A2DP_MAX_VOL); + return true; + } + + /// Provides access to the buffer + BaseBuffer &buffer() { + return a2dp_buffer; + } + + /// Manage config.silence_on_nodata dynamically. 
+ void setSilenceOnNoData(bool silence){ + config.silence_on_nodata = silence; + } + + /// Clears the buffer + void clear(){ + // set inactive if necessary + if (config.startup_logic == StartWhenBufferFull){ + is_a2dp_active = false; + } + a2dp_buffer.clear(); + } + + protected: + A2DPConfig config; + BluetoothA2DPSource *a2dp_source = nullptr; + BluetoothA2DPSink *a2dp_sink = nullptr; + BluetoothA2DPCommon *a2dp=nullptr; + const int A2DP_MAX_VOL = 128; + + // auto-detect device to send audio to (TX-Mode) + static bool detected_device(const char* ssid, esp_bd_addr_t address, int rssi){ + LOGW("found Device: %s rssi: %d", ssid, rssi); + //filter out weak signals + return (rssi > -75); + } + + static void a2dp_state_callback(esp_a2d_connection_state_t state, void *caller){ + TRACED(); + A2DPStream *self = (A2DPStream*)caller; + if (state==ESP_A2D_CONNECTION_STATE_CONNECTED && self->config.startup_logic==StartOnConnect){ + is_a2dp_active = true; + } + LOGW("==> state: %s", self->a2dp->to_str(state)); + } + + + // callback used by A2DP to provide the a2dp_source sound data + static int32_t a2dp_stream_source_sound_data(uint8_t* data, int32_t len) { + int32_t result_len = 0; + A2DPConfig config = A2DPStream_self->config; + + // at first call we start with some empty data + if (is_a2dp_active){ + // the data in the file must be in int16 with 2 channels + yield(); + result_len = a2dp_buffer.readArray((uint8_t*)data, len); + + // provide silence data + if (config.silence_on_nodata && result_len == 0){ + memset(data,0, len); + result_len = len; + } + } else { + + // prevent underflow on first call + switch (config.startup_nodata) { + case A2DPSilence: + memset(data, 0, len); + break; + case A2DPWhoosh: + int16_t *data16 = (int16_t*)data; + for (int j=0;j %d", len, result_len); + return result_len; + } + + /// callback used by A2DP to write the sound data + static void a2dp_stream_sink_sound_data(const uint8_t* data, uint32_t len) { + if (is_a2dp_active){ + uint32_t result_len = a2dp_buffer.writeArray(data, len); + LOGD("a2dp_stream_sink_sound_data %d -> %d", len, result_len); + } + } + + /// notify subscriber with AudioInfo + void notify_base_Info(int rate){ + AudioInfo info; + info.channels = 2; + info.bits_per_sample = 16; + info.sample_rate = rate; + notifyAudioChange(info); + } + + /// callback to update audio info with used a2dp sample rate + static void sample_rate_callback(uint16_t rate) { + A2DPStream_self->info.sample_rate = rate; + A2DPStream_self->notify_base_Info(rate); + } + +}; + +} From c2d9d141ed716024925c8dba70183fd9632061dc Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 12:58:40 +0200 Subject: [PATCH 07/15] Delete AudioLibs/All.h --- src/AudioTools/AudioLibs/All.h | 43 ---------------------------------- 1 file changed, 43 deletions(-) delete mode 100644 src/AudioTools/AudioLibs/All.h diff --git a/src/AudioTools/AudioLibs/All.h b/src/AudioTools/AudioLibs/All.h deleted file mode 100644 index 5b40c0015e..0000000000 --- a/src/AudioTools/AudioLibs/All.h +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once -// include to identify future compile errors: if you don't have all dependencies -// installed this will generate a lot of compile errors! 
-#include "MozziStream.h" -#include "A2DPStream.h" -#include "AudioBoardStream.h" -#include "AudioClientRTSP.h" -#include "AudioEffectsSuite.h" -#include "AudioFFT.h" -#include "AudioSTK.h" -#include "Concurrency.h" -#include "FFTEffects.h" -#include "HLSStream.h" -#include "I2SCodecStream.h" -#include "LEDOutput.h" -#include "MaximilianDSP.h" -#include "MemoryManager.h" -#include "PIDController.h" -#include "R2ROutput.h" -#include "SPDIFOutput.h" -#include "StdioStream.h" -#include "VBANStream.h" -#include "VS1053Stream.h" -// #include "TfLiteAudioStream.h" // takes too much time -// #include "AudioServerEx.h" -// #include "WM8960Stream.h" // driver part of AudioBoardStream -// #include "AudioFaust.h" -// #include "RTSP.h" // conflit with AudioClientRTSP -// #include "AudioESP32ULP.h" // using obsolete functioinality -// #include "PureDataStream.h" -// #include "Jupyter.h" // only for desktop -// #include "PortAudioStream.h" // only for desktop -// #include "MiniAudioStream.h" // only for desktop -// #include "AudioKissFFT.h" // select on fft implementation -// #include "AudioCmsisFFT.h" // select on fft implementation -// #include "AudioRealFFT.h" // select on fft implementation -// #include "AudioESP32FFT.h" // select on fft implementation -// #include "AudioEspressifFFT.h" // select on fft implementation -// #include "FFTDisplay.h" -// #include "LEDOutputUnoR4.h" // only for uno r4 -// #include "AudioMP34DT05.h" // only for nano ble sense -// #include "AudioESP8266.h" -// #include "AudioKit.h" // obsolete From e01d7bb0f1637633fcd3d88cd8a81b890c549cf5 Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 13:17:17 +0200 Subject: [PATCH 08/15] USE_RTSP_LOGIN --- .../communication-audiokit-rtsp.ino | 1 + .../communication-codec-rtsp.ino | 1 + .../communication-generator-rtsp.ino | 1 + .../communication-player_mp3-rtsp.ino | 3 ++- .../communication-player_mp3-rtsp_adcpm.ino | 1 + src/AudioTools/Communication/RTSP/RTSPServer.h | 11 ++++++----- 6 files changed, 12 insertions(+), 6 deletions(-) diff --git a/examples/examples-communication/rtsp/communication-audiokit-rtsp/communication-audiokit-rtsp.ino b/examples/examples-communication/rtsp/communication-audiokit-rtsp/communication-audiokit-rtsp.ino index a676dd3e2d..c469104e8a 100644 --- a/examples/examples-communication/rtsp/communication-audiokit-rtsp/communication-audiokit-rtsp.ino +++ b/examples/examples-communication/rtsp/communication-audiokit-rtsp/communication-audiokit-rtsp.ino @@ -11,6 +11,7 @@ #include "AudioTools.h" #include "AudioTools/AudioLibs/AudioBoardStream.h" +#define USE_RTSP_LOGIN // activate RTSP login support #include "AudioTools/Communication/RTSP.h" int port = 554; diff --git a/examples/examples-communication/rtsp/communication-codec-rtsp/communication-codec-rtsp.ino b/examples/examples-communication/rtsp/communication-codec-rtsp/communication-codec-rtsp.ino index 8f190c81e9..051f1efd56 100644 --- a/examples/examples-communication/rtsp/communication-codec-rtsp/communication-codec-rtsp.ino +++ b/examples/examples-communication/rtsp/communication-codec-rtsp/communication-codec-rtsp.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" #include "AudioTools/AudioCodecs/CodecG7xx.h" // https://github.com/pschatzmann/arduino-libg7xx.git +#define USE_RTSP_LOGIN // activate RTSP login support #include "AudioTools/Communication/RTSP.h" int port = 554; diff --git a/examples/examples-communication/rtsp/communication-generator-rtsp/communication-generator-rtsp.ino 
b/examples/examples-communication/rtsp/communication-generator-rtsp/communication-generator-rtsp.ino index 5b9bf66349..0872b768dd 100644 --- a/examples/examples-communication/rtsp/communication-generator-rtsp/communication-generator-rtsp.ino +++ b/examples/examples-communication/rtsp/communication-generator-rtsp/communication-generator-rtsp.ino @@ -10,6 +10,7 @@ */ #include "AudioTools.h" +#define USE_RTSP_LOGIN // activate RTSP login support #include "AudioTools/Communication/RTSP.h" int port = 554; diff --git a/examples/examples-communication/rtsp/communication-player_mp3-rtsp/communication-player_mp3-rtsp.ino b/examples/examples-communication/rtsp/communication-player_mp3-rtsp/communication-player_mp3-rtsp.ino index 5c8af4e5a8..c07228e3b1 100644 --- a/examples/examples-communication/rtsp/communication-player_mp3-rtsp/communication-player_mp3-rtsp.ino +++ b/examples/examples-communication/rtsp/communication-player_mp3-rtsp/communication-player_mp3-rtsp.ino @@ -3,8 +3,9 @@ #include "AudioTools.h" #include "AudioTools/Disk/AudioSourceSDMMC.h" -#include "AudioTools/Communication/RTSP.h" #include "AudioTools/AudioCodecs/MP3Parser.h" +#define USE_RTSP_LOGIN // activate RTSP login support +#include "AudioTools/Communication/RTSP.h" int port = 554; const char* wifi = "SSID"; diff --git a/examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm/communication-player_mp3-rtsp_adcpm.ino b/examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm/communication-player_mp3-rtsp_adcpm.ino index abdeba93cb..75f82eb9fa 100644 --- a/examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm/communication-player_mp3-rtsp_adcpm.ino +++ b/examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm/communication-player_mp3-rtsp_adcpm.ino @@ -5,6 +5,7 @@ #include "AudioTools/Disk/AudioSourceSDMMC.h" #include "AudioTools/AudioCodecs/CodecMP3Helix.h" #include "AudioTools/AudioCodecs/CodecADPCM.h" +#define USE_RTSP_LOGIN // activate RTSP login support #include "AudioTools/Communication/RTSP.h" int port = 554; diff --git a/src/AudioTools/Communication/RTSP/RTSPServer.h b/src/AudioTools/Communication/RTSP/RTSPServer.h index b77372e39b..0a1afa25ac 100644 --- a/src/AudioTools/Communication/RTSP/RTSPServer.h +++ b/src/AudioTools/Communication/RTSP/RTSPServer.h @@ -96,7 +96,9 @@ class RTSPServer { onSessionPathRef = ref; } - /** +#if defined(USE_RTSP_LOGIN) + +/** * @brief Initialize WiFi and start RTSP server * * Convenience method that connects to a WiFi network and then starts the RTSP @@ -112,16 +114,13 @@ class RTSPServer { * @see runAsync() */ bool begin(const char* ssid, const char* password) { -#ifdef ESP32 // Start Wifi WiFi.begin(ssid, password); while (WiFi.status() != WL_CONNECTED) { delay(500); Serial.print("."); } - - esp_wifi_set_ps(WIFI_PS_NONE); -#endif + WiFi.setSleep(WIFI_PS_NONE); Serial.println(); Serial.print("connect to rtsp://"); @@ -131,6 +130,8 @@ class RTSPServer { Serial.println(); return begin(); } + +#endif /** Start the RTSP server */ bool begin() { From d51989ac2bcd44504e567949b52141fff56f757d Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 13:34:01 +0200 Subject: [PATCH 09/15] USE_RTSP_LOGIN --- tests-cmake/rtsp/rtsp.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests-cmake/rtsp/rtsp.cpp b/tests-cmake/rtsp/rtsp.cpp index 5f8ac43dc4..19179f957f 100644 --- a/tests-cmake/rtsp/rtsp.cpp +++ b/tests-cmake/rtsp/rtsp.cpp @@ -33,7 +33,7 @@ void setup() { rtsp_out.begin(); // Start Wifi & rtsp 
server - rtsp.begin(wifi, password); + rtsp.begin(); } From dd24c15a27b84361fd9e0af21c563f65102c132d Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 16:04:20 +0200 Subject: [PATCH 10/15] RTSPClient: error corrections --- .../Communication/RTSP/RTSPClient.h | 153 +++++++++++------- 1 file changed, 98 insertions(+), 55 deletions(-) diff --git a/src/AudioTools/Communication/RTSP/RTSPClient.h b/src/AudioTools/Communication/RTSP/RTSPClient.h index 3470495782..05fd3e99c8 100644 --- a/src/AudioTools/Communication/RTSP/RTSPClient.h +++ b/src/AudioTools/Communication/RTSP/RTSPClient.h @@ -158,12 +158,14 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { // OPTIONS LOGI("OPTIONS"); - if (!sendSimpleRequest("OPTIONS", m_baseUrl, nullptr, 0, m_hdrBuf, - sizeof(m_hdrBuf), nullptr, 0)) { - // Some servers expect "OPTIONS *" instead of URL - if (!sendSimpleRequest("OPTIONS", "*", nullptr, 0, m_hdrBuf, - sizeof(m_hdrBuf), nullptr, 0)) { + int retry = m_connectRetries; + while (!sendSimpleRequest("OPTIONS", m_baseUrl, nullptr, 0, m_hdrBuf, + sizeof(m_hdrBuf), nullptr, 0)) { + if (--retry == 0) { return fail("OPTIONS failed"); + } else { + LOGW("RTSPClient: retrying OPTIONS"); + delay(800); } } @@ -182,6 +184,8 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { // Parse a=control and build the correct track URL for SETUP parseControlFromSdp(m_bodyBuf); buildTrackUrlFromBaseAndControl(); + LOGI("RTSPClient: SDP control='%s' content-base='%s'", m_sdpControl, + m_contentBase); LOGI("RTSPClient: SETUP url: %s", m_trackUrl); // Prepare UDP (client_port) @@ -215,6 +219,7 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { primeUdpPath(); // PLAY + LOGI("PLAY"); char sessionHdr[128]; snprintf(sessionHdr, sizeof(sessionHdr), "Session: %s\r\n", m_sessionId); if (!sendSimpleRequest("PLAY", m_baseUrl, sessionHdr, strlen(sessionHdr), @@ -257,10 +262,8 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { } } - if (m_udp) { - m_udp->stop(); - delete m_udp; - m_udp = nullptr; + if (m_udp_active) { + m_udp.stop(); } if (m_tcp.connected()) m_tcp.stop(); m_started = false; @@ -282,7 +285,7 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { return 0; } serviceUdp(); - int avail = (int)(m_pktSize > m_pktPos ? 
(m_pktSize - m_pktPos) : 0); + int avail = m_pktBuf.available(); if (avail == 0) delay(m_idleDelayMs); return avail; } @@ -358,7 +361,7 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { if (ok) { m_isPlaying = false; // drop any buffered payload - m_pktPos = m_pktSize = 0; + m_pktBuf.clear(); } } return ok; @@ -391,7 +394,7 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { return 0; } serviceUdp(); - if (m_pktPos >= m_pktSize) { + if (m_pktBuf.isEmpty()) { LOGW("no data"); delay(m_idleDelayMs); return 0; @@ -412,10 +415,10 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { } } - size_t n = m_pktSize - m_pktPos; - size_t written = m_multi_decoder.write(m_pktBuf.data() + m_pktPos, n); - m_pktPos = m_pktSize = 0; + int n = m_pktBuf.available(); + size_t written = m_multi_decoder.write(m_pktBuf.data(), n); LOGI("copy: %d -> %d", (int)n, (int)written); + m_pktBuf.clearArray(written); return written; } @@ -445,7 +448,8 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { protected: // Connection TcpClient m_tcp; - UdpSocket* m_udp = nullptr; + UdpSocket m_udp; + bool m_udp_active = false; IPAddress m_addr{}; uint16_t m_port = 0; @@ -464,9 +468,8 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { const uint32_t m_keepaliveIntervalMs = 25000; // 25s // Buffers - audio_tools::Vector m_pktBuf{0}; - size_t m_pktPos = 0; - size_t m_pktSize = 0; + SingleBuffer m_pktBuf{0}; + SingleBuffer m_tcpCmd{0}; char m_hdrBuf[1024]; char m_bodyBuf[1024]; @@ -497,9 +500,9 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { m_clientRtpPort = 0; m_cseq = 1; m_pktBuf.resize(2048); - m_pktPos = m_pktSize = 0; + m_pktBuf.clear(); m_decoderReady = false; - // keep default decoders registered once per instance + m_udp_active = false; } void buildUrls(const char* path) { @@ -523,19 +526,15 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { snprintf(m_trackUrl, sizeof(m_trackUrl), "%strackID=0", m_baseUrl); } - // Always routed: MultiDecoder -> Resampler -> User sink - bool openUdpPorts() { // Try a few even RTP ports starting at 5004 for (uint16_t p = 5004; p < 65000; p += 2) { - UdpSocket* s = new UdpSocket(); - if (s->begin(p)) { + if (m_udp.begin(p)) { LOGI("RTSPClient: bound UDP RTP port %u", (unsigned)p); - m_udp = s; m_clientRtpPort = p; + m_udp_active = true; return true; } - delete s; } return false; } @@ -562,54 +561,78 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { } } + // Compute the RTP payload offset inside a UDP packet + // Considers fixed RTP header (12 bytes), CSRC count, and configured extra + // offset + size_t computeRtpPayloadOffset(const uint8_t* data, size_t length) { + if (length <= 12) return length; + size_t offset = 12; + uint8_t cc = data[0] & 0x0F; // CSRC count + offset += cc * 4; + // Apply any configured additional payload offset (e.g., RFC2250) + offset += m_payloadOffset; + return offset; + } + void serviceUdp() { // Keep RTSP session alive maybeKeepalive(); - if (!m_udp) return; - if (m_pktPos < m_pktSize) return; // still have data buffered + if (!m_udp_active) { + LOGE("no UDP"); + return; + } + if (m_pktBuf.available() > 0) { + LOGI("Still have unprocessed data"); + return; // still have data buffered + } - int packetSize = m_udp->parsePacket(); - if (packetSize <= 0) return; + // parse next UDP packet + int packetSize = m_udp.parsePacket(); + if (packetSize <= 0) { + LOGW("packet size: %d", packetSize); + return; + } 
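
The payload-offset rule used by computeRtpPayloadOffset() above can be illustrated in isolation. The snippet below is only a sketch of that arithmetic; the function and parameter names are made up for the example, and only the 12-byte fixed RTP header, the CSRC count in the low nibble of the first header byte, and the configured extra offset are taken from the patch. For a packet whose first byte carries CC=2 and an extra offset of 4, the payload starts at byte 12 + 2*4 + 4 = 24.

    #include <stddef.h>
    #include <stdint.h>

    // Sketch (illustrative only): same offset rule as computeRtpPayloadOffset()
    size_t rtpPayloadOffset(const uint8_t* pkt, size_t len, size_t extraOffset) {
      if (len <= 12) return len;         // too short to carry an RTP payload
      size_t cc = pkt[0] & 0x0F;         // CSRC count: low nibble of first byte
      return 12 + cc * 4 + extraOffset;  // fixed header + CSRCs + configured extra
    }
    // e.g. cc = 2, extraOffset = 4 (RFC 2250 MPEG audio header) -> payload at byte 24
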
+ // Fill buffer if ((size_t)packetSize > m_pktBuf.size()) m_pktBuf.resize(packetSize); - int n = m_udp->read(m_pktBuf.data(), packetSize); - if (n <= 12) return; // too small to contain RTP - - // Very basic RTP parsing: skip 12-byte header - size_t payloadOffset = 12; - uint8_t cc = m_pktBuf[0] & 0x0F; - uint8_t payloadType = m_pktBuf[1] & 0x7F; - payloadOffset += cc * 4; // skip CSRCs if present - // Apply any configured additional payload offset (e.g., RFC2250) - payloadOffset += m_payloadOffset; - if (payloadOffset >= (size_t)n) return; + int n = m_udp.read(m_pktBuf.data(), packetSize); + m_pktBuf.setAvailable(n); + if (n <= 12) { + LOGE("packet too small: %d", n); + return; // too small to contain RTP + } + + // Very basic RTP parsing: compute payload offset + uint8_t* data = m_pktBuf.data(); + size_t payloadOffset = computeRtpPayloadOffset(data, (size_t)n); + if (payloadOffset >= (size_t)n) { + LOGW("no payload: %d", n); + } - m_pktPos = 0; - m_pktSize = n - payloadOffset; // move payload to beginning for contiguous read - memmove(m_pktBuf.data(), m_pktBuf.data() + payloadOffset, m_pktSize); + m_pktBuf.clearArray(payloadOffset); } void primeUdpPath() { - if (!m_udp) return; + if (!m_udp_active) return; if (m_serverRtpPort == 0) return; // Send a tiny datagram to server RTP port to open NAT/flows // Not required by RTSP, but improves interoperability for (int i = 0; i < 2; ++i) { - m_udp->beginPacket(m_addr, m_serverRtpPort); + m_udp.beginPacket(m_addr, m_serverRtpPort); uint8_t b = 0x00; - m_udp->write(&b, 1); - m_udp->endPacket(); + m_udp.write(&b, 1); + m_udp.endPacket(); delay(2); } } bool sniffUdpFor(uint32_t ms) { - if (!m_udp) return false; + if (!m_udp_active) return false; uint32_t start = millis(); while ((millis() - start) < ms) { - int packetSize = m_udp->parsePacket(); + int packetSize = m_udp.parsePacket(); if (packetSize > 0) { // restore to be processed by normal path return true; @@ -619,6 +642,19 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { return false; } + // Centralized TCP write helper + size_t tcpWrite(const uint8_t* data, size_t len) { + if (m_tcpCmd.size() < 400) m_tcpCmd.resize(400); + return m_tcpCmd.writeArray(data, len); + } + + bool tcpCommit() { + bool rc = m_tcp.write(m_tcpCmd.data(), m_tcpCmd.available()) == + m_tcpCmd.available(); + m_tcpCmd.clear(); + return rc; + } + bool sendSimpleRequest(const char* method, const char* url, const char* extraHeaders, size_t extraLen, char* outHeaders, size_t outHeadersLen, char* outBody, @@ -632,18 +668,25 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { if (reqLen <= 0) return false; // Send start line + mandatory headers - if (m_tcp.write((const uint8_t*)reqStart, reqLen) != (size_t)reqLen) + if (tcpWrite((const uint8_t*)reqStart, reqLen) != (size_t)reqLen) { return false; + } // Optional extra headers if (extraHeaders && extraLen) { - if (m_tcp.write((const uint8_t*)extraHeaders, extraLen) != extraLen) + if (tcpWrite((const uint8_t*)extraHeaders, extraLen) != extraLen) { return false; + } } // End of headers const char* end = "\r\n"; - if (m_tcp.write((const uint8_t*)end, 2) != 2) return false; + if (tcpWrite((const uint8_t*)end, 2) != 2) { + return false; + } - m_tcp.flush(); + if (!tcpCommit()) { + LOGE("TCP write failed"); + return false; + } // Read response headers until CRLFCRLF int hdrUsed = 0; From e30dd90f6c2920f781115be6769ac1313ad7f326 Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 16:47:21 +0200 Subject: [PATCH 11/15] RTSP 
corrections --- .../Communication/RTSP/RTSPAudioStreamer.h | 28 ++++++++++++++++++- .../Communication/RTSP/RTSPClient.h | 12 +++++--- .../Communication/RTSP/RTSPSession.h | 4 +++ tests-cmake/rtsp/rtsp.cpp | 2 -- 4 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h b/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h index 4833690d4d..49d7854115 100644 --- a/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h +++ b/src/AudioTools/Communication/RTSP/RTSPAudioStreamer.h @@ -207,9 +207,12 @@ class RTSPAudioStreamerBase { }; ++m_udpRefCount; - LOGD("RTP Streamer set up with client IP %s and client Port %i", + LOGI("RTP Streamer set up with client IP %s and client Port %i", m_ClientIP.toString().c_str(), m_ClientPort); + // If client IP is unknown (0.0.0.0), try to learn it from an inbound UDP packet + tryLearnClientFromUdp(true); + return true; } @@ -590,6 +593,7 @@ class RTSPAudioStreamerBase { inline void sendOut(uint16_t totalLen) { if (m_useTcpInterleaved && m_RtspTcpSocket != Platform::NULL_TCP_SOCKET) { + LOGD("Sending TCP: %d", totalLen); uint8_t hdr[4]; hdr[0] = 0x24; // '$' hdr[1] = (uint8_t)m_TcpRtpChannel; @@ -598,10 +602,32 @@ class RTSPAudioStreamerBase { Platform::sendSocket(m_RtspTcpSocket, hdr, sizeof(hdr)); Platform::sendSocket(m_RtspTcpSocket, mRtpBuf.data(), totalLen); } else { + // If client IP is still unknown, attempt to learn it just-in-time + tryLearnClientFromUdp(false); + LOGI("Sending UDP: %d bytes (to %s:%d)", totalLen, + m_ClientIP.toString().c_str(), m_ClientPort); Platform::sendUdpSocket(m_RtpSocket, mRtpBuf.data(), totalLen, m_ClientIP, m_ClientPort); } } + + inline void tryLearnClientFromUdp(bool warnIfNone) { + if (m_ClientIP == IPAddress(0, 0, 0, 0) && m_RtpSocket) { + int avail = m_RtpSocket->parsePacket(); + if (avail > 0) { + IPAddress learnedIp = m_RtpSocket->remoteIP(); + uint16_t learnedPort = m_RtpSocket->remotePort(); + if (learnedIp != IPAddress(0, 0, 0, 0)) { + m_ClientIP = learnedIp; + if (m_ClientPort == 0) m_ClientPort = learnedPort; + LOGI("RTP learned client via UDP: %s:%u", + m_ClientIP.toString().c_str(), (unsigned)m_ClientPort); + } + } else if (warnIfNone) { + LOGW("Client IP unknown (0.0.0.0) and no inbound UDP yet"); + } + } + } }; /** diff --git a/src/AudioTools/Communication/RTSP/RTSPClient.h b/src/AudioTools/Communication/RTSP/RTSPClient.h index 05fd3e99c8..ac257ffe8c 100644 --- a/src/AudioTools/Communication/RTSP/RTSPClient.h +++ b/src/AudioTools/Communication/RTSP/RTSPClient.h @@ -384,18 +384,22 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { size_t copy() { if (!m_started) { delay(m_idleDelayMs); - LOGW("not started"); + LOGD("not started"); return 0; } + maybeKeepalive(); + if (!m_isPlaying) { delay(m_idleDelayMs); - LOGW("not playing"); + LOGD("not playing"); return 0; } + serviceUdp(); + if (m_pktBuf.isEmpty()) { - LOGW("no data"); + LOGD("no data"); delay(m_idleDelayMs); return 0; } @@ -590,7 +594,7 @@ class RTSPClient : public AudioInfoSource, public AudioInfoSupport { // parse next UDP packet int packetSize = m_udp.parsePacket(); if (packetSize <= 0) { - LOGW("packet size: %d", packetSize); + LOGD("packet size: %d", packetSize); return; } diff --git a/src/AudioTools/Communication/RTSP/RTSPSession.h b/src/AudioTools/Communication/RTSP/RTSPSession.h index e0ea766c1b..1170b0021a 100644 --- a/src/AudioTools/Communication/RTSP/RTSPSession.h +++ b/src/AudioTools/Communication/RTSP/RTSPSession.h @@ -830,6 +830,10 @@ class RtspSession { 
uint16_t clientPort; Platform::getSocketPeerAddr(m_RtspClient, &clientIP, &clientPort); + LOGI("SETUP peer resolved: %s:%u (RTP client_port=%u)", + clientIP.toString().c_str(), (unsigned)clientPort, + (unsigned)m_RtpClientPort); + m_Streamer->initUdpTransport(clientIP, m_RtpClientPort); } diff --git a/tests-cmake/rtsp/rtsp.cpp b/tests-cmake/rtsp/rtsp.cpp index 19179f957f..38992a5ffc 100644 --- a/tests-cmake/rtsp/rtsp.cpp +++ b/tests-cmake/rtsp/rtsp.cpp @@ -5,8 +5,6 @@ #include "AudioTools/Communication/RTSP.h" int port = 8554; -const char* wifi = "SSID"; -const char* password = "password"; // rtsp RTSPFormatMP3 mp3format; // RTSP mp3 From 32564b28200f55f003334657d61e6f4bd26496b1 Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Tue, 23 Sep 2025 17:23:02 +0200 Subject: [PATCH 12/15] examples: correct build errors --- examples/build-examples-log.txt | 14 ++++++++++++-- examples/build-examples.sh | 6 ++++++ .../tests/etc/test-audiolibs/test-audiolibs.ino | 6 ------ src/AudioTools/AudioCodecs/CodecVorbis.h | 1 + src/AudioTools/Communication/HLSStream.h | 2 +- 5 files changed, 20 insertions(+), 9 deletions(-) delete mode 100644 examples/tests/etc/test-audiolibs/test-audiolibs.ino diff --git a/examples/build-examples-log.txt b/examples/build-examples-log.txt index 59ec0a0a2e..d08c2fa858 100644 --- a/examples/build-examples-log.txt +++ b/examples/build-examples-log.txt @@ -139,6 +139,16 @@ ../examples/examples-dsp/examples-stk/streams-stk_myinstrument-audiokit -> rc=0 ../examples/examples-dsp/examples-stk/streams-stk_sine-audiokit -> rc=0 ../examples/examples-dsp/examples-stk/streams-stk_synth-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/README.md -> rc=1 +../examples/examples-dsp/examples-stk/streams-stk_allinstruments-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk-desktop -> rc=1 +../examples/examples-dsp/examples-stk/streams-stk_files-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk_generator-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk_loop-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk_myinstrument-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk_sine-audiokit -> rc=0 +../examples/examples-dsp/examples-stk/streams-stk_synth-audiokit -> rc=0 ../examples/examples-dsp/examples-faust/streams-faust_flute-i2s -> rc=0 ../examples/examples-dsp/examples-faust/streams-faust_noise-i2s -> rc=0 ../examples/examples-dsp/examples-faust/streams-generator-faust-i2s -> rc=0 @@ -188,8 +198,9 @@ ../examples/examples-communication/rtsp/communication-generator-rtsp -> rc=0 ../examples/examples-communication/rtsp/communication-player_mp3-rtsp -> rc=0 ../examples/examples-communication/rtsp/communication-player_mp3-rtsp_adcpm -> rc=0 +../examples/examples-communication/rtsp/communication-rtsp555-audiokit -> rc=0 +../examples/examples-communication/rtsp/communication-rtsp555-i2s -> rc=0 ../examples/examples-communication/rtsp/communication-rtsp-audiokit -> rc=0 -../examples/examples-communication/rtsp/communication-rtsp-i2s -> rc=0 ../examples/examples-communication/serial/mp3 -> rc=1 ../examples/examples-communication/serial/mp3-custom -> rc=1 ../examples/examples-communication/serial/mp3-xon-xoff -> rc=1 @@ -313,7 +324,6 @@ ../examples/tests/effects/pitch-shift-180 -> rc=0 ../examples/tests/effects/pitch-shift-simple -> rc=0 ../examples/tests/etc/callback-write -> rc=0 -../examples/tests/etc/test-audiolibs -> rc=0 
../examples/tests/etc/test-mulit-compilation-units -> rc=0 ../examples/tests/etc/test-pins -> rc=0 ../examples/tests/etc/test-ringbufferfile -> rc=0 diff --git a/examples/build-examples.sh b/examples/build-examples.sh index 1806ff5af9..61ce538cdb 100755 --- a/examples/build-examples.sh +++ b/examples/build-examples.sh @@ -18,6 +18,12 @@ function compile_example { FILES=$2 for f in $FILES do + # Skip README.md files + if [[ $(basename "$f") == "README.md" ]]; then + echo "Skipping README.md file: $f" + continue + fi + echo "Processing $f ..." # take action on each file. $f store current file name #arduino-cli compile -b "$ARCH" "$f" diff --git a/examples/tests/etc/test-audiolibs/test-audiolibs.ino b/examples/tests/etc/test-audiolibs/test-audiolibs.ino deleted file mode 100644 index 514367ed24..0000000000 --- a/examples/tests/etc/test-audiolibs/test-audiolibs.ino +++ /dev/null @@ -1,6 +0,0 @@ -#include "AudioTools.h" -#include "AudioTools/AudioLibs/All.h" - -void setup(){} - -void loop(){} \ No newline at end of file diff --git a/src/AudioTools/AudioCodecs/CodecVorbis.h b/src/AudioTools/AudioCodecs/CodecVorbis.h index 9a4e05750f..9ce0657c71 100644 --- a/src/AudioTools/AudioCodecs/CodecVorbis.h +++ b/src/AudioTools/AudioCodecs/CodecVorbis.h @@ -1,6 +1,7 @@ #pragma once #include "AudioTools/AudioCodecs/AudioCodecsBase.h" #include "AudioToolsConfig.h" +#include "ogg.h" #include "vorbis-tremor.h" // #include "AudioTools/AudioCodecs/ContainerOgg.h" diff --git a/src/AudioTools/Communication/HLSStream.h b/src/AudioTools/Communication/HLSStream.h index 50aff75668..b7fbd5066d 100644 --- a/src/AudioTools/Communication/HLSStream.h +++ b/src/AudioTools/Communication/HLSStream.h @@ -1,7 +1,7 @@ #pragma once #include "AudioTools/AudioCodecs/AudioEncoded.h" #include "AudioTools/CoreAudio/AudioBasic/Str.h" -#include "AudioTools/CoreAudio/AudioHttp/URLStream.h" +#include "AudioTools/Communication/HTTP/URLStream.h" #include "AudioTools/CoreAudio/StreamCopy.h" #include "AudioToolsConfig.h" From a66f1b5b55d48365d7d6b23b243f99b3227514fe Mon Sep 17 00:00:00 2001 From: pschatzmann Date: Wed, 24 Sep 2025 00:38:32 +0200 Subject: [PATCH 13/15] AudioPlayer: improve comments --- src/AudioTools/CoreAudio/AudioPlayer.h | 115 ++++++++++++++----------- 1 file changed, 64 insertions(+), 51 deletions(-) diff --git a/src/AudioTools/CoreAudio/AudioPlayer.h b/src/AudioTools/CoreAudio/AudioPlayer.h index 684967d190..51a2e3b365 100644 --- a/src/AudioTools/CoreAudio/AudioPlayer.h +++ b/src/AudioTools/CoreAudio/AudioPlayer.h @@ -23,13 +23,27 @@ namespace audio_tools { /** - * @brief Implements a simple audio player which supports the following - * commands: - * - begin - * - play - * - stop - * - next - * - set Volume + * @brief High-level audio playback pipeline and controller. + * + * Provides pull-driven playback from an AudioSource through optional decoding, + * volume control and click-free fades to an AudioOutput/AudioStream/Print. 
+ * + * Features: + * - Playback control: begin, play, stop, next, previous, setIndex + * - PCM and encoded formats via AudioDecoder with dynamic audio info updates + * - Volume management (0.0–1.0) with pluggable VolumeControl + * - Auto-fade in/out to avoid pops; optional silence while inactive + * - Auto-advance on timeout with forward/backward navigation + * - Metadata: ICY (via source) or ID3 (internal MetaDataID3) + * - Callbacks: metadata updates and stream-change notification + * - Flow control: adjustable copy buffer and optional delay when output is full + * + * Pipeline: AudioSource → StreamCopy → EncodedAudioOutput → VolumeStream → + * FadeStream → Output. + * + * Operation model: call copy() regularly (non-blocking) or copyAll() for + * blocking end-to-end playback. + * * @ingroup player * @author Phil Schatzmann * @copyright GPLv3 @@ -94,10 +108,13 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { decoder.addNotifyAudioChange(*this); } + /// Non-copyable: copy constructor is deleted AudioPlayer(AudioPlayer const &) = delete; + /// Non-assignable: assignment operator is deleted AudioPlayer &operator=(AudioPlayer const &) = delete; + /// Sets the final output to an AudioOutput (adds Volume/Fade for PCM) void setOutput(AudioOutput &output) { if (p_decoder->isResultPCM()) { this->fade.setOutput(output); @@ -112,6 +129,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { this->p_final_stream = nullptr; } + /// Sets the final output to a Print (adds Volume/Fade for PCM) void setOutput(Print &output) { if (p_decoder->isResultPCM()) { this->fade.setOutput(output); @@ -126,6 +144,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { this->p_final_stream = nullptr; } + /// Sets the final output to an AudioStream (adds Volume/Fade for PCM) void setOutput(AudioStream &output) { if (p_decoder->isResultPCM()) { this->fade.setOutput(output); @@ -140,11 +159,10 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { this->p_final_stream = &output; } - /// Defines the number of bytes used by the copier + /// Sets the internal copy buffer size (bytes) void setBufferSize(int size) { copier.resize(size); } - /// (Re)Starts the playing of the music (from the beginning or the indicated - /// index) + /// Starts or restarts playback from the first or given stream index bool begin(int index = 0, bool isActive = true) { TRACED(); bool result = false; @@ -200,6 +218,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return result; } + /// Ends playback and resets decoder/intermediate stages void end() { TRACED(); active = false; @@ -213,19 +232,19 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { } } - /// Provides the actual audio source + /// Returns the active AudioSource AudioSource &audioSource() { return *p_source; } - /// (Re)defines the audio source + /// Sets or replaces the AudioSource void setAudioSource(AudioSource &source) { this->p_source = &source; } - /// (Re)defines the decoder + /// Sets or replaces the AudioDecoder void setDecoder(AudioDecoder &decoder) { this->p_decoder = &decoder; out_decoding.setDecoder(p_decoder); } - /// (Re)defines the notify + /// Adds/updates a listener notified on audio info changes void addNotifyAudioChange(AudioInfoSupport *notify) { this->p_final_notify = notify; // notification for audio configuration @@ -234,7 +253,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { } } - /// Updates the audio info in the related 
objects + /// Receives and forwards updated AudioInfo to the chain void setAudioInfo(AudioInfo info) override { TRACED(); LOGI("sample_rate: %d", (int)info.sample_rate); @@ -250,9 +269,11 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { if (p_final_notify != nullptr) p_final_notify->setAudioInfo(info); }; + /// Returns the current AudioInfo of the playback chain AudioInfo audioInfo() override { return info; } /// starts / resumes the playing after calling stop(): same as setActive(true) + /// Resumes playback after stop(); equivalent to setActive(true) void play() { TRACED(); setActive(true); @@ -278,18 +299,17 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return true; } - /// Obsolete: use PlayPath! + /// Obsolete: use PlayPath! bool playFile(const char *path) { return playPath(path); } - /// halts the playing: same as setActive(false) + /// Halts playback; equivalent to setActive(false) void stop() { TRACED(); setActive(false); } - /// moves to next file or nth next file when indicating an offset. Negative - /// values are supported to move back. + /// Moves to the next/previous stream by offset (negative supported) bool next(int offset = 1) { TRACED(); writeEnd(); @@ -298,7 +318,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return active; } - /// moves to the selected file position + /// Selects stream by absolute index in the source bool setIndex(int idx) { TRACED(); writeEnd(); @@ -307,7 +327,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return active; } - /// Moves to the selected file w/o updating the actual file position + /// Selects stream by path without changing the source iterator bool setPath(const char *path) { TRACED(); writeEnd(); @@ -316,7 +336,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return active; } - /// moves to previous file + /// Moves back by offset streams (defaults to 1) bool previous(int offset = 1) { TRACED(); writeEnd(); @@ -325,7 +345,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return active; } - /// start selected input stream + /// Activates the provided Stream as current input bool setStream(Stream *input) { end(); out_decoding.begin(); @@ -341,16 +361,16 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return p_input_stream != nullptr; } - /// Provides the actual stream (=e.g.file) + /// Returns the currently active input Stream (e.g., file) Stream *getStream() { return p_input_stream; } - /// determines if the player is active + /// Checks whether playback is active bool isActive() { return active; } - /// determines if the player is active + /// Boolean conversion returns isActive() operator bool() { return isActive(); } - /// The same like start() / stop() + /// Toggles playback activity; triggers fade and optional silence void setActive(bool isActive) { if (is_auto_fade) { if (isActive) { @@ -364,7 +384,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { active = isActive; } - /// sets the volume - values need to be between 0.0 and 1.0 + /// Sets volume in range [0.0, 1.0]; updates VolumeStream bool setVolume(float volume) override { bool result = true; if (volume >= 0.0f && volume <= 1.0f) { @@ -380,22 +400,20 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return result; } - /// Determines the actual volume + /// Returns the current volume [0.0, 1.0] float volume() override { return current_volume; } - /// Set automatically 
move to next file and end of current file: This is - /// determined from the AudioSource. If you want to override it call this - /// method after calling begin()! + /// Enables/disables auto-advance at end/timeout (overrides AudioSource) void setAutoNext(bool next) { autonext = next; } - /// Defines the wait time in ms if the target output is full + /// Sets delay (ms) to wait when output is full void setDelayIfOutputFull(int delayMs) { delay_if_full = delayMs; } /// Copies DEFAULT_BUFFER_SIZE (=1024 bytes) from the source to the decoder: /// Call this method in the loop. size_t copy() { return copy(copier.bufferSize()); } - /// Copies all the data + /// Copies until source is exhausted (blocking) size_t copyAll() { size_t result = 0; size_t step = copy(); @@ -406,8 +424,7 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return result; } - /// Copies the indicated number of bytes from the source to the decoder: Call - /// this method in the loop. + /// Copies the requested bytes from source to decoder (call in loop) size_t copy(size_t bytes) { size_t result = 0; if (active) { @@ -444,21 +461,20 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { return result; } - /// Change the VolumeControl implementation + /// Sets a custom VolumeControl implementation void setVolumeControl(VolumeControl &vc) { volume_out.setVolumeControl(vc); } - /// Provides access to the StreamCopy, so that we can register additinal + /// Provides access to StreamCopy to register additional callbacks /// callbacks StreamCopy &getStreamCopy() { return copier; } - /// If set to true the player writes 0 values instead of no data if the player - /// is inactive + /// When enabled, writes zeros while inactive to keep sinks alive void setSilenceOnInactive(bool active) { silence_on_inactive = active; } - /// Checks if silence_on_inactive has been activated (default false) + /// Returns whether silence-on-inactive is enabled bool isSilenceOnInactive() { return silence_on_inactive; } - /// Sends the requested bytes as 0 values to the output + /// Writes the requested number of zero bytes to the output void writeSilence(size_t bytes) { TRACEI(); if (p_final_print != nullptr) { @@ -468,25 +484,22 @@ class AudioPlayer : public AudioInfoSupport, public VolumeSupport { } } - // /// Provides the Print object to which we send the decoding result - // Print *getVolumeOutput() { return &volume_out; } - - /// Provides the reference to the volume stream + /// Returns the VolumeStream used by the player VolumeStream &getVolumeStream() { return volume_out; } - /// Activates/deactivates the automatic fade in and fade out to prevent - /// popping sounds: default is active + /// Enables/disables automatic fade in/out to prevent pops void setAutoFade(bool active) { is_auto_fade = active; } + /// Checks whether automatic fade in/out is enabled bool isAutoFade() { return is_auto_fade; } - /// Change the default ID3 max metadata size (256) + /// Sets the maximum ID3 metadata buffer size (default 256) void setMetaDataSize(int size) { meta_out.resize(size); } - /// this is used to set the reference for the stream change callback + /// Sets a user reference passed to the stream-change callback void setReference(void *ref) { p_reference = ref; } - /// Defines the medatadata callback + /// Defines the metadata callback void setMetadataCallback(void (*callback)(MetaDataType type, const char *str, int len), ID3TypeSelection sel = SELECT_ID3) { From c0ebf3d5800b3a3e83df8f883532af990663c495 Mon Sep 17 00:00:00 
2001 From: pschatzmann Date: Wed, 24 Sep 2025 00:46:40 +0200 Subject: [PATCH 14/15] Documentation: public methods --- src/AudioTools/CoreAudio/AudioRuntime.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/AudioTools/CoreAudio/AudioRuntime.h b/src/AudioTools/CoreAudio/AudioRuntime.h index de509941ea..166bd3a986 100644 --- a/src/AudioTools/CoreAudio/AudioRuntime.h +++ b/src/AudioTools/CoreAudio/AudioRuntime.h @@ -8,12 +8,6 @@ namespace audio_tools { -/** - * @brief Public generic methods - * @author Phil Schatzmann - * @copyright GPLv3 - */ - /// stops any further processing by spinning in an endless loop @ingroup basic inline void stop() { #ifdef EXIT_ON_STOP @@ -34,6 +28,8 @@ inline static void checkMemory(bool printMemory=false) { } #ifdef ARDUINO + +/// prints n times the character ch and a new line @ingroup basic inline void printNChar(char ch, int n){ for (int j=0;j Date: Wed, 24 Sep 2025 00:52:19 +0200 Subject: [PATCH 15/15] Documentation --- src/AudioTools/Sandbox/BLE/AudioBLEStream.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/AudioTools/Sandbox/BLE/AudioBLEStream.h b/src/AudioTools/Sandbox/BLE/AudioBLEStream.h index 49e289fdaa..dd2f7b3953 100644 --- a/src/AudioTools/Sandbox/BLE/AudioBLEStream.h +++ b/src/AudioTools/Sandbox/BLE/AudioBLEStream.h @@ -9,11 +9,12 @@ namespace audio_tools { /** - * @ingroup main * @brief Transmit and receive data via BLE using a Serial API. * The following additional experimental features are offered: * setFramed(true) tries to keep the original write sizes; * setAudioInfoActive(true) informs about changes in the audio info + * @ingroup communications + * @author Phil Schatzmann */ class AudioBLEStream : public AudioStream {