From c20eb1d93f63440ce0edd30c32c1d19f960cb975 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 11:28:29 +0800 Subject: [PATCH 01/18] Rename bridges to better name 1. Rename SrsRtcFromRtmpBridge to SrsRtmpToRtcBridge 2. Rename SrsRtmpFromRtcBridge to SrsRtcToRtmpBridge 3. Rename SrsRtmpFromSrtBridge to SrsSrtToRtmpBridge --- trunk/src/app/srs_app_rtc_conn.cpp | 2 +- trunk/src/app/srs_app_rtc_source.cpp | 60 ++++++++++++++-------------- trunk/src/app/srs_app_rtc_source.hpp | 14 +++---- trunk/src/app/srs_app_rtmp_conn.cpp | 2 +- trunk/src/app/srs_app_srt_conn.cpp | 4 +- trunk/src/app/srs_app_srt_source.cpp | 32 +++++++-------- trunk/src/app/srs_app_srt_source.hpp | 6 +-- 7 files changed, 60 insertions(+), 60 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_conn.cpp b/trunk/src/app/srs_app_rtc_conn.cpp index b26dfa16a2..5abdd8452c 100644 --- a/trunk/src/app/srs_app_rtc_conn.cpp +++ b/trunk/src/app/srs_app_rtc_conn.cpp @@ -1197,7 +1197,7 @@ srs_error_t SrsRtcPublishStream::initialize(SrsRequest* r, SrsRtcSourceDescripti // especially for stream merging. 
rtmp->set_cache(false); - SrsRtmpFromRtcBridge *bridge = new SrsRtmpFromRtcBridge(rtmp); + SrsRtcToRtmpBridge *bridge = new SrsRtcToRtmpBridge(rtmp); if ((err = bridge->initialize(r)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index f18b9518eb..d37ad9da12 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -712,7 +712,7 @@ srs_error_t SrsRtcSource::on_timer(srs_utime_t interval) #ifdef SRS_FFMPEG_FIT -SrsRtcFromRtmpBridge::SrsRtcFromRtmpBridge(SrsRtcSource* source) +SrsRtmpToRtcBridge::SrsRtmpToRtcBridge(SrsRtcSource* source) { req = NULL; source_ = source; @@ -747,14 +747,14 @@ SrsRtcFromRtmpBridge::SrsRtcFromRtmpBridge(SrsRtcSource* source) } } -SrsRtcFromRtmpBridge::~SrsRtcFromRtmpBridge() +SrsRtmpToRtcBridge::~SrsRtmpToRtcBridge() { srs_freep(format); srs_freep(codec_); srs_freep(meta); } -srs_error_t SrsRtcFromRtmpBridge::initialize(SrsRequest* r) +srs_error_t SrsRtmpToRtcBridge::initialize(SrsRequest* r) { srs_error_t err = srs_success; @@ -778,7 +778,7 @@ srs_error_t SrsRtcFromRtmpBridge::initialize(SrsRequest* r) return err; } -srs_error_t SrsRtcFromRtmpBridge::on_publish() +srs_error_t SrsRtmpToRtcBridge::on_publish() { srs_error_t err = srs_success; @@ -798,7 +798,7 @@ srs_error_t SrsRtcFromRtmpBridge::on_publish() return err; } -void SrsRtcFromRtmpBridge::on_unpublish() +void SrsRtmpToRtcBridge::on_unpublish() { if (!rtmp_to_rtc) { return; @@ -814,7 +814,7 @@ void SrsRtcFromRtmpBridge::on_unpublish() source_->on_unpublish(); } -srs_error_t SrsRtcFromRtmpBridge::on_audio(SrsSharedPtrMessage* msg) +srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) { srs_error_t err = srs_success; @@ -880,7 +880,7 @@ srs_error_t SrsRtcFromRtmpBridge::on_audio(SrsSharedPtrMessage* msg) return err; } -srs_error_t SrsRtcFromRtmpBridge::init_codec(SrsAudioCodecId codec) +srs_error_t 
SrsRtmpToRtcBridge::init_codec(SrsAudioCodecId codec) { srs_error_t err = srs_success; @@ -909,7 +909,7 @@ srs_error_t SrsRtcFromRtmpBridge::init_codec(SrsAudioCodecId codec) return err; } -srs_error_t SrsRtcFromRtmpBridge::transcode(SrsAudioFrame* audio) +srs_error_t SrsRtmpToRtcBridge::transcode(SrsAudioFrame* audio) { srs_error_t err = srs_success; @@ -945,7 +945,7 @@ srs_error_t SrsRtcFromRtmpBridge::transcode(SrsAudioFrame* audio) return err; } -srs_error_t SrsRtcFromRtmpBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) +srs_error_t SrsRtmpToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -966,7 +966,7 @@ srs_error_t SrsRtcFromRtmpBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacke return err; } -srs_error_t SrsRtcFromRtmpBridge::on_video(SrsSharedPtrMessage* msg) +srs_error_t SrsRtmpToRtcBridge::on_video(SrsSharedPtrMessage* msg) { srs_error_t err = srs_success; @@ -1047,7 +1047,7 @@ srs_error_t SrsRtcFromRtmpBridge::on_video(SrsSharedPtrMessage* msg) return consume_packets(pkts); } -srs_error_t SrsRtcFromRtmpBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) +srs_error_t SrsRtmpToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) { srs_error_t err = srs_success; @@ -1077,7 +1077,7 @@ srs_error_t SrsRtcFromRtmpBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* fo return err; } -srs_error_t SrsRtcFromRtmpBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) +srs_error_t SrsRtmpToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -1136,7 +1136,7 @@ srs_error_t SrsRtcFromRtmpBridge::package_stap_a(SrsRtcSource* source, SrsShared return err; } -srs_error_t SrsRtcFromRtmpBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) +srs_error_t 
SrsRtmpToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) { srs_error_t err = srs_success; @@ -1232,7 +1232,7 @@ srs_error_t SrsRtcFromRtmpBridge::package_nalus(SrsSharedPtrMessage* msg, const } // Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 -srs_error_t SrsRtcFromRtmpBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) +srs_error_t SrsRtmpToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) { srs_error_t err = srs_success; @@ -1256,7 +1256,7 @@ srs_error_t SrsRtcFromRtmpBridge::package_single_nalu(SrsSharedPtrMessage* msg, return err; } -srs_error_t SrsRtcFromRtmpBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) +srs_error_t SrsRtmpToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) { srs_error_t err = srs_success; @@ -1298,7 +1298,7 @@ srs_error_t SrsRtcFromRtmpBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSamp return err; } -srs_error_t SrsRtcFromRtmpBridge::consume_packets(vector& pkts) +srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) { srs_error_t err = srs_success; @@ -1319,7 +1319,7 @@ srs_error_t SrsRtcFromRtmpBridge::consume_packets(vector& pkts) return err; } -SrsRtmpFromRtcBridge::SrsRtmpFromRtcBridge(SrsLiveSource *src) +SrsRtcToRtmpBridge::SrsRtcToRtmpBridge(SrsLiveSource *src) { source_ = src; codec_ = NULL; @@ -1331,14 +1331,14 @@ SrsRtmpFromRtcBridge::SrsRtmpFromRtcBridge(SrsLiveSource *src) memset(cache_video_pkts_, 0, sizeof(cache_video_pkts_)); } -SrsRtmpFromRtcBridge::~SrsRtmpFromRtcBridge() +SrsRtcToRtmpBridge::~SrsRtcToRtmpBridge() { srs_freep(codec_); srs_freep(format); clear_cached_video(); } -srs_error_t SrsRtmpFromRtcBridge::initialize(SrsRequest* r) +srs_error_t SrsRtcToRtmpBridge::initialize(SrsRequest* r) { srs_error_t err = srs_success; @@ -1364,7 +1364,7 @@ srs_error_t 
SrsRtmpFromRtcBridge::initialize(SrsRequest* r) return err; } -srs_error_t SrsRtmpFromRtcBridge::on_publish() +srs_error_t SrsRtcToRtmpBridge::on_publish() { srs_error_t err = srs_success; @@ -1379,7 +1379,7 @@ srs_error_t SrsRtmpFromRtcBridge::on_publish() return err; } -srs_error_t SrsRtmpFromRtcBridge::on_rtp(SrsRtpPacket *pkt) +srs_error_t SrsRtcToRtmpBridge::on_rtp(SrsRtpPacket *pkt) { srs_error_t err = srs_success; @@ -1402,13 +1402,13 @@ srs_error_t SrsRtmpFromRtcBridge::on_rtp(SrsRtpPacket *pkt) return err; } -void SrsRtmpFromRtcBridge::on_unpublish() +void SrsRtcToRtmpBridge::on_unpublish() { // TODO: FIXME: Should sync with bridge? source_->on_unpublish(); } -srs_error_t SrsRtmpFromRtcBridge::transcode_audio(SrsRtpPacket *pkt) +srs_error_t SrsRtcToRtmpBridge::transcode_audio(SrsRtpPacket *pkt) { srs_error_t err = srs_success; @@ -1457,7 +1457,7 @@ srs_error_t SrsRtmpFromRtcBridge::transcode_audio(SrsRtpPacket *pkt) return err; } -void SrsRtmpFromRtcBridge::packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header) +void SrsRtcToRtmpBridge::packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header) { int rtmp_len = len + 2; audio->header.initialize_audio(rtmp_len, pts, 1); @@ -1474,7 +1474,7 @@ void SrsRtmpFromRtcBridge::packet_aac(SrsCommonMessage* audio, char* data, int l audio->size = rtmp_len; } -srs_error_t SrsRtmpFromRtcBridge::packet_video(SrsRtpPacket* src) +srs_error_t SrsRtcToRtmpBridge::packet_video(SrsRtpPacket* src) { srs_error_t err = srs_success; @@ -1514,7 +1514,7 @@ srs_error_t SrsRtmpFromRtcBridge::packet_video(SrsRtpPacket* src) return err; } -srs_error_t SrsRtmpFromRtcBridge::packet_video_key_frame(SrsRtpPacket* pkt) +srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -1609,7 +1609,7 @@ srs_error_t SrsRtmpFromRtcBridge::packet_video_key_frame(SrsRtpPacket* pkt) return err; } -srs_error_t 
SrsRtmpFromRtcBridge::packet_video_rtmp(const uint16_t start, const uint16_t end) +srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const uint16_t end) { srs_error_t err = srs_success; @@ -1758,7 +1758,7 @@ srs_error_t SrsRtmpFromRtcBridge::packet_video_rtmp(const uint16_t start, const return err; } -int32_t SrsRtmpFromRtcBridge::find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn) +int32_t SrsRtcToRtmpBridge::find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn) { uint32_t last_rtp_ts = cache_video_pkts_[cache_index(header_sn_)].rtp_ts; for (int i = 0; i < s_cache_size; ++i) { @@ -1784,7 +1784,7 @@ int32_t SrsRtmpFromRtcBridge::find_next_lost_sn(uint16_t current_sn, uint16_t& e return -2; } -void SrsRtmpFromRtcBridge::clear_cached_video() +void SrsRtcToRtmpBridge::clear_cached_video() { for (size_t i = 0; i < s_cache_size; i++) { @@ -1798,7 +1798,7 @@ void SrsRtmpFromRtcBridge::clear_cached_video() } } -bool SrsRtmpFromRtcBridge::check_frame_complete(const uint16_t start, const uint16_t end) +bool SrsRtcToRtmpBridge::check_frame_complete(const uint16_t start, const uint16_t end) { int16_t cnt = srs_rtp_seq_distance(start, end) + 1; srs_assert(cnt >= 1); diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 7f702ff2e2..ab66d218f3 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -27,7 +27,7 @@ class SrsSharedPtrMessage; class SrsCommonMessage; class SrsMessageArray; class SrsRtcSource; -class SrsRtcFromRtmpBridge; +class SrsRtmpToRtcBridge; class SrsAudioTranscoder; class SrsRtpPacket; class SrsSample; @@ -245,7 +245,7 @@ class SrsRtcSource : public ISrsFastTimer }; #ifdef SRS_FFMPEG_FIT -class SrsRtcFromRtmpBridge : public ISrsLiveSourceBridge +class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge { private: SrsRequest* req; @@ -267,8 +267,8 @@ class SrsRtcFromRtmpBridge : public ISrsLiveSourceBridge uint8_t audio_payload_type_; uint8_t 
video_payload_type_; public: - SrsRtcFromRtmpBridge(SrsRtcSource* source); - virtual ~SrsRtcFromRtmpBridge(); + SrsRtmpToRtcBridge(SrsRtcSource* source); + virtual ~SrsRtmpToRtcBridge(); public: virtual srs_error_t initialize(SrsRequest* r); virtual srs_error_t on_publish(); @@ -289,7 +289,7 @@ class SrsRtcFromRtmpBridge : public ISrsLiveSourceBridge srs_error_t consume_packets(std::vector& pkts); }; -class SrsRtmpFromRtcBridge : public ISrsRtcSourceBridge +class SrsRtcToRtmpBridge : public ISrsRtcSourceBridge { private: SrsLiveSource *source_; @@ -314,8 +314,8 @@ class SrsRtmpFromRtcBridge : public ISrsRtcSourceBridge uint16_t lost_sn_; int64_t rtp_key_frame_ts_; public: - SrsRtmpFromRtcBridge(SrsLiveSource *src); - virtual ~SrsRtmpFromRtcBridge(); + SrsRtcToRtmpBridge(SrsLiveSource *src); + virtual ~SrsRtcToRtmpBridge(); public: srs_error_t initialize(SrsRequest* r); public: diff --git a/trunk/src/app/srs_app_rtmp_conn.cpp b/trunk/src/app/srs_app_rtmp_conn.cpp index 32b2bd5b48..e97cf5362d 100644 --- a/trunk/src/app/srs_app_rtmp_conn.cpp +++ b/trunk/src/app/srs_app_rtmp_conn.cpp @@ -1092,7 +1092,7 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) // Bridge to RTC streaming. #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc) { - SrsRtcFromRtmpBridge *bridge = new SrsRtcFromRtmpBridge(rtc); + SrsRtmpToRtcBridge *bridge = new SrsRtmpToRtcBridge(rtc); if ((err = bridge->initialize(req)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index df51160597..80d58d7ec9 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -398,7 +398,7 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() // Bridge to RTC streaming. 
#if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc) { - SrsRtcFromRtmpBridge *bridge = new SrsRtcFromRtmpBridge(rtc); + SrsRtmpToRtcBridge *bridge = new SrsRtmpToRtcBridge(rtc); if ((err = bridge->initialize(req_)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); @@ -408,7 +408,7 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() } #endif - SrsRtmpFromSrtBridge *bridger = new SrsRtmpFromSrtBridge(live_source); + SrsSrtToRtmpBridge *bridger = new SrsSrtToRtmpBridge(live_source); if ((err = bridger->initialize(req_)) != srs_success) { srs_freep(bridger); return srs_error_wrap(err, "create bridger"); diff --git a/trunk/src/app/srs_app_srt_source.cpp b/trunk/src/app/srs_app_srt_source.cpp index 4629253ad3..0a620ff2a6 100644 --- a/trunk/src/app/srs_app_srt_source.cpp +++ b/trunk/src/app/srs_app_srt_source.cpp @@ -243,7 +243,7 @@ ISrsSrtSourceBridge::~ISrsSrtSourceBridge() { } -SrsRtmpFromSrtBridge::SrsRtmpFromSrtBridge(SrsLiveSource* source) : ISrsSrtSourceBridge() +SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) : ISrsSrtSourceBridge() { ts_ctx_ = new SrsTsContext(); @@ -260,7 +260,7 @@ SrsRtmpFromSrtBridge::SrsRtmpFromSrtBridge(SrsLiveSource* source) : ISrsSrtSourc pp_audio_duration_ = new SrsAlonePithyPrint(); } -SrsRtmpFromSrtBridge::~SrsRtmpFromSrtBridge() +SrsSrtToRtmpBridge::~SrsSrtToRtmpBridge() { srs_freep(ts_ctx_); srs_freep(req_); @@ -268,7 +268,7 @@ SrsRtmpFromSrtBridge::~SrsRtmpFromSrtBridge() srs_freep(pp_audio_duration_); } -srs_error_t SrsRtmpFromSrtBridge::on_publish() +srs_error_t SrsSrtToRtmpBridge::on_publish() { srs_error_t err = srs_success; @@ -279,7 +279,7 @@ srs_error_t SrsRtmpFromSrtBridge::on_publish() return err; } -srs_error_t SrsRtmpFromSrtBridge::on_packet(SrsSrtPacket *pkt) +srs_error_t SrsSrtToRtmpBridge::on_packet(SrsSrtPacket *pkt) { srs_error_t err = srs_success; @@ -306,12 +306,12 @@ srs_error_t SrsRtmpFromSrtBridge::on_packet(SrsSrtPacket *pkt) return err; } -void 
SrsRtmpFromSrtBridge::on_unpublish() +void SrsSrtToRtmpBridge::on_unpublish() { live_source_->on_unpublish(); } -srs_error_t SrsRtmpFromSrtBridge::initialize(SrsRequest* req) +srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) { srs_error_t err = srs_success; @@ -321,7 +321,7 @@ srs_error_t SrsRtmpFromSrtBridge::initialize(SrsRequest* req) return err; } -srs_error_t SrsRtmpFromSrtBridge::on_ts_message(SrsTsMessage* msg) +srs_error_t SrsSrtToRtmpBridge::on_ts_message(SrsTsMessage* msg) { srs_error_t err = srs_success; @@ -369,7 +369,7 @@ srs_error_t SrsRtmpFromSrtBridge::on_ts_message(SrsTsMessage* msg) return err; } -srs_error_t SrsRtmpFromSrtBridge::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* avs) +srs_error_t SrsSrtToRtmpBridge::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* avs) { srs_error_t err = srs_success; @@ -430,7 +430,7 @@ srs_error_t SrsRtmpFromSrtBridge::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* return on_h264_frame(msg, ipb_frames); } -srs_error_t SrsRtmpFromSrtBridge::check_sps_pps_change(SrsTsMessage* msg) +srs_error_t SrsSrtToRtmpBridge::check_sps_pps_change(SrsTsMessage* msg) { srs_error_t err = srs_success; @@ -477,7 +477,7 @@ srs_error_t SrsRtmpFromSrtBridge::check_sps_pps_change(SrsTsMessage* msg) return err; } -srs_error_t SrsRtmpFromSrtBridge::on_h264_frame(SrsTsMessage* msg, vector >& ipb_frames) +srs_error_t SrsSrtToRtmpBridge::on_h264_frame(SrsTsMessage* msg, vector >& ipb_frames) { srs_error_t err = srs_success; @@ -534,7 +534,7 @@ srs_error_t SrsRtmpFromSrtBridge::on_h264_frame(SrsTsMessage* msg, vector >& ipb_frames) +srs_error_t SrsSrtToRtmpBridge::on_hevc_frame(SrsTsMessage* msg, vector >& ipb_frames) { srs_error_t err = srs_success; @@ -721,7 +721,7 @@ srs_error_t SrsRtmpFromSrtBridge::on_hevc_frame(SrsTsMessage* msg, vector Date: Sat, 28 Jan 2023 12:21:50 +0800 Subject: [PATCH 02/18] Fix typo, bridger to bridge --- trunk/src/app/srs_app_rtc_conn.cpp | 2 +- trunk/src/app/srs_app_rtc_source.cpp | 2 +- 
trunk/src/app/srs_app_rtc_source.hpp | 2 +- trunk/src/app/srs_app_rtmp_conn.cpp | 2 +- trunk/src/app/srs_app_srt_conn.cpp | 12 ++++++------ trunk/src/app/srs_app_srt_source.hpp | 2 +- trunk/src/app/srs_app_statistic.cpp | 2 +- trunk/src/kernel/srs_kernel_error.hpp | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_conn.cpp b/trunk/src/app/srs_app_rtc_conn.cpp index 5abdd8452c..8b209080f1 100644 --- a/trunk/src/app/srs_app_rtc_conn.cpp +++ b/trunk/src/app/srs_app_rtc_conn.cpp @@ -1197,7 +1197,7 @@ srs_error_t SrsRtcPublishStream::initialize(SrsRequest* r, SrsRtcSourceDescripti // especially for stream merging. rtmp->set_cache(false); - SrsRtcToRtmpBridge *bridge = new SrsRtcToRtmpBridge(rtmp); + SrsRtcToRtmpBridge* bridge = new SrsRtcToRtmpBridge(rtmp); if ((err = bridge->initialize(r)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index d37ad9da12..4f9661c7bd 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -464,7 +464,7 @@ SrsContextId SrsRtcSource::pre_source_id() return _pre_source_id; } -void SrsRtcSource::set_bridge(ISrsRtcSourceBridge *bridge) +void SrsRtcSource::set_bridge(ISrsRtcSourceBridge* bridge) { srs_freep(bridge_); bridge_ = bridge; diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index ab66d218f3..8eca0a1ca6 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -205,7 +205,7 @@ class SrsRtcSource : public ISrsFastTimer virtual SrsContextId source_id(); virtual SrsContextId pre_source_id(); public: - void set_bridge(ISrsRtcSourceBridge *bridge); + void set_bridge(ISrsRtcSourceBridge* bridge); public: // Create consumer // @param consumer, output the create consumer. 
diff --git a/trunk/src/app/srs_app_rtmp_conn.cpp b/trunk/src/app/srs_app_rtmp_conn.cpp index e97cf5362d..d0987ce4b5 100644 --- a/trunk/src/app/srs_app_rtmp_conn.cpp +++ b/trunk/src/app/srs_app_rtmp_conn.cpp @@ -1092,7 +1092,7 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) // Bridge to RTC streaming. #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc) { - SrsRtmpToRtcBridge *bridge = new SrsRtmpToRtcBridge(rtc); + SrsRtmpToRtcBridge* bridge = new SrsRtmpToRtcBridge(rtc); if ((err = bridge->initialize(req)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index 80d58d7ec9..4eb1bd17be 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -398,7 +398,7 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() // Bridge to RTC streaming. #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc) { - SrsRtmpToRtcBridge *bridge = new SrsRtmpToRtcBridge(rtc); + SrsRtmpToRtcBridge* bridge = new SrsRtmpToRtcBridge(rtc); if ((err = bridge->initialize(req_)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); @@ -408,13 +408,13 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() } #endif - SrsSrtToRtmpBridge *bridger = new SrsSrtToRtmpBridge(live_source); - if ((err = bridger->initialize(req_)) != srs_success) { - srs_freep(bridger); - return srs_error_wrap(err, "create bridger"); + SrsSrtToRtmpBridge* bridge = new SrsSrtToRtmpBridge(live_source); + if ((err = bridge->initialize(req_)) != srs_success) { + srs_freep(bridge); + return srs_error_wrap(err, "create bridge"); } - srt_source_->set_bridge(bridger); + srt_source_->set_bridge(bridge); } if ((err = srt_source_->on_publish()) != srs_success) { diff --git a/trunk/src/app/srs_app_srt_source.hpp b/trunk/src/app/srs_app_srt_source.hpp index 147e285628..015426fb50 100644 --- a/trunk/src/app/srs_app_srt_source.hpp +++ 
b/trunk/src/app/srs_app_srt_source.hpp @@ -176,7 +176,7 @@ class SrsSrtSource // Update the authentication information in request. virtual void update_auth(SrsRequest* r); public: - void set_bridge(ISrsSrtSourceBridge *bridger); + void set_bridge(ISrsSrtSourceBridge* bridge); public: // Create consumer // @param consumer, output the create consumer. diff --git a/trunk/src/app/srs_app_statistic.cpp b/trunk/src/app/srs_app_statistic.cpp index a355b84976..9942c3490a 100644 --- a/trunk/src/app/srs_app_statistic.cpp +++ b/trunk/src/app/srs_app_statistic.cpp @@ -178,7 +178,7 @@ srs_error_t SrsStatisticStream::dumps(SrsJsonObject* obj) void SrsStatisticStream::publish(std::string id) { - // To prevent duplicated publish event by bridger. + // To prevent duplicated publish event by bridge. if (active) { return; } diff --git a/trunk/src/kernel/srs_kernel_error.hpp b/trunk/src/kernel/srs_kernel_error.hpp index f095d9533a..a315148962 100644 --- a/trunk/src/kernel/srs_kernel_error.hpp +++ b/trunk/src/kernel/srs_kernel_error.hpp @@ -361,7 +361,7 @@ XX(ERROR_RTC_DISABLED , 5021, "RtcDisabled", "RTC is disabled by configuration") \ XX(ERROR_RTC_NO_SESSION , 5022, "RtcNoSession", "Invalid packet for no RTC session matched") \ XX(ERROR_RTC_INVALID_PARAMS , 5023, "RtcInvalidParams", "Invalid API parameters for RTC") \ - XX(ERROR_RTC_DUMMY_BRIDGER , 5024, "RtcDummyBridger", "RTC dummy bridger error") \ + XX(ERROR_RTC_DUMMY_BRIDGE , 5024, "RtcDummyBridge", "RTC dummy bridge error") \ XX(ERROR_RTC_STREM_STARTED , 5025, "RtcStreamStarted", "RTC stream already started") \ XX(ERROR_RTC_TRACK_CODEC , 5026, "RtcTrackCodec", "RTC track codec error") \ XX(ERROR_RTC_NO_PLAYER , 5027, "RtcNoPlayer", "RTC player not found") \ From 16dcf79a05e225546b8d4c4e7ffaa108d6faf191 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 14:03:51 +0800 Subject: [PATCH 03/18] Refine SrsRtmpToRtcBridge interface to on_frame. 
--- trunk/src/app/srs_app_rtc_source.cpp | 10 ++++++++++ trunk/src/app/srs_app_rtc_source.hpp | 4 +++- trunk/src/app/srs_app_source.cpp | 4 ++-- trunk/src/app/srs_app_source.hpp | 3 +-- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index 4f9661c7bd..f7cfbba746 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -814,6 +814,16 @@ void SrsRtmpToRtcBridge::on_unpublish() source_->on_unpublish(); } +srs_error_t SrsRtmpToRtcBridge::on_frame(SrsSharedPtrMessage* frame) +{ + if (frame->is_audio()) { + return on_audio(frame); + } else if (frame->is_video()) { + return on_video(frame); + } + return srs_success; +} + srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) { srs_error_t err = srs_success; diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 8eca0a1ca6..f4901cf2f6 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -273,12 +273,14 @@ class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge virtual srs_error_t initialize(SrsRequest* r); virtual srs_error_t on_publish(); virtual void on_unpublish(); + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +private: virtual srs_error_t on_audio(SrsSharedPtrMessage* msg); private: srs_error_t init_codec(SrsAudioCodecId codec); srs_error_t transcode(SrsAudioFrame* audio); srs_error_t package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt); -public: +private: virtual srs_error_t on_video(SrsSharedPtrMessage* msg); private: srs_error_t filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, std::vector& samples); diff --git a/trunk/src/app/srs_app_source.cpp b/trunk/src/app/srs_app_source.cpp index 705d328fbe..79440738fd 100755 --- a/trunk/src/app/srs_app_source.cpp +++ b/trunk/src/app/srs_app_source.cpp @@ -2319,7 +2319,7 @@ srs_error_t 
SrsLiveSource::on_audio_imp(SrsSharedPtrMessage* msg) } // For bridge to consume the message. - if (bridge_ && (err = bridge_->on_audio(msg)) != srs_success) { + if (bridge_ && (err = bridge_->on_frame(msg)) != srs_success) { return srs_error_wrap(err, "bridge consume audio"); } @@ -2464,7 +2464,7 @@ srs_error_t SrsLiveSource::on_video_imp(SrsSharedPtrMessage* msg) } // For bridge to consume the message. - if (bridge_ && (err = bridge_->on_video(msg)) != srs_success) { + if (bridge_ && (err = bridge_->on_frame(msg)) != srs_success) { return srs_error_wrap(err, "bridge consume video"); } diff --git a/trunk/src/app/srs_app_source.hpp b/trunk/src/app/srs_app_source.hpp index b8025a7bbe..e5348c1e57 100644 --- a/trunk/src/app/srs_app_source.hpp +++ b/trunk/src/app/srs_app_source.hpp @@ -479,8 +479,7 @@ class ISrsLiveSourceBridge virtual ~ISrsLiveSourceBridge(); public: virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_audio(SrsSharedPtrMessage* audio) = 0; - virtual srs_error_t on_video(SrsSharedPtrMessage* video) = 0; + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; virtual void on_unpublish() = 0; }; From e27a407e064d0533cad15631134ddc33dedd7ee2 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 19:39:36 +0800 Subject: [PATCH 04/18] Extract frame builder for RTC to RTMP bridge. 
--- trunk/src/app/srs_app_rtc_source.cpp | 165 ++++++++++++++++++--------- trunk/src/app/srs_app_rtc_source.hpp | 50 +++++--- trunk/src/app/srs_app_source.cpp | 55 ++++----- trunk/src/app/srs_app_source.hpp | 2 + 4 files changed, 169 insertions(+), 103 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index f7cfbba746..6f11d8446d 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -341,6 +341,7 @@ SrsRtcSource::SrsRtcSource() req = NULL; bridge_ = NULL; + frame_builder_ = NULL; pli_for_rtmp_ = pli_elapsed_ = 0; } @@ -351,6 +352,7 @@ SrsRtcSource::~SrsRtcSource() // for all consumers are auto free. consumers.clear(); + srs_freep(frame_builder_); srs_freep(bridge_); srs_freep(req); srs_freep(stream_desc_); @@ -468,6 +470,9 @@ void SrsRtcSource::set_bridge(ISrsRtcSourceBridge* bridge) { srs_freep(bridge_); bridge_ = bridge; + + srs_freep(frame_builder_); + frame_builder_ = new SrsRtcFrameBuilder(bridge); } srs_error_t SrsRtcSource::create_consumer(SrsRtcConsumer*& consumer) @@ -541,6 +546,14 @@ srs_error_t SrsRtcSource::on_publish() // If bridge to other source, handle event and start timer to request PLI. 
if (bridge_) { + if ((err = frame_builder_->initialize(req)) != srs_success) { + return srs_error_wrap(err, "frame builder initialize"); + } + + if ((err = frame_builder_->on_publish()) != srs_success) { + return srs_error_wrap(err, "frame builder on publish"); + } + if ((err = bridge_->on_publish()) != srs_success) { return srs_error_wrap(err, "bridge on publish"); } @@ -585,6 +598,9 @@ void SrsRtcSource::on_unpublish() // For SrsRtcSource::on_timer() _srs_hybrid->timer100ms()->unsubscribe(this); + frame_builder_->on_unpublish(); + srs_freep(frame_builder_); + bridge_->on_unpublish(); srs_freep(bridge_); } @@ -636,8 +652,8 @@ srs_error_t SrsRtcSource::on_rtp(SrsRtpPacket* pkt) } } - if (bridge_ && (err = bridge_->on_rtp(pkt)) != srs_success) { - return srs_error_wrap(err, "bridge consume message"); + if (frame_builder_ && (err = frame_builder_->on_rtp(pkt)) != srs_success) { + return srs_error_wrap(err, "frame builder consume packet"); } return err; @@ -1332,28 +1348,62 @@ srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) SrsRtcToRtmpBridge::SrsRtcToRtmpBridge(SrsLiveSource *src) { source_ = src; +} + +SrsRtcToRtmpBridge::~SrsRtcToRtmpBridge() +{ +} + +srs_error_t SrsRtcToRtmpBridge::initialize(SrsRequest* r) +{ + return srs_success; +} + +srs_error_t SrsRtcToRtmpBridge::on_publish() +{ + srs_error_t err = srs_success; + + // TODO: FIXME: Should sync with bridge? + if ((err = source_->on_publish()) != srs_success) { + return srs_error_wrap(err, "source publish"); + } + + return err; +} + +void SrsRtcToRtmpBridge::on_unpublish() +{ + // TODO: FIXME: Should sync with bridge? 
+ source_->on_unpublish(); +} + +srs_error_t SrsRtcToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) +{ + return source_->on_frame(frame); +} + +SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsRtcSourceBridge* bridge) +{ + bridge_ = bridge; + is_first_audio_ = true; codec_ = NULL; - is_first_audio = true; - is_first_video = true; - format = NULL; - rtp_key_frame_ts_ = -1; header_sn_ = 0; memset(cache_video_pkts_, 0, sizeof(cache_video_pkts_)); + rtp_key_frame_ts_ = -1; } -SrsRtcToRtmpBridge::~SrsRtcToRtmpBridge() +SrsRtcFrameBuilder::~SrsRtcFrameBuilder() { srs_freep(codec_); - srs_freep(format); clear_cached_video(); } -srs_error_t SrsRtcToRtmpBridge::initialize(SrsRequest* r) +srs_error_t SrsRtcFrameBuilder::initialize(SrsRequest* r) { srs_error_t err = srs_success; + srs_freep(codec_); codec_ = new SrsAudioTranscoder(); - format = new SrsRtmpFormat(); SrsAudioCodecId from = SrsAudioCodecIdOpus; // TODO: From SDP? SrsAudioCodecId to = SrsAudioCodecIdAAC; // The output audio codec. @@ -1364,32 +1414,21 @@ srs_error_t SrsRtcToRtmpBridge::initialize(SrsRequest* r) return srs_error_wrap(err, "bridge initialize"); } - if ((err = format->initialize()) != srs_success) { - return srs_error_wrap(err, "format initialize"); - } - - // Setup the SPS/PPS parsing strategy. - format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); - return err; } -srs_error_t SrsRtcToRtmpBridge::on_publish() +srs_error_t SrsRtcFrameBuilder::on_publish() { - srs_error_t err = srs_success; - - is_first_audio = true; - is_first_video = true; + is_first_audio_ = true; - // TODO: FIXME: Should sync with bridge? 
- if ((err = source_->on_publish()) != srs_success) { - return srs_error_wrap(err, "source publish"); - } + return srs_success; +} - return err; +void SrsRtcFrameBuilder::on_unpublish() +{ } -srs_error_t SrsRtcToRtmpBridge::on_rtp(SrsRtpPacket *pkt) +srs_error_t SrsRtcFrameBuilder::on_rtp(SrsRtpPacket *pkt) { srs_error_t err = srs_success; @@ -1397,7 +1436,7 @@ srs_error_t SrsRtcToRtmpBridge::on_rtp(SrsRtpPacket *pkt) return err; } - // Have no received any sender report, can't calculate avsync_time, + // Have no received any sender report, can't calculate avsync_time, // discard it to avoid timestamp problem in live source if (pkt->get_avsync_time() <= 0) { return err; @@ -1412,31 +1451,30 @@ srs_error_t SrsRtcToRtmpBridge::on_rtp(SrsRtpPacket *pkt) return err; } -void SrsRtcToRtmpBridge::on_unpublish() -{ - // TODO: FIXME: Should sync with bridge? - source_->on_unpublish(); -} - -srs_error_t SrsRtcToRtmpBridge::transcode_audio(SrsRtpPacket *pkt) +srs_error_t SrsRtcFrameBuilder::transcode_audio(SrsRtpPacket *pkt) { srs_error_t err = srs_success; // to common message. 
uint32_t ts = pkt->get_avsync_time(); - if (is_first_audio) { + if (is_first_audio_) { int header_len = 0; uint8_t* header = NULL; codec_->aac_codec_header(&header, &header_len); SrsCommonMessage out_rtmp; - packet_aac(&out_rtmp, (char *)header, header_len, ts, is_first_audio); + packet_aac(&out_rtmp, (char *)header, header_len, ts, is_first_audio_); - if ((err = source_->on_audio(&out_rtmp)) != srs_success) { + SrsSharedPtrMessage msg; + if ((err = msg.create(&out_rtmp)) != srs_success) { + return srs_error_wrap(err, "create message"); + } + + if ((err = bridge_->on_frame(&msg)) != srs_success) { return srs_error_wrap(err, "source on audio"); } - is_first_audio = false; + is_first_audio_ = false; } std::vector out_pkts; @@ -1455,9 +1493,14 @@ srs_error_t SrsRtcToRtmpBridge::transcode_audio(SrsRtpPacket *pkt) for (std::vector::iterator it = out_pkts.begin(); it != out_pkts.end(); ++it) { SrsCommonMessage out_rtmp; out_rtmp.header.timestamp = (*it)->dts; - packet_aac(&out_rtmp, (*it)->samples[0].bytes, (*it)->samples[0].size, ts, is_first_audio); + packet_aac(&out_rtmp, (*it)->samples[0].bytes, (*it)->samples[0].size, ts, is_first_audio_); - if ((err = source_->on_audio(&out_rtmp)) != srs_success) { + SrsSharedPtrMessage msg; + if ((err = msg.create(&out_rtmp)) != srs_success) { + return srs_error_wrap(err, "create message"); + } + + if ((err = bridge_->on_frame(&msg)) != srs_success) { err = srs_error_wrap(err, "source on audio"); break; } @@ -1467,7 +1510,7 @@ srs_error_t SrsRtcToRtmpBridge::transcode_audio(SrsRtpPacket *pkt) return err; } -void SrsRtcToRtmpBridge::packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header) +void SrsRtcFrameBuilder::packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header) { int rtmp_len = len + 2; audio->header.initialize_audio(rtmp_len, pts, 1); @@ -1484,7 +1527,7 @@ void SrsRtcToRtmpBridge::packet_aac(SrsCommonMessage* audio, char* data, int len audio->size = rtmp_len; } 
-srs_error_t SrsRtcToRtmpBridge::packet_video(SrsRtpPacket* src) +srs_error_t SrsRtcFrameBuilder::packet_video(SrsRtpPacket* src) { srs_error_t err = srs_success; @@ -1524,7 +1567,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video(SrsRtpPacket* src) return err; } -srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) +srs_error_t SrsRtcFrameBuilder::packet_video_key_frame(SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -1549,7 +1592,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) char* flv = NULL; int nb_flv = 0; if ((err = avc->mux_avc2flv(sh, SrsVideoAvcFrameTypeKeyFrame, SrsVideoAvcFrameTraitSequenceHeader, pkt->get_avsync_time(), - pkt->get_avsync_time(), &flv, &nb_flv)) != srs_success) { + pkt->get_avsync_time(), &flv, &nb_flv)) != srs_success) { return srs_error_wrap(err, "avc to flv"); } @@ -1560,7 +1603,12 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) return srs_error_wrap(err, "create rtmp"); } - if ((err = source_->on_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage msg; + if ((err = msg.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create message"); + } + + if ((err = bridge_->on_frame(&msg)) != srs_success) { return err; } } @@ -1583,7 +1631,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) lost_sn_ = header_sn_ + 1; clear_cached_video(); srs_warn("drop old ts=%u, header=%hu, lost=%hu, set new ts=%u, header=%hu, lost=%hu", - (uint32_t)old_ts, old_header_sn, old_lost_sn, (uint32_t)rtp_key_frame_ts_, header_sn_, lost_sn_); + (uint32_t)old_ts, old_header_sn, old_lost_sn, (uint32_t)rtp_key_frame_ts_, header_sn_, lost_sn_); } uint16_t index = cache_index(pkt->header.get_sequence()); @@ -1619,7 +1667,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_key_frame(SrsRtpPacket* pkt) return err; } -srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const uint16_t end) +srs_error_t 
SrsRtcFrameBuilder::packet_video_rtmp(const uint16_t start, const uint16_t end) { srs_error_t err = srs_success; @@ -1649,7 +1697,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const ui if (stap_payload) { for (int j = 0; j < (int)stap_payload->nalus.size(); ++j) { SrsSample* sample = stap_payload->nalus.at(j); - if (sample->size > 0) { + if (sample->size > 0) { nb_payload += 4 + sample->size; } } @@ -1667,7 +1715,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const ui srs_warn("empty nalu"); return err; } - + //type_codec1 + avc_type + composition time + nalu size + nalu nb_payload += 1 + 1 + 3; @@ -1728,7 +1776,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const ui if (stap_payload) { for (int j = 0; j < (int)stap_payload->nalus.size(); ++j) { SrsSample* sample = stap_payload->nalus.at(j); - if (sample->size > 0) { + if (sample->size > 0) { payload.write_4bytes(sample->size); payload.write_bytes(sample->bytes, sample->size); } @@ -1748,7 +1796,12 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const ui srs_freep(pkt); } - if ((err = source_->on_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage msg; + if ((err = msg.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create message"); + } + + if ((err = bridge_->on_frame(&msg)) != srs_success) { srs_warn("fail to pack video frame"); } @@ -1768,7 +1821,7 @@ srs_error_t SrsRtcToRtmpBridge::packet_video_rtmp(const uint16_t start, const ui return err; } -int32_t SrsRtcToRtmpBridge::find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn) +int32_t SrsRtcFrameBuilder::find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn) { uint32_t last_rtp_ts = cache_video_pkts_[cache_index(header_sn_)].rtp_ts; for (int i = 0; i < s_cache_size; ++i) { @@ -1794,7 +1847,7 @@ int32_t SrsRtcToRtmpBridge::find_next_lost_sn(uint16_t current_sn, uint16_t& end return -2; } -void 
SrsRtcToRtmpBridge::clear_cached_video() +void SrsRtcFrameBuilder::clear_cached_video() { for (size_t i = 0; i < s_cache_size; i++) { @@ -1808,7 +1861,7 @@ void SrsRtcToRtmpBridge::clear_cached_video() } } -bool SrsRtcToRtmpBridge::check_frame_complete(const uint16_t start, const uint16_t end) +bool SrsRtcFrameBuilder::check_frame_complete(const uint16_t start, const uint16_t end) { int16_t cnt = srs_rtp_seq_distance(start, end) + 1; srs_assert(cnt >= 1); diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index f4901cf2f6..3dd4c777e9 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -38,6 +38,7 @@ class SrsRtpRingBuffer; class SrsRtpNackForReceiver; class SrsJsonObject; class SrsErrorPithyPrint; +class SrsRtcFrameBuilder; class SrsNtp { @@ -153,7 +154,7 @@ class ISrsRtcSourceBridge virtual ~ISrsRtcSourceBridge(); public: virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_rtp(SrsRtpPacket *pkt) = 0; + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; virtual void on_unpublish() = 0; }; @@ -172,6 +173,9 @@ class SrsRtcSource : public ISrsFastTimer ISrsRtcPublishStream* publish_stream_; // Steam description for this steam. SrsRtcSourceDescription* stream_desc_; +private: + // Collect and build WebRTC RTP packets to AV frames. + SrsRtcFrameBuilder* frame_builder_; // The Source bridge, bridge stream to other source. ISrsRtcSourceBridge* bridge_; private: @@ -245,6 +249,8 @@ class SrsRtcSource : public ISrsFastTimer }; #ifdef SRS_FFMPEG_FIT + +// A bridge to covert RTMP to WebRTC stream. class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge { private: @@ -291,16 +297,32 @@ class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge srs_error_t consume_packets(std::vector& pkts); }; +// A bridge to covert WebRTC to RTMP stream. 
class SrsRtcToRtmpBridge : public ISrsRtcSourceBridge { private: SrsLiveSource *source_; - SrsAudioTranscoder *codec_; - bool is_first_audio; - bool is_first_video; - // The format, codec information. - SrsRtmpFormat* format; +public: + SrsRtcToRtmpBridge(SrsLiveSource *src); + virtual ~SrsRtcToRtmpBridge(); +public: + srs_error_t initialize(SrsRequest* r); +public: + virtual srs_error_t on_publish(); + virtual void on_unpublish(); + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +}; +// Collect and build WebRTC RTP packets to AV frames. +class SrsRtcFrameBuilder +{ +private: + ISrsRtcSourceBridge* bridge_; +private: + bool is_first_audio_; + SrsAudioTranscoder *codec_; +private: + const static uint16_t s_cache_size = 512; //TODO:use SrsRtpRingBuffer //TODO:jitter buffer class struct RtcPacketCache { @@ -310,33 +332,33 @@ class SrsRtcToRtmpBridge : public ISrsRtcSourceBridge uint32_t rtp_ts; SrsRtpPacket* pkt; }; - const static uint16_t s_cache_size = 512; RtcPacketCache cache_video_pkts_[s_cache_size]; uint16_t header_sn_; uint16_t lost_sn_; int64_t rtp_key_frame_ts_; public: - SrsRtcToRtmpBridge(SrsLiveSource *src); - virtual ~SrsRtcToRtmpBridge(); + SrsRtcFrameBuilder(ISrsRtcSourceBridge* bridge); + virtual ~SrsRtcFrameBuilder(); public: srs_error_t initialize(SrsRequest* r); -public: virtual srs_error_t on_publish(); - virtual srs_error_t on_rtp(SrsRtpPacket *pkt); virtual void on_unpublish(); + virtual srs_error_t on_rtp(SrsRtpPacket *pkt); private: srs_error_t transcode_audio(SrsRtpPacket *pkt); void packet_aac(SrsCommonMessage* audio, char* data, int len, uint32_t pts, bool is_header); +private: srs_error_t packet_video(SrsRtpPacket* pkt); srs_error_t packet_video_key_frame(SrsRtpPacket* pkt); - srs_error_t packet_video_rtmp(const uint16_t start, const uint16_t end); - int32_t find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn); - void clear_cached_video(); inline uint16_t cache_index(uint16_t current_sn) { return current_sn % s_cache_size; } 
+ int32_t find_next_lost_sn(uint16_t current_sn, uint16_t& end_sn); bool check_frame_complete(const uint16_t start, const uint16_t end); + srs_error_t packet_video_rtmp(const uint16_t start, const uint16_t end); + void clear_cached_video(); }; + #endif // TODO: FIXME: Rename it. diff --git a/trunk/src/app/srs_app_source.cpp b/trunk/src/app/srs_app_source.cpp index 79440738fd..0b10f588f1 100755 --- a/trunk/src/app/srs_app_source.cpp +++ b/trunk/src/app/srs_app_source.cpp @@ -2243,31 +2243,42 @@ srs_error_t SrsLiveSource::on_meta_data(SrsCommonMessage* msg, SrsOnMetaDataPack srs_error_t SrsLiveSource::on_audio(SrsCommonMessage* shared_audio) { srs_error_t err = srs_success; - + // Detect where stream is monotonically increasing. if (!mix_correct && is_monotonically_increase) { if (last_packet_time > 0 && shared_audio->header.timestamp < last_packet_time) { is_monotonically_increase = false; srs_warn("AUDIO: Timestamp %" PRId64 "=>%" PRId64 ", may need mix_correct.", - last_packet_time, shared_audio->header.timestamp); + last_packet_time, shared_audio->header.timestamp); } } last_packet_time = shared_audio->header.timestamp; - + // convert shared_audio to msg, user should not use shared_audio again. // the payload is transfer to msg, and set to NULL in shared_audio. SrsSharedPtrMessage msg; if ((err = msg.create(shared_audio)) != srs_success) { return srs_error_wrap(err, "create message"); } + + return on_frame(&msg); +} + +srs_error_t SrsLiveSource::on_frame(SrsSharedPtrMessage* msg) +{ + srs_error_t err = srs_success; // directly process the audio message. if (!mix_correct) { - return on_audio_imp(&msg); + if (msg->is_audio()) { + return on_audio_imp(msg); + } else { + return on_video_imp(msg); + } } // insert msg to the queue. - mix_queue->push(msg.copy()); + mix_queue->push(msg->copy()); // fetch someone from mix queue. 
SrsSharedPtrMessage* m = mix_queue->pop(); @@ -2372,11 +2383,11 @@ srs_error_t SrsLiveSource::on_video(SrsCommonMessage* shared_video) if (last_packet_time > 0 && shared_video->header.timestamp < last_packet_time) { is_monotonically_increase = false; srs_warn("VIDEO: Timestamp %" PRId64 "=>%" PRId64 ", may need mix_correct.", - last_packet_time, shared_video->header.timestamp); + last_packet_time, shared_video->header.timestamp); } } last_packet_time = shared_video->header.timestamp; - + // drop any unknown header video. // @see https://github.com/ossrs/srs/issues/421 if (!SrsFlvVideo::acceptable(shared_video->payload, shared_video->size)) { @@ -2384,41 +2395,19 @@ srs_error_t SrsLiveSource::on_video(SrsCommonMessage* shared_video) if (shared_video->size > 0) { b0 = shared_video->payload[0]; } - + srs_warn("drop unknown header video, size=%d, bytes[0]=%#x", shared_video->size, b0); return err; } - + // convert shared_video to msg, user should not use shared_video again. // the payload is transfer to msg, and set to NULL in shared_video. SrsSharedPtrMessage msg; if ((err = msg.create(shared_video)) != srs_success) { return srs_error_wrap(err, "create message"); } - - // directly process the video message. - if (!mix_correct) { - return on_video_imp(&msg); - } - - // insert msg to the queue. - mix_queue->push(msg.copy()); - - // fetch someone from mix queue. - SrsSharedPtrMessage* m = mix_queue->pop(); - if (!m) { - return err; - } - - // consume the monotonically increase message. 
- if (m->is_audio()) { - err = on_audio_imp(m); - } else { - err = on_video_imp(m); - } - srs_freep(m); - - return err; + + return on_frame(&msg); } srs_error_t SrsLiveSource::on_video_imp(SrsSharedPtrMessage* msg) diff --git a/trunk/src/app/srs_app_source.hpp b/trunk/src/app/srs_app_source.hpp index e5348c1e57..d1452783d8 100644 --- a/trunk/src/app/srs_app_source.hpp +++ b/trunk/src/app/srs_app_source.hpp @@ -478,6 +478,7 @@ class ISrsLiveSourceBridge ISrsLiveSourceBridge(); virtual ~ISrsLiveSourceBridge(); public: + virtual srs_error_t initialize(SrsRequest* r) = 0; virtual srs_error_t on_publish() = 0; virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; virtual void on_unpublish() = 0; @@ -568,6 +569,7 @@ class SrsLiveSource : public ISrsReloadHandler public: // TODO: FIXME: Use SrsSharedPtrMessage instead. virtual srs_error_t on_audio(SrsCommonMessage* audio); + srs_error_t on_frame(SrsSharedPtrMessage* msg); private: virtual srs_error_t on_audio_imp(SrsSharedPtrMessage* audio); public: From e6915880b2575011ed5f0935e54c72666a68c416 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 20:21:31 +0800 Subject: [PATCH 05/18] Fix build fail. --- trunk/src/app/srs_app_rtc_source.cpp | 12 ++++++++++++ trunk/src/app/srs_app_rtc_source.hpp | 2 ++ 2 files changed, 14 insertions(+) diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index 6f11d8446d..46d3c2fed3 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -341,7 +341,9 @@ SrsRtcSource::SrsRtcSource() req = NULL; bridge_ = NULL; +#ifdef SRS_FFMPEG_FIT frame_builder_ = NULL; +#endif pli_for_rtmp_ = pli_elapsed_ = 0; } @@ -352,7 +354,9 @@ SrsRtcSource::~SrsRtcSource() // for all consumers are auto free. 
consumers.clear(); +#ifdef SRS_FFMPEG_FIT srs_freep(frame_builder_); +#endif srs_freep(bridge_); srs_freep(req); srs_freep(stream_desc_); @@ -471,8 +475,10 @@ void SrsRtcSource::set_bridge(ISrsRtcSourceBridge* bridge) srs_freep(bridge_); bridge_ = bridge; +#ifdef SRS_FFMPEG_FIT srs_freep(frame_builder_); frame_builder_ = new SrsRtcFrameBuilder(bridge); +#endif } srs_error_t SrsRtcSource::create_consumer(SrsRtcConsumer*& consumer) @@ -546,6 +552,7 @@ srs_error_t SrsRtcSource::on_publish() // If bridge to other source, handle event and start timer to request PLI. if (bridge_) { +#ifdef SRS_FFMPEG_FIT if ((err = frame_builder_->initialize(req)) != srs_success) { return srs_error_wrap(err, "frame builder initialize"); } @@ -553,6 +560,7 @@ srs_error_t SrsRtcSource::on_publish() if ((err = frame_builder_->on_publish()) != srs_success) { return srs_error_wrap(err, "frame builder on publish"); } +#endif if ((err = bridge_->on_publish()) != srs_success) { return srs_error_wrap(err, "bridge on publish"); @@ -598,8 +606,10 @@ void SrsRtcSource::on_unpublish() // For SrsRtcSource::on_timer() _srs_hybrid->timer100ms()->unsubscribe(this); +#ifdef SRS_FFMPEG_FIT frame_builder_->on_unpublish(); srs_freep(frame_builder_); +#endif bridge_->on_unpublish(); srs_freep(bridge_); @@ -652,9 +662,11 @@ srs_error_t SrsRtcSource::on_rtp(SrsRtpPacket* pkt) } } +#ifdef SRS_FFMPEG_FIT if (frame_builder_ && (err = frame_builder_->on_rtp(pkt)) != srs_success) { return srs_error_wrap(err, "frame builder consume packet"); } +#endif return err; } diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 3dd4c777e9..8fbff12065 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -174,8 +174,10 @@ class SrsRtcSource : public ISrsFastTimer // Steam description for this steam. SrsRtcSourceDescription* stream_desc_; private: +#ifdef SRS_FFMPEG_FIT // Collect and build WebRTC RTP packets to AV frames. 
SrsRtcFrameBuilder* frame_builder_; +#endif // The Source bridge, bridge stream to other source. ISrsRtcSourceBridge* bridge_; private: From 6b0e711b99eaf2e65d69eb5b6d5221c99785d9a5 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 21:34:55 +0800 Subject: [PATCH 06/18] Extract frame builder for SRT to RTMP bridge. --- trunk/src/app/srs_app_srt_source.cpp | 156 ++++++++++++++++++++------- trunk/src/app/srs_app_srt_source.hpp | 38 +++++-- 2 files changed, 143 insertions(+), 51 deletions(-) diff --git a/trunk/src/app/srs_app_srt_source.cpp b/trunk/src/app/srs_app_srt_source.cpp index 0a620ff2a6..26fc78002e 100644 --- a/trunk/src/app/srs_app_srt_source.cpp +++ b/trunk/src/app/srs_app_srt_source.cpp @@ -243,7 +243,42 @@ ISrsSrtSourceBridge::~ISrsSrtSourceBridge() { } -SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) : ISrsSrtSourceBridge() +SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) +{ + live_source_ = source; +} + +SrsSrtToRtmpBridge::~SrsSrtToRtmpBridge() +{ +} + +srs_error_t SrsSrtToRtmpBridge::on_publish() +{ + srs_error_t err = srs_success; + + if ((err = live_source_->on_publish()) != srs_success) { + return srs_error_wrap(err, "on publish"); + } + + return err; +} + +srs_error_t SrsSrtToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) +{ + return live_source_->on_frame(frame); +} + +void SrsSrtToRtmpBridge::on_unpublish() +{ + live_source_->on_unpublish(); +} + +srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) +{ + return srs_success; +} + +SrsSrtFrameBuilder::SrsSrtFrameBuilder(ISrsSrtSourceBridge* bridge) { ts_ctx_ = new SrsTsContext(); @@ -252,7 +287,7 @@ SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) : ISrsSrtSourceBri pps_ = ""; req_ = NULL; - live_source_ = source; + bridge_ = bridge; video_streamid_ = 1; audio_streamid_ = 2; @@ -260,7 +295,7 @@ SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) : ISrsSrtSourceBri pp_audio_duration_ = new SrsAlonePithyPrint(); } 
-SrsSrtToRtmpBridge::~SrsSrtToRtmpBridge() +SrsSrtFrameBuilder::~SrsSrtFrameBuilder() { srs_freep(ts_ctx_); srs_freep(req_); @@ -268,18 +303,12 @@ SrsSrtToRtmpBridge::~SrsSrtToRtmpBridge() srs_freep(pp_audio_duration_); } -srs_error_t SrsSrtToRtmpBridge::on_publish() +srs_error_t SrsSrtFrameBuilder::on_publish() { - srs_error_t err = srs_success; - - if ((err = live_source_->on_publish()) != srs_success) { - return srs_error_wrap(err, "on publish"); - } - - return err; + return srs_success; } -srs_error_t SrsSrtToRtmpBridge::on_packet(SrsSrtPacket *pkt) +srs_error_t SrsSrtFrameBuilder::on_packet(SrsSrtPacket *pkt) { srs_error_t err = srs_success; @@ -290,10 +319,10 @@ srs_error_t SrsSrtToRtmpBridge::on_packet(SrsSrtPacket *pkt) int nb_packet = nb_buf / SRS_TS_PACKET_SIZE; for (int i = 0; i < nb_packet; i++) { char* p = buf + (i * SRS_TS_PACKET_SIZE); - + SrsBuffer* stream = new SrsBuffer(p, SRS_TS_PACKET_SIZE); SrsAutoFree(SrsBuffer, stream); - + // Process each ts packet. Note that the jitter of UDP may cause video glitch when packet loss or wrong seq. 
We // don't handle it because SRT will, see tlpktdrop at https://ossrs.net/lts/zh-cn/docs/v4/doc/srt-params if ((err = ts_ctx_->decode(stream, this)) != srs_success) { @@ -302,16 +331,15 @@ srs_error_t SrsSrtToRtmpBridge::on_packet(SrsSrtPacket *pkt) continue; } } - + return err; } -void SrsSrtToRtmpBridge::on_unpublish() +void SrsSrtFrameBuilder::on_unpublish() { - live_source_->on_unpublish(); } -srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) +srs_error_t SrsSrtFrameBuilder::initialize(SrsRequest* req) { srs_error_t err = srs_success; @@ -321,7 +349,7 @@ srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) return err; } -srs_error_t SrsSrtToRtmpBridge::on_ts_message(SrsTsMessage* msg) +srs_error_t SrsSrtFrameBuilder::on_ts_message(SrsTsMessage* msg) { srs_error_t err = srs_success; @@ -369,7 +397,7 @@ srs_error_t SrsSrtToRtmpBridge::on_ts_message(SrsTsMessage* msg) return err; } -srs_error_t SrsSrtToRtmpBridge::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* avs) +srs_error_t SrsSrtFrameBuilder::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* avs) { srs_error_t err = srs_success; @@ -430,7 +458,7 @@ srs_error_t SrsSrtToRtmpBridge::on_ts_video_avc(SrsTsMessage* msg, SrsBuffer* av return on_h264_frame(msg, ipb_frames); } -srs_error_t SrsSrtToRtmpBridge::check_sps_pps_change(SrsTsMessage* msg) +srs_error_t SrsSrtFrameBuilder::check_sps_pps_change(SrsTsMessage* msg) { srs_error_t err = srs_success; @@ -470,14 +498,19 @@ srs_error_t SrsSrtToRtmpBridge::check_sps_pps_change(SrsTsMessage* msg) return srs_error_wrap(err, "create rtmp"); } - if ((err = live_source_->on_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage frame; + if ((err = frame.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create frame"); + } + + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err, "srt to rtmp sps/pps"); } return err; } -srs_error_t SrsSrtToRtmpBridge::on_h264_frame(SrsTsMessage* msg, vector >& ipb_frames) +srs_error_t 
SrsSrtFrameBuilder::on_h264_frame(SrsTsMessage* msg, vector >& ipb_frames) { srs_error_t err = srs_success; @@ -526,7 +559,12 @@ srs_error_t SrsSrtToRtmpBridge::on_h264_frame(SrsTsMessage* msg, vectoron_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage frame; + if ((err = frame.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create frame"); + } + + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err ,"srt ts video to rtmp"); } @@ -534,7 +572,7 @@ srs_error_t SrsSrtToRtmpBridge::on_h264_frame(SrsTsMessage* msg, vectoron_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage frame; + if ((err = frame.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create frame"); + } + + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err, "srt to rtmp sps/pps"); } return err; } -srs_error_t SrsSrtToRtmpBridge::on_hevc_frame(SrsTsMessage* msg, vector >& ipb_frames) +srs_error_t SrsSrtFrameBuilder::on_hevc_frame(SrsTsMessage* msg, vector >& ipb_frames) { srs_error_t err = srs_success; @@ -713,7 +756,12 @@ srs_error_t SrsSrtToRtmpBridge::on_hevc_frame(SrsTsMessage* msg, vectoron_video(&rtmp)) != srs_success) { + SrsSharedPtrMessage frame; + if ((err = frame.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create frame"); + } + + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err ,"srt ts hevc video to rtmp"); } @@ -721,7 +769,7 @@ srs_error_t SrsSrtToRtmpBridge::on_hevc_frame(SrsTsMessage* msg, vectoron_audio(&rtmp)) != srs_success) { + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err, "srt to rtmp audio sh"); } return err; } -srs_error_t SrsSrtToRtmpBridge::on_aac_frame(SrsTsMessage* msg, uint32_t pts, char* frame, int frame_size) +srs_error_t SrsSrtFrameBuilder::on_aac_frame(SrsTsMessage* msg, uint32_t pts, char* data, int data_size) { srs_error_t err = srs_success; - int rtmp_len = frame_size + 2/* 2 bytes of flv 
audio tag header*/; + int rtmp_len = data_size + 2/* 2 bytes of flv audio tag header*/; SrsCommonMessage rtmp; rtmp.header.initialize_audio(rtmp_len, pts, audio_streamid_); @@ -842,9 +895,14 @@ srs_error_t SrsSrtToRtmpBridge::on_aac_frame(SrsTsMessage* msg, uint32_t pts, ch stream.write_1bytes(aac_flag); stream.write_1bytes(1); // Write audio frame. - stream.write_bytes(frame, frame_size); + stream.write_bytes(data, data_size); + + SrsSharedPtrMessage frame; + if ((err = frame.create(&rtmp)) != srs_success) { + return srs_error_wrap(err, "create frame"); + } - if ((err = live_source_->on_audio(&rtmp)) != srs_success) { + if ((err = bridge_->on_frame(&frame)) != srs_success) { return srs_error_wrap(err, "srt to rtmp audio sh"); } @@ -855,6 +913,7 @@ SrsSrtSource::SrsSrtSource() { req = NULL; can_publish_ = true; + frame_builder_ = NULL; bridge_ = NULL; } @@ -864,6 +923,7 @@ SrsSrtSource::~SrsSrtSource() // for all consumers are auto free. consumers.clear(); + srs_freep(frame_builder_); srs_freep(bridge_); } @@ -918,6 +978,9 @@ void SrsSrtSource::set_bridge(ISrsSrtSourceBridge* bridge) { srs_freep(bridge_); bridge_ = bridge; + + srs_freep(frame_builder_); + frame_builder_ = new SrsSrtFrameBuilder(bridge); } srs_error_t SrsSrtSource::create_consumer(SrsSrtConsumer*& consumer) @@ -964,8 +1027,18 @@ srs_error_t SrsSrtSource::on_publish() return srs_error_wrap(err, "source id change"); } - if (bridge_ && (err = bridge_->on_publish()) != srs_success) { - return srs_error_wrap(err, "bridge on publish"); + if (bridge_) { + if ((err = frame_builder_->initialize(req)) != srs_success) { + return srs_error_wrap(err, "frame builder initialize"); + } + + if ((err = frame_builder_->on_publish()) != srs_success) { + return srs_error_wrap(err, "frame builder on publish"); + } + + if ((err = bridge_->on_publish()) != srs_success) { + return srs_error_wrap(err, "bridge on publish"); + } } SrsStatistic* stat = SrsStatistic::instance(); @@ -984,9 +1057,12 @@ void 
SrsSrtSource::on_unpublish() can_publish_ = true; if (bridge_) { + frame_builder_->on_unpublish(); + srs_freep(frame_builder_); + bridge_->on_unpublish(); + srs_freep(bridge_); } - srs_freep(bridge_); } srs_error_t SrsSrtSource::on_packet(SrsSrtPacket* packet) @@ -1000,7 +1076,7 @@ srs_error_t SrsSrtSource::on_packet(SrsSrtPacket* packet) } } - if (bridge_ && (err = bridge_->on_packet(packet)) != srs_success) { + if (frame_builder_ && (err = frame_builder_->on_packet(packet)) != srs_success) { return srs_error_wrap(err, "bridge consume message"); } diff --git a/trunk/src/app/srs_app_srt_source.hpp b/trunk/src/app/srs_app_srt_source.hpp index 015426fb50..f940ca41c8 100644 --- a/trunk/src/app/srs_app_srt_source.hpp +++ b/trunk/src/app/srs_app_srt_source.hpp @@ -21,6 +21,7 @@ class SrsRequest; class SrsLiveSource; class SrsSrtSource; class SrsAlonePithyPrint; +class SrsSrtFrameBuilder; // The SRT packet with shared message. class SrsSrtPacket @@ -98,21 +99,38 @@ class ISrsSrtSourceBridge virtual ~ISrsSrtSourceBridge(); public: virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_packet(SrsSrtPacket *pkt) = 0; + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; virtual void on_unpublish() = 0; }; -class SrsSrtToRtmpBridge : public ISrsSrtSourceBridge, public ISrsTsHandler +// A bridge to covert SRT to RTMP stream. +class SrsSrtToRtmpBridge : public ISrsSrtSourceBridge { public: SrsSrtToRtmpBridge(SrsLiveSource* source); virtual ~SrsSrtToRtmpBridge(); public: virtual srs_error_t on_publish(); - virtual srs_error_t on_packet(SrsSrtPacket *pkt); + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); virtual void on_unpublish(); public: srs_error_t initialize(SrsRequest* req); +private: + SrsLiveSource* live_source_; +}; + +// Collect and build SRT TS packet to AV frames. 
+class SrsSrtFrameBuilder : public ISrsTsHandler +{ +public: + SrsSrtFrameBuilder(ISrsSrtSourceBridge* bridge); + virtual ~SrsSrtFrameBuilder(); +public: + srs_error_t initialize(SrsRequest* r); +public: + virtual srs_error_t on_publish(); + virtual srs_error_t on_packet(SrsSrtPacket* pkt); + virtual void on_unpublish(); // Interface ISrsTsHandler public: virtual srs_error_t on_ts_message(SrsTsMessage* msg); @@ -123,35 +141,31 @@ class SrsSrtToRtmpBridge : public ISrsSrtSourceBridge, public ISrsTsHandler srs_error_t on_h264_frame(SrsTsMessage* msg, std::vector >& ipb_frames); srs_error_t check_audio_sh_change(SrsTsMessage* msg, uint32_t pts); srs_error_t on_aac_frame(SrsTsMessage* msg, uint32_t pts, char* frame, int frame_size); - #ifdef SRS_H265 srs_error_t on_ts_video_hevc(SrsTsMessage *msg, SrsBuffer *avs); srs_error_t check_vps_sps_pps_change(SrsTsMessage *msg); srs_error_t on_hevc_frame(SrsTsMessage *msg, std::vector> &ipb_frames); #endif - +private: + ISrsSrtSourceBridge* bridge_; private: SrsTsContext* ts_ctx_; - // Record sps/pps had changed, if change, need to generate new video sh frame. bool sps_pps_change_; std::string sps_; std::string pps_; - #ifdef SRS_H265 bool vps_sps_pps_change_; std::string hevc_vps_; std::string hevc_sps_; std::string hevc_pps_; #endif - // Record audio sepcific config had changed, if change, need to generate new audio sh frame. bool audio_sh_change_; std::string audio_sh_; - +private: SrsRequest* req_; - SrsLiveSource* live_source_; - +private: // SRT to rtmp, video stream id. int video_streamid_; // SRT to rtmp, audio stream id. @@ -201,6 +215,8 @@ class SrsSrtSource // To delivery packets to clients. std::vector consumers; bool can_publish_; +private: + SrsSrtFrameBuilder* frame_builder_; ISrsSrtSourceBridge* bridge_; }; From 9575d06b1f4ab92762d147e21edcd78106f37a67 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 21:55:11 +0800 Subject: [PATCH 07/18] Extract stream bridge interface for all bridge. 
--- trunk/configure | 2 +- trunk/src/app/srs_app_rtc_source.cpp | 12 ++-------- trunk/src/app/srs_app_rtc_source.hpp | 29 ++++++++--------------- trunk/src/app/srs_app_source.cpp | 10 +------- trunk/src/app/srs_app_source.hpp | 18 +++----------- trunk/src/app/srs_app_srt_source.cpp | 12 ++-------- trunk/src/app/srs_app_srt_source.hpp | 23 +++++------------- trunk/src/app/srs_app_stream_bridge.cpp | 16 +++++++++++++ trunk/src/app/srs_app_stream_bridge.hpp | 31 +++++++++++++++++++++++++ 9 files changed, 72 insertions(+), 81 deletions(-) create mode 100644 trunk/src/app/srs_app_stream_bridge.cpp create mode 100644 trunk/src/app/srs_app_stream_bridge.hpp diff --git a/trunk/configure b/trunk/configure index 29b10e998d..d35e13b802 100755 --- a/trunk/configure +++ b/trunk/configure @@ -294,7 +294,7 @@ if [[ $SRS_FFMPEG_FIT == YES ]]; then fi MODULE_FILES=("srs_app_server" "srs_app_conn" "srs_app_rtmp_conn" "srs_app_source" "srs_app_refer" "srs_app_hls" "srs_app_forward" "srs_app_encoder" "srs_app_http_stream" - "srs_app_st" "srs_app_log" "srs_app_config" + "srs_app_st" "srs_app_log" "srs_app_config" "srs_app_stream_bridge" "srs_app_pithy_print" "srs_app_reload" "srs_app_http_api" "srs_app_http_conn" "srs_app_http_hooks" "srs_app_ingest" "srs_app_ffmpeg" "srs_app_utility" "srs_app_edge" "srs_app_heartbeat" "srs_app_empty" "srs_app_http_client" "srs_app_http_static" diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index 46d3c2fed3..ceba9aafb6 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -323,14 +323,6 @@ ISrsRtcSourceEventHandler::~ISrsRtcSourceEventHandler() { } -ISrsRtcSourceBridge::ISrsRtcSourceBridge() -{ -} - -ISrsRtcSourceBridge::~ISrsRtcSourceBridge() -{ -} - SrsRtcSource::SrsRtcSource() { is_created_ = false; @@ -470,7 +462,7 @@ SrsContextId SrsRtcSource::pre_source_id() return _pre_source_id; } -void SrsRtcSource::set_bridge(ISrsRtcSourceBridge* bridge) +void 
SrsRtcSource::set_bridge(ISrsStreamBridge* bridge) { srs_freep(bridge_); bridge_ = bridge; @@ -1394,7 +1386,7 @@ srs_error_t SrsRtcToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) return source_->on_frame(frame); } -SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsRtcSourceBridge* bridge) +SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsStreamBridge* bridge) { bridge_ = bridge; is_first_audio_ = true; diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 8fbff12065..5ff5ee803a 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -18,8 +18,10 @@ #include #include -#include #include +#include +#include +#include class SrsRequest; class SrsMetaCache; @@ -39,6 +41,7 @@ class SrsRtpNackForReceiver; class SrsJsonObject; class SrsErrorPithyPrint; class SrsRtcFrameBuilder; +class SrsLiveSource; class SrsNtp { @@ -146,18 +149,6 @@ class ISrsRtcSourceEventHandler virtual void on_consumers_finished() = 0; }; -// SrsRtcSource bridge to SrsLiveSource -class ISrsRtcSourceBridge -{ -public: - ISrsRtcSourceBridge(); - virtual ~ISrsRtcSourceBridge(); -public: - virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; - virtual void on_unpublish() = 0; -}; - // A Source is a stream, to publish and to play with, binding to SrsRtcPublishStream and SrsRtcPlayStream. class SrsRtcSource : public ISrsFastTimer { @@ -179,7 +170,7 @@ class SrsRtcSource : public ISrsFastTimer SrsRtcFrameBuilder* frame_builder_; #endif // The Source bridge, bridge stream to other source. - ISrsRtcSourceBridge* bridge_; + ISrsStreamBridge* bridge_; private: // To delivery stream to clients. 
std::vector consumers; @@ -211,7 +202,7 @@ class SrsRtcSource : public ISrsFastTimer virtual SrsContextId source_id(); virtual SrsContextId pre_source_id(); public: - void set_bridge(ISrsRtcSourceBridge* bridge); + void set_bridge(ISrsStreamBridge* bridge); public: // Create consumer // @param consumer, output the create consumer. @@ -253,7 +244,7 @@ class SrsRtcSource : public ISrsFastTimer #ifdef SRS_FFMPEG_FIT // A bridge to covert RTMP to WebRTC stream. -class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge +class SrsRtmpToRtcBridge : public ISrsStreamBridge { private: SrsRequest* req; @@ -300,7 +291,7 @@ class SrsRtmpToRtcBridge : public ISrsLiveSourceBridge }; // A bridge to covert WebRTC to RTMP stream. -class SrsRtcToRtmpBridge : public ISrsRtcSourceBridge +class SrsRtcToRtmpBridge : public ISrsStreamBridge { private: SrsLiveSource *source_; @@ -319,7 +310,7 @@ class SrsRtcToRtmpBridge : public ISrsRtcSourceBridge class SrsRtcFrameBuilder { private: - ISrsRtcSourceBridge* bridge_; + ISrsStreamBridge* bridge_; private: bool is_first_audio_; SrsAudioTranscoder *codec_; @@ -339,7 +330,7 @@ class SrsRtcFrameBuilder uint16_t lost_sn_; int64_t rtp_key_frame_ts_; public: - SrsRtcFrameBuilder(ISrsRtcSourceBridge* bridge); + SrsRtcFrameBuilder(ISrsStreamBridge* bridge); virtual ~SrsRtcFrameBuilder(); public: srs_error_t initialize(SrsRequest* r); diff --git a/trunk/src/app/srs_app_source.cpp b/trunk/src/app/srs_app_source.cpp index 0b10f588f1..f154b6ea50 100755 --- a/trunk/src/app/srs_app_source.cpp +++ b/trunk/src/app/srs_app_source.cpp @@ -1910,14 +1910,6 @@ void SrsLiveSourceManager::destroy() pool.clear(); } -ISrsLiveSourceBridge::ISrsLiveSourceBridge() -{ -} - -ISrsLiveSourceBridge::~ISrsLiveSourceBridge() -{ -} - SrsLiveSource::SrsLiveSource() { req = NULL; @@ -2046,7 +2038,7 @@ srs_error_t SrsLiveSource::initialize(SrsRequest* r, ISrsLiveSourceHandler* h) return err; } -void SrsLiveSource::set_bridge(ISrsLiveSourceBridge* v) +void 
SrsLiveSource::set_bridge(ISrsStreamBridge* v) { srs_freep(bridge_); bridge_ = v; diff --git a/trunk/src/app/srs_app_source.hpp b/trunk/src/app/srs_app_source.hpp index d1452783d8..c248d40ebc 100644 --- a/trunk/src/app/srs_app_source.hpp +++ b/trunk/src/app/srs_app_source.hpp @@ -18,6 +18,7 @@ #include #include #include +#include class SrsFormat; class SrsRtmpFormat; @@ -471,19 +472,6 @@ class SrsLiveSourceManager : public ISrsHourGlass // Global singleton instance. extern SrsLiveSourceManager* _srs_sources; -// For RTMP2RTC, bridge SrsLiveSource to SrsRtcSource -class ISrsLiveSourceBridge -{ -public: - ISrsLiveSourceBridge(); - virtual ~ISrsLiveSourceBridge(); -public: - virtual srs_error_t initialize(SrsRequest* r) = 0; - virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; - virtual void on_unpublish() = 0; -}; - // The live streaming source. class SrsLiveSource : public ISrsReloadHandler { @@ -518,7 +506,7 @@ class SrsLiveSource : public ISrsReloadHandler // The event handler. ISrsLiveSourceHandler* handler; // The source bridge for other source. - ISrsLiveSourceBridge* bridge_; + ISrsStreamBridge* bridge_; // The edge control service SrsPlayEdge* play_edge; SrsPublishEdge* publish_edge; @@ -548,7 +536,7 @@ class SrsLiveSource : public ISrsReloadHandler // Initialize the hls with handlers. virtual srs_error_t initialize(SrsRequest* r, ISrsLiveSourceHandler* h); // Bridge to other source, forward packets to it. 
- void set_bridge(ISrsLiveSourceBridge* v); + void set_bridge(ISrsStreamBridge* v); // Interface ISrsReloadHandler public: virtual srs_error_t on_reload_vhost_play(std::string vhost); diff --git a/trunk/src/app/srs_app_srt_source.cpp b/trunk/src/app/srs_app_srt_source.cpp index 26fc78002e..237e960e47 100644 --- a/trunk/src/app/srs_app_srt_source.cpp +++ b/trunk/src/app/srs_app_srt_source.cpp @@ -235,14 +235,6 @@ void SrsSrtConsumer::wait(int nb_msgs, srs_utime_t timeout) srs_cond_timedwait(mw_wait, timeout); } -ISrsSrtSourceBridge::ISrsSrtSourceBridge() -{ -} - -ISrsSrtSourceBridge::~ISrsSrtSourceBridge() -{ -} - SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) { live_source_ = source; @@ -278,7 +270,7 @@ srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) return srs_success; } -SrsSrtFrameBuilder::SrsSrtFrameBuilder(ISrsSrtSourceBridge* bridge) +SrsSrtFrameBuilder::SrsSrtFrameBuilder(ISrsStreamBridge* bridge) { ts_ctx_ = new SrsTsContext(); @@ -974,7 +966,7 @@ void SrsSrtSource::update_auth(SrsRequest* r) req->update_auth(r); } -void SrsSrtSource::set_bridge(ISrsSrtSourceBridge* bridge) +void SrsSrtSource::set_bridge(ISrsStreamBridge* bridge) { srs_freep(bridge_); bridge_ = bridge; diff --git a/trunk/src/app/srs_app_srt_source.hpp b/trunk/src/app/srs_app_srt_source.hpp index f940ca41c8..5888f6c4ba 100644 --- a/trunk/src/app/srs_app_srt_source.hpp +++ b/trunk/src/app/srs_app_srt_source.hpp @@ -14,7 +14,7 @@ #include #include -#include +#include class SrsSharedPtrMessage; class SrsRequest; @@ -92,19 +92,8 @@ class SrsSrtConsumer virtual void wait(int nb_msgs, srs_utime_t timeout); }; -class ISrsSrtSourceBridge -{ -public: - ISrsSrtSourceBridge(); - virtual ~ISrsSrtSourceBridge(); -public: - virtual srs_error_t on_publish() = 0; - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; - virtual void on_unpublish() = 0; -}; - // A bridge to covert SRT to RTMP stream. 
-class SrsSrtToRtmpBridge : public ISrsSrtSourceBridge +class SrsSrtToRtmpBridge : public ISrsStreamBridge { public: SrsSrtToRtmpBridge(SrsLiveSource* source); @@ -123,7 +112,7 @@ class SrsSrtToRtmpBridge : public ISrsSrtSourceBridge class SrsSrtFrameBuilder : public ISrsTsHandler { public: - SrsSrtFrameBuilder(ISrsSrtSourceBridge* bridge); + SrsSrtFrameBuilder(ISrsStreamBridge* bridge); virtual ~SrsSrtFrameBuilder(); public: srs_error_t initialize(SrsRequest* r); @@ -147,7 +136,7 @@ class SrsSrtFrameBuilder : public ISrsTsHandler srs_error_t on_hevc_frame(SrsTsMessage *msg, std::vector> &ipb_frames); #endif private: - ISrsSrtSourceBridge* bridge_; + ISrsStreamBridge* bridge_; private: SrsTsContext* ts_ctx_; // Record sps/pps had changed, if change, need to generate new video sh frame. @@ -190,7 +179,7 @@ class SrsSrtSource // Update the authentication information in request. virtual void update_auth(SrsRequest* r); public: - void set_bridge(ISrsSrtSourceBridge* bridge); + void set_bridge(ISrsStreamBridge* bridge); public: // Create consumer // @param consumer, output the create consumer. 
@@ -217,7 +206,7 @@ class SrsSrtSource bool can_publish_; private: SrsSrtFrameBuilder* frame_builder_; - ISrsSrtSourceBridge* bridge_; + ISrsStreamBridge* bridge_; }; #endif diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp new file mode 100644 index 0000000000..648f6e3ae6 --- /dev/null +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -0,0 +1,16 @@ +// +// Copyright (c) 2013-2023 The SRS Authors +// +// SPDX-License-Identifier: MIT or MulanPSL-2.0 +// + +#include + +ISrsStreamBridge::ISrsStreamBridge() +{ +} + +ISrsStreamBridge::~ISrsStreamBridge() +{ +} + diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp new file mode 100644 index 0000000000..dc2cd884f6 --- /dev/null +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -0,0 +1,31 @@ +// +// Copyright (c) 2013-2023 The SRS Authors +// +// SPDX-License-Identifier: MIT or MulanPSL-2.0 +// + +#ifndef SRS_APP_STREAM_BRIDGE_HPP +#define SRS_APP_STREAM_BRIDGE_HPP + +#include + +class SrsRequest; +class SrsSharedPtrMessage; + +// A stream bridge is used to convert stream via different protocols, such as bridge for RTMP and RTC. Generally, we use +// frame as message for bridge. A frame is a audio or video frame, such as an I/B/P frame, a general frame for decoder. +// So you must assemble RTP or TS packets to a video frame if WebRTC or SRT. +class ISrsStreamBridge +{ +public: + ISrsStreamBridge(); + virtual ~ISrsStreamBridge(); +public: + virtual srs_error_t initialize(SrsRequest* r) = 0; + virtual srs_error_t on_publish() = 0; + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame) = 0; + virtual void on_unpublish() = 0; +}; + +#endif + From 687554798345d80316ae6fbbc9cdc8146b255507 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 23:30:37 +0800 Subject: [PATCH 08/18] Merge SRT/RTC to RTMP bridge to Frame bridge. 
--- trunk/src/app/srs_app_rtc_conn.cpp | 2 +- trunk/src/app/srs_app_rtc_source.cpp | 37 ----------------------- trunk/src/app/srs_app_rtc_source.hpp | 16 ---------- trunk/src/app/srs_app_srt_conn.cpp | 2 +- trunk/src/app/srs_app_srt_source.cpp | 35 ---------------------- trunk/src/app/srs_app_srt_source.hpp | 16 ---------- trunk/src/app/srs_app_stream_bridge.cpp | 39 +++++++++++++++++++++++++ trunk/src/app/srs_app_stream_bridge.hpp | 18 ++++++++++++ 8 files changed, 59 insertions(+), 106 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_conn.cpp b/trunk/src/app/srs_app_rtc_conn.cpp index 8b209080f1..eb5ea2aa46 100644 --- a/trunk/src/app/srs_app_rtc_conn.cpp +++ b/trunk/src/app/srs_app_rtc_conn.cpp @@ -1197,7 +1197,7 @@ srs_error_t SrsRtcPublishStream::initialize(SrsRequest* r, SrsRtcSourceDescripti // especially for stream merging. rtmp->set_cache(false); - SrsRtcToRtmpBridge* bridge = new SrsRtcToRtmpBridge(rtmp); + SrsFrameToRtmpBridge* bridge = new SrsFrameToRtmpBridge(rtmp); if ((err = bridge->initialize(r)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index ceba9aafb6..ece9aa7677 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -1349,43 +1349,6 @@ srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) return err; } -SrsRtcToRtmpBridge::SrsRtcToRtmpBridge(SrsLiveSource *src) -{ - source_ = src; -} - -SrsRtcToRtmpBridge::~SrsRtcToRtmpBridge() -{ -} - -srs_error_t SrsRtcToRtmpBridge::initialize(SrsRequest* r) -{ - return srs_success; -} - -srs_error_t SrsRtcToRtmpBridge::on_publish() -{ - srs_error_t err = srs_success; - - // TODO: FIXME: Should sync with bridge? - if ((err = source_->on_publish()) != srs_success) { - return srs_error_wrap(err, "source publish"); - } - - return err; -} - -void SrsRtcToRtmpBridge::on_unpublish() -{ - // TODO: FIXME: Should sync with bridge? 
- source_->on_unpublish(); -} - -srs_error_t SrsRtcToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) -{ - return source_->on_frame(frame); -} - SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsStreamBridge* bridge) { bridge_ = bridge; diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 5ff5ee803a..f61dc24f22 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -290,22 +290,6 @@ class SrsRtmpToRtcBridge : public ISrsStreamBridge srs_error_t consume_packets(std::vector& pkts); }; -// A bridge to covert WebRTC to RTMP stream. -class SrsRtcToRtmpBridge : public ISrsStreamBridge -{ -private: - SrsLiveSource *source_; -public: - SrsRtcToRtmpBridge(SrsLiveSource *src); - virtual ~SrsRtcToRtmpBridge(); -public: - srs_error_t initialize(SrsRequest* r); -public: - virtual srs_error_t on_publish(); - virtual void on_unpublish(); - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); -}; - // Collect and build WebRTC RTP packets to AV frames. 
class SrsRtcFrameBuilder { diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index 4eb1bd17be..c16809801d 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -408,7 +408,7 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() } #endif - SrsSrtToRtmpBridge* bridge = new SrsSrtToRtmpBridge(live_source); + SrsFrameToRtmpBridge* bridge = new SrsFrameToRtmpBridge(live_source); if ((err = bridge->initialize(req_)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_srt_source.cpp b/trunk/src/app/srs_app_srt_source.cpp index 237e960e47..18b741d1d1 100644 --- a/trunk/src/app/srs_app_srt_source.cpp +++ b/trunk/src/app/srs_app_srt_source.cpp @@ -235,41 +235,6 @@ void SrsSrtConsumer::wait(int nb_msgs, srs_utime_t timeout) srs_cond_timedwait(mw_wait, timeout); } -SrsSrtToRtmpBridge::SrsSrtToRtmpBridge(SrsLiveSource* source) -{ - live_source_ = source; -} - -SrsSrtToRtmpBridge::~SrsSrtToRtmpBridge() -{ -} - -srs_error_t SrsSrtToRtmpBridge::on_publish() -{ - srs_error_t err = srs_success; - - if ((err = live_source_->on_publish()) != srs_success) { - return srs_error_wrap(err, "on publish"); - } - - return err; -} - -srs_error_t SrsSrtToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) -{ - return live_source_->on_frame(frame); -} - -void SrsSrtToRtmpBridge::on_unpublish() -{ - live_source_->on_unpublish(); -} - -srs_error_t SrsSrtToRtmpBridge::initialize(SrsRequest* req) -{ - return srs_success; -} - SrsSrtFrameBuilder::SrsSrtFrameBuilder(ISrsStreamBridge* bridge) { ts_ctx_ = new SrsTsContext(); diff --git a/trunk/src/app/srs_app_srt_source.hpp b/trunk/src/app/srs_app_srt_source.hpp index 5888f6c4ba..825a74981d 100644 --- a/trunk/src/app/srs_app_srt_source.hpp +++ b/trunk/src/app/srs_app_srt_source.hpp @@ -92,22 +92,6 @@ class SrsSrtConsumer virtual void wait(int nb_msgs, srs_utime_t timeout); }; -// A bridge to covert SRT to RTMP stream. 
-class SrsSrtToRtmpBridge : public ISrsStreamBridge -{ -public: - SrsSrtToRtmpBridge(SrsLiveSource* source); - virtual ~SrsSrtToRtmpBridge(); -public: - virtual srs_error_t on_publish(); - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); - virtual void on_unpublish(); -public: - srs_error_t initialize(SrsRequest* req); -private: - SrsLiveSource* live_source_; -}; - // Collect and build SRT TS packet to AV frames. class SrsSrtFrameBuilder : public ISrsTsHandler { diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index 648f6e3ae6..f3327d016a 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -6,6 +6,8 @@ #include +#include + ISrsStreamBridge::ISrsStreamBridge() { } @@ -14,3 +16,40 @@ ISrsStreamBridge::~ISrsStreamBridge() { } +SrsFrameToRtmpBridge::SrsFrameToRtmpBridge(SrsLiveSource *src) +{ + source_ = src; +} + +SrsFrameToRtmpBridge::~SrsFrameToRtmpBridge() +{ +} + +srs_error_t SrsFrameToRtmpBridge::initialize(SrsRequest* r) +{ + return srs_success; +} + +srs_error_t SrsFrameToRtmpBridge::on_publish() +{ + srs_error_t err = srs_success; + + // TODO: FIXME: Should sync with bridge? + if ((err = source_->on_publish()) != srs_success) { + return srs_error_wrap(err, "source publish"); + } + + return err; +} + +void SrsFrameToRtmpBridge::on_unpublish() +{ + // TODO: FIXME: Should sync with bridge? + source_->on_unpublish(); +} + +srs_error_t SrsFrameToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) +{ + return source_->on_frame(frame); +} + diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp index dc2cd884f6..8abd098ef4 100644 --- a/trunk/src/app/srs_app_stream_bridge.hpp +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -11,6 +11,7 @@ class SrsRequest; class SrsSharedPtrMessage; +class SrsLiveSource; // A stream bridge is used to convert stream via different protocols, such as bridge for RTMP and RTC. 
Generally, we use // frame as message for bridge. A frame is a audio or video frame, such as an I/B/P frame, a general frame for decoder. @@ -27,5 +28,22 @@ class ISrsStreamBridge virtual void on_unpublish() = 0; }; +// A bridge to feed AV frame to RTMP stream. +class SrsFrameToRtmpBridge : public ISrsStreamBridge +{ +private: + SrsLiveSource *source_; +public: + SrsFrameToRtmpBridge(SrsLiveSource *src); + virtual ~SrsFrameToRtmpBridge(); +public: + srs_error_t initialize(SrsRequest* r); +public: + virtual srs_error_t on_publish(); + virtual void on_unpublish(); +public: + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +}; + #endif From adcb51c49b73302b947206e1c7ca61bdafdd83de Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 28 Jan 2023 23:48:17 +0800 Subject: [PATCH 09/18] Move SrsRtmpToRtcBridge to stream bridge file. --- trunk/src/app/srs_app_rtc_source.cpp | 682 +----------------------- trunk/src/app/srs_app_rtc_source.hpp | 56 +- trunk/src/app/srs_app_statistic.cpp | 4 +- trunk/src/app/srs_app_stream_bridge.cpp | 680 +++++++++++++++++++++++ trunk/src/app/srs_app_stream_bridge.hpp | 59 ++ 5 files changed, 753 insertions(+), 728 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index ece9aa7677..4efe9a5fe5 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -52,67 +52,8 @@ SrsPps* _srs_pps_rmnack = NULL; extern SrsPps* _srs_pps_aloss2; -// Firefox defaults as 109, Chrome is 111. -const int kAudioPayloadType = 111; -const int kAudioChannel = 2; -const int kAudioSamplerate = 48000; - -// Firefox defaults as 126, Chrome is 102. -const int kVideoPayloadType = 102; -const int kVideoSamplerate = 90000; - -// The RTP payload max size, reserved some paddings for SRTP as such: -// kRtpPacketSize = kRtpMaxPayloadSize + paddings -// For example, if kRtpPacketSize is 1500, recommend to set kRtpMaxPayloadSize to 1400, -// which reserves 100 bytes for SRTP or paddings. 
-// otherwise, the kRtpPacketSize must less than MTU, in webrtc source code, -// the rtp max size is assigned by kVideoMtu = 1200. -// so we set kRtpMaxPayloadSize = 1200. -// see @doc https://groups.google.com/g/discuss-webrtc/c/gH5ysR3SoZI -const int kRtpMaxPayloadSize = kRtpPacketSize - 300; - using namespace std; -// TODO: Add this function into SrsRtpMux class. -srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFormat* format, char** pbuf, int* pnn_buf) -{ - srs_error_t err = srs_success; - - if (format->is_aac_sequence_header()) { - return err; - } - - // If no audio RAW frame, or not parsed for no sequence header, drop the packet. - if (format->audio->nb_samples == 0) { - srs_warn("RTC: Drop AAC %d bytes for no sample", shared_audio->size); - return err; - } - - if (format->audio->nb_samples != 1) { - return srs_error_new(ERROR_RTC_RTP_MUXER, "adts samples=%d", format->audio->nb_samples); - } - - int nb_buf = format->audio->samples[0].size + 7; - char* buf = new char[nb_buf]; - SrsBuffer stream(buf, nb_buf); - - // TODO: Add comment. 
- stream.write_1bytes(0xFF); - stream.write_1bytes(0xF9); - stream.write_1bytes(((format->acodec->aac_object - 1) << 6) | ((format->acodec->aac_sample_rate & 0x0F) << 2) | ((format->acodec->aac_channels & 0x04) >> 2)); - stream.write_1bytes(((format->acodec->aac_channels & 0x03) << 6) | ((nb_buf >> 11) & 0x03)); - stream.write_1bytes((nb_buf >> 3) & 0xFF); - stream.write_1bytes(((nb_buf & 0x07) << 5) | 0x1F); - stream.write_1bytes(0xFC); - - stream.write_bytes(format->audio->samples[0].bytes, format->audio->samples[0].size); - - *pbuf = buf; - *pnn_buf = nb_buf; - - return err; -} - uint64_t SrsNtp::kMagicNtpFractionalUnit = 1ULL << 32; SrsNtp::SrsNtp() @@ -732,623 +673,6 @@ srs_error_t SrsRtcSource::on_timer(srs_utime_t interval) #ifdef SRS_FFMPEG_FIT -SrsRtmpToRtcBridge::SrsRtmpToRtcBridge(SrsRtcSource* source) -{ - req = NULL; - source_ = source; - format = new SrsRtmpFormat(); - codec_ = new SrsAudioTranscoder(); - latest_codec_ = SrsAudioCodecIdForbidden; - rtmp_to_rtc = false; - keep_bframe = false; - merge_nalus = false; - meta = new SrsMetaCache(); - audio_sequence = 0; - video_sequence = 0; - - // audio track ssrc - if (true) { - std::vector descs = source->get_track_desc("audio", "opus"); - if (!descs.empty()) { - audio_ssrc = descs.at(0)->ssrc_; - } - // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 - audio_payload_type_ = descs.empty() ? kAudioPayloadType : descs.front()->media_->pt_; - } - - // video track ssrc - if (true) { - std::vector descs = source->get_track_desc("video", "H264"); - if (!descs.empty()) { - video_ssrc = descs.at(0)->ssrc_; - } - // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 - video_payload_type_ = descs.empty() ? 
kVideoPayloadType : descs.front()->media_->pt_; - } -} - -SrsRtmpToRtcBridge::~SrsRtmpToRtcBridge() -{ - srs_freep(format); - srs_freep(codec_); - srs_freep(meta); -} - -srs_error_t SrsRtmpToRtcBridge::initialize(SrsRequest* r) -{ - srs_error_t err = srs_success; - - req = r; - rtmp_to_rtc = _srs_config->get_rtc_from_rtmp(req->vhost); - - if (rtmp_to_rtc) { - if ((err = format->initialize()) != srs_success) { - return srs_error_wrap(err, "format initialize"); - } - - // Setup the SPS/PPS parsing strategy. - format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); - } - - keep_bframe = _srs_config->get_rtc_keep_bframe(req->vhost); - merge_nalus = _srs_config->get_rtc_server_merge_nalus(); - srs_trace("RTC bridge from RTMP, rtmp2rtc=%d, keep_bframe=%d, merge_nalus=%d", - rtmp_to_rtc, keep_bframe, merge_nalus); - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::on_publish() -{ - srs_error_t err = srs_success; - - if (!rtmp_to_rtc) { - return err; - } - - // TODO: FIXME: Should sync with bridge? - if ((err = source_->on_publish()) != srs_success) { - return srs_error_wrap(err, "source publish"); - } - - // Reset the metadata cache, to make VLC happy when disable/enable stream. - // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 - meta->clear(); - - return err; -} - -void SrsRtmpToRtcBridge::on_unpublish() -{ - if (!rtmp_to_rtc) { - return; - } - - // Reset the metadata cache, to make VLC happy when disable/enable stream. - // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 - meta->update_previous_vsh(); - meta->update_previous_ash(); - - // @remark This bridge might be disposed here, so never use it. - // TODO: FIXME: Should sync with bridge? 
- source_->on_unpublish(); -} - -srs_error_t SrsRtmpToRtcBridge::on_frame(SrsSharedPtrMessage* frame) -{ - if (frame->is_audio()) { - return on_audio(frame); - } else if (frame->is_video()) { - return on_video(frame); - } - return srs_success; -} - -srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) -{ - srs_error_t err = srs_success; - - if (!rtmp_to_rtc) { - return err; - } - - // TODO: FIXME: Support parsing OPUS for RTC. - if ((err = format->on_audio(msg)) != srs_success) { - return srs_error_wrap(err, "format consume audio"); - } - - // Try to init codec when startup or codec changed. - if (format->acodec && (err = init_codec(format->acodec->id)) != srs_success) { - return srs_error_wrap(err, "init codec"); - } - - // Ignore if no format->acodec, it means the codec is not parsed, or unknown codec. - // @issue https://github.com/ossrs/srs/issues/1506#issuecomment-562079474 - if (!format->acodec) { - return err; - } - - // ts support audio codec: aac/mp3 - SrsAudioCodecId acodec = format->acodec->id; - if (acodec != SrsAudioCodecIdAAC && acodec != SrsAudioCodecIdMP3) { - return err; - } - - // ignore sequence header - srs_assert(format->audio); - - if (format->acodec->id == SrsAudioCodecIdMP3) { - return transcode(format->audio); - } - - // When drop aac audio packet, never transcode. - if (acodec != SrsAudioCodecIdAAC) { - return err; - } - - char* adts_audio = NULL; - int nn_adts_audio = 0; - // TODO: FIXME: Reserve 7 bytes header when create shared message. - if ((err = aac_raw_append_adts_header(msg, format, &adts_audio, &nn_adts_audio)) != srs_success) { - return srs_error_wrap(err, "aac append header"); - } - - if (!adts_audio) { - return err; - } - - SrsAudioFrame aac; - aac.dts = format->audio->dts; - aac.cts = format->audio->cts; - if ((err = aac.add_sample(adts_audio, nn_adts_audio)) == srs_success) { - // If OK, transcode the AAC to Opus and consume it. 
- err = transcode(&aac); - } - - srs_freepa(adts_audio); - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::init_codec(SrsAudioCodecId codec) -{ - srs_error_t err = srs_success; - - // Ignore if not changed. - if (latest_codec_ == codec) return err; - - // Create a new codec. - srs_freep(codec_); - codec_ = new SrsAudioTranscoder(); - - // Initialize the codec according to the codec in stream. - int bitrate = 48000; // The output bitrate in bps. - if ((err = codec_->initialize(codec, SrsAudioCodecIdOpus, kAudioChannel, kAudioSamplerate, bitrate)) != srs_success) { - return srs_error_wrap(err, "init codec=%d", codec); - } - - // Update the latest codec in stream. - if (latest_codec_ == SrsAudioCodecIdForbidden) { - srs_trace("RTMP2RTC: Init audio codec to %d(%s)", codec, srs_audio_codec_id2str(codec).c_str()); - } else { - srs_trace("RTMP2RTC: Switch audio codec %d(%s) to %d(%s)", latest_codec_, srs_audio_codec_id2str(latest_codec_).c_str(), - codec, srs_audio_codec_id2str(codec).c_str()); - } - latest_codec_ = codec; - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::transcode(SrsAudioFrame* audio) -{ - srs_error_t err = srs_success; - - std::vector out_audios; - if ((err = codec_->transcode(audio, out_audios)) != srs_success) { - return srs_error_wrap(err, "recode error"); - } - - // Save OPUS packets in shared message. 
- if (out_audios.empty()) { - return err; - } - - for (std::vector::iterator it = out_audios.begin(); it != out_audios.end(); ++it) { - SrsAudioFrame* out_audio = *it; - - SrsRtpPacket* pkt = new SrsRtpPacket(); - SrsAutoFree(SrsRtpPacket, pkt); - - if ((err = package_opus(out_audio, pkt)) != srs_success) { - err = srs_error_wrap(err, "package opus"); - break; - } - - if ((err = source_->on_rtp(pkt)) != srs_success) { - err = srs_error_wrap(err, "consume opus"); - break; - } - } - - codec_->free_frames(out_audios); - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) -{ - srs_error_t err = srs_success; - - pkt->header.set_payload_type(audio_payload_type_); - pkt->header.set_ssrc(audio_ssrc); - pkt->frame_type = SrsFrameTypeAudio; - pkt->header.set_marker(true); - pkt->header.set_sequence(audio_sequence++); - pkt->header.set_timestamp(audio->dts * 48); - - SrsRtpRawPayload* raw = new SrsRtpRawPayload(); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); - - srs_assert(audio->nb_samples == 1); - raw->payload = pkt->wrap(audio->samples[0].bytes, audio->samples[0].size); - raw->nn_payload = audio->samples[0].size; - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::on_video(SrsSharedPtrMessage* msg) -{ - srs_error_t err = srs_success; - - if (!rtmp_to_rtc) { - return err; - } - - // cache the sequence header if h264 - bool is_sequence_header = SrsFlvVideo::sh(msg->payload, msg->size); - if (is_sequence_header && (err = meta->update_vsh(msg)) != srs_success) { - return srs_error_wrap(err, "meta update video"); - } - - if ((err = format->on_video(msg)) != srs_success) { - return srs_error_wrap(err, "format consume video"); - } - - // Ignore if no format->vcodec, it means the codec is not parsed, or unsupport/unknown codec - // such as H.263 codec - if (!format->vcodec) { - return err; - } - - bool has_idr = false; - vector samples; - if ((err = filter(msg, format, has_idr, samples)) != srs_success) { - return 
srs_error_wrap(err, "filter video"); - } - int nn_samples = (int)samples.size(); - - // Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A. - if (has_idr) { - SrsRtpPacket* pkt = new SrsRtpPacket(); - SrsAutoFree(SrsRtpPacket, pkt); - - if ((err = package_stap_a(source_, msg, pkt)) != srs_success) { - return srs_error_wrap(err, "package stap-a"); - } - - if ((err = source_->on_rtp(pkt)) != srs_success) { - return srs_error_wrap(err, "consume sps/pps"); - } - } - - // If merge Nalus, we pcakges all NALUs(samples) as one NALU, in a RTP or FUA packet. - vector pkts; - if (merge_nalus && nn_samples > 1) { - if ((err = package_nalus(msg, samples, pkts)) != srs_success) { - return srs_error_wrap(err, "package nalus as one"); - } - } else { - // By default, we package each NALU(sample) to a RTP or FUA packet. - for (int i = 0; i < nn_samples; i++) { - SrsSample* sample = samples[i]; - - // We always ignore bframe here, if config to discard bframe, - // the bframe flag will not be set. - if (sample->bframe) { - continue; - } - - if (sample->size <= kRtpMaxPayloadSize) { - if ((err = package_single_nalu(msg, sample, pkts)) != srs_success) { - return srs_error_wrap(err, "package single nalu"); - } - } else { - if ((err = package_fu_a(msg, sample, kRtpMaxPayloadSize, pkts)) != srs_success) { - return srs_error_wrap(err, "package fu-a"); - } - } - } - } - - if (!pkts.empty()) { - pkts.back()->header.set_marker(true); - } - - return consume_packets(pkts); -} - -srs_error_t SrsRtmpToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) -{ - srs_error_t err = srs_success; - - // If IDR, we will insert SPS/PPS before IDR frame. - if (format->video && format->video->has_idr) { - has_idr = true; - } - - // Update samples to shared frame. - for (int i = 0; i < format->video->nb_samples; ++i) { - SrsSample* sample = &format->video->samples[i]; - - // Because RTC does not support B-frame, so we will drop them. 
- // TODO: Drop B-frame in better way, which not cause picture corruption. - if (!keep_bframe) { - if ((err = sample->parse_bframe()) != srs_success) { - return srs_error_wrap(err, "parse bframe"); - } - if (sample->bframe) { - continue; - } - } - - samples.push_back(sample); - } - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) -{ - srs_error_t err = srs_success; - - SrsFormat* format = meta->vsh_format(); - if (!format || !format->vcodec) { - return err; - } - - // Note that the sps/pps may change, so we should copy it. - const vector& sps = format->vcodec->sequenceParameterSetNALUnit; - const vector& pps = format->vcodec->pictureParameterSetNALUnit; - if (sps.empty() || pps.empty()) { - return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty"); - } - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)kStapA; - pkt->header.set_marker(false); - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpSTAPPayload* stap = new SrsRtpSTAPPayload(); - pkt->set_payload(stap, SrsRtspPacketPayloadTypeSTAP); - - uint8_t header = sps[0]; - stap->nri = (SrsAvcNaluType)header; - - // Copy the SPS/PPS bytes, because it may change. 
- int size = (int)(sps.size() + pps.size()); - char* payload = pkt->wrap(size); - - if (true) { - SrsSample* sample = new SrsSample(); - sample->bytes = payload; - sample->size = (int)sps.size(); - stap->nalus.push_back(sample); - - memcpy(payload, (char*)&sps[0], sps.size()); - payload += (int)sps.size(); - } - - if (true) { - SrsSample* sample = new SrsSample(); - sample->bytes = payload; - sample->size = (int)pps.size(); - stap->nalus.push_back(sample); - - memcpy(payload, (char*)&pps[0], pps.size()); - payload += (int)pps.size(); - } - - srs_info("RTC STAP-A seq=%u, sps %d, pps %d bytes", pkt->header.get_sequence(), sps.size(), pps.size()); - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) -{ - srs_error_t err = srs_success; - - SrsRtpRawNALUs* raw = new SrsRtpRawNALUs(); - SrsAvcNaluType first_nalu_type = SrsAvcNaluTypeReserved; - - for (int i = 0; i < (int)samples.size(); i++) { - SrsSample* sample = samples[i]; - - // We always ignore bframe here, if config to discard bframe, - // the bframe flag will not be set. - if (sample->bframe) { - continue; - } - - if (!sample->size) { - continue; - } - - if (first_nalu_type == SrsAvcNaluTypeReserved) { - first_nalu_type = SrsAvcNaluType((uint8_t)(sample->bytes[0] & kNalTypeMask)); - } - - raw->push_back(sample->copy()); - } - - // Ignore empty. - int nn_bytes = raw->nb_bytes(); - if (nn_bytes <= 0) { - srs_freep(raw); - return err; - } - - if (nn_bytes < kRtpMaxPayloadSize) { - // Package NALUs in a single RTP packet. 
- SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)first_nalu_type; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeNALU); - pkt->wrap(msg); - } else { - // We must free it, should never use RTP packets to free it, - // because more than one RTP packet will refer to it. - SrsAutoFree(SrsRtpRawNALUs, raw); - - // Package NALUs in FU-A RTP packets. - int fu_payload_size = kRtpMaxPayloadSize; - - // The first byte is store in FU-A header. - uint8_t header = raw->skip_first_byte(); - uint8_t nal_type = header & kNalTypeMask; - int nb_left = nn_bytes - 1; - - int num_of_packet = 1 + (nn_bytes - 1) / fu_payload_size; - for (int i = 0; i < num_of_packet; ++i) { - int packet_size = srs_min(nb_left, fu_payload_size); - - SrsRtpFUAPayload* fua = new SrsRtpFUAPayload(); - if ((err = raw->read_samples(fua->nalus, packet_size)) != srs_success) { - srs_freep(fua); - return srs_error_wrap(err, "read samples %d bytes, left %d, total %d", packet_size, nb_left, nn_bytes); - } - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)kFuA; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - fua->nri = (SrsAvcNaluType)header; - fua->nalu_type = (SrsAvcNaluType)nal_type; - fua->start = bool(i == 0); - fua->end = bool(i == num_of_packet - 1); - - pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA); - pkt->wrap(msg); - - nb_left -= packet_size; - } - } - - return err; -} - -// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 -srs_error_t 
SrsRtmpToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) -{ - srs_error_t err = srs_success; - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpRawPayload* raw = new SrsRtpRawPayload(); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); - - raw->payload = sample->bytes; - raw->nn_payload = sample->size; - - pkt->wrap(msg); - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) -{ - srs_error_t err = srs_success; - - char* p = sample->bytes + 1; - int nb_left = sample->size - 1; - uint8_t header = sample->bytes[0]; - uint8_t nal_type = header & kNalTypeMask; - - int num_of_packet = 1 + (nb_left - 1) / fu_payload_size; - for (int i = 0; i < num_of_packet; ++i) { - int packet_size = srs_min(nb_left, fu_payload_size); - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpFUAPayload2* fua = new SrsRtpFUAPayload2(); - pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA2); - - fua->nri = (SrsAvcNaluType)header; - fua->nalu_type = (SrsAvcNaluType)nal_type; - fua->start = bool(i == 0); - fua->end = bool(i == num_of_packet - 1); - - fua->payload = p; - fua->size = packet_size; - - pkt->wrap(msg); - - p += packet_size; - nb_left -= packet_size; - } - - return err; -} - -srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) -{ - srs_error_t err = srs_success; - - // TODO: FIXME: Consume a range of packets. 
- for (int i = 0; i < (int)pkts.size(); i++) { - SrsRtpPacket* pkt = pkts[i]; - if ((err = source_->on_rtp(pkt)) != srs_success) { - err = srs_error_wrap(err, "consume sps/pps"); - break; - } - } - - for (int i = 0; i < (int)pkts.size(); i++) { - SrsRtpPacket* pkt = pkts[i]; - srs_freep(pkt); - } - - return err; -} - SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsStreamBridge* bridge) { bridge_ = bridge; @@ -1444,8 +768,8 @@ srs_error_t SrsRtcFrameBuilder::transcode_audio(SrsRtpPacket *pkt) is_first_audio_ = false; } - std::vector out_pkts; - SrsRtpRawPayload *payload = dynamic_cast(pkt->payload()); + std::vector out_pkts; + SrsRtpRawPayload *payload = dynamic_cast(pkt->payload()); SrsAudioFrame frame; frame.add_sample(payload->payload, payload->nn_payload); @@ -1457,7 +781,7 @@ srs_error_t SrsRtcFrameBuilder::transcode_audio(SrsRtpPacket *pkt) return err; } - for (std::vector::iterator it = out_pkts.begin(); it != out_pkts.end(); ++it) { + for (std::vector::iterator it = out_pkts.begin(); it != out_pkts.end(); ++it) { SrsCommonMessage out_rtmp; out_rtmp.header.timestamp = (*it)->dts; packet_aac(&out_rtmp, (*it)->samples[0].bytes, (*it)->samples[0].size, ts, is_first_audio_); diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index f61dc24f22..a593f4124f 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -43,6 +43,15 @@ class SrsErrorPithyPrint; class SrsRtcFrameBuilder; class SrsLiveSource; +// Firefox defaults as 109, Chrome is 111. +const int kAudioPayloadType = 111; +const int kAudioChannel = 2; +const int kAudioSamplerate = 48000; + +// Firefox defaults as 126, Chrome is 102. +const int kVideoPayloadType = 102; +const int kVideoSamplerate = 90000; + class SrsNtp { public: @@ -243,53 +252,6 @@ class SrsRtcSource : public ISrsFastTimer #ifdef SRS_FFMPEG_FIT -// A bridge to covert RTMP to WebRTC stream. 
-class SrsRtmpToRtcBridge : public ISrsStreamBridge -{ -private: - SrsRequest* req; - SrsRtcSource* source_; - // The format, codec information. - SrsRtmpFormat* format; - // The metadata cache. - SrsMetaCache* meta; -private: - bool rtmp_to_rtc; - SrsAudioCodecId latest_codec_; - SrsAudioTranscoder* codec_; - bool keep_bframe; - bool merge_nalus; - uint16_t audio_sequence; - uint16_t video_sequence; - uint32_t audio_ssrc; - uint32_t video_ssrc; - uint8_t audio_payload_type_; - uint8_t video_payload_type_; -public: - SrsRtmpToRtcBridge(SrsRtcSource* source); - virtual ~SrsRtmpToRtcBridge(); -public: - virtual srs_error_t initialize(SrsRequest* r); - virtual srs_error_t on_publish(); - virtual void on_unpublish(); - virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); -private: - virtual srs_error_t on_audio(SrsSharedPtrMessage* msg); -private: - srs_error_t init_codec(SrsAudioCodecId codec); - srs_error_t transcode(SrsAudioFrame* audio); - srs_error_t package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt); -private: - virtual srs_error_t on_video(SrsSharedPtrMessage* msg); -private: - srs_error_t filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, std::vector& samples); - srs_error_t package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt); - srs_error_t package_nalus(SrsSharedPtrMessage* msg, const std::vector& samples, std::vector& pkts); - srs_error_t package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, std::vector& pkts); - srs_error_t package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, std::vector& pkts); - srs_error_t consume_packets(std::vector& pkts); -}; - // Collect and build WebRTC RTP packets to AV frames. 
class SrsRtcFrameBuilder { diff --git a/trunk/src/app/srs_app_statistic.cpp b/trunk/src/app/srs_app_statistic.cpp index 9942c3490a..d5dba71584 100644 --- a/trunk/src/app/srs_app_statistic.cpp +++ b/trunk/src/app/srs_app_statistic.cpp @@ -490,14 +490,14 @@ void SrsStatistic::cleanup_stream(SrsStatisticStream* stream) // Do cleanup streams. if (true) { - std::map::iterator it; + std::map::iterator it; if ((it = streams.find(stream->id)) != streams.end()) { streams.erase(it); } } if (true) { - std::map::iterator it; + std::map::iterator it; if ((it = rstreams.find(stream->url)) != rstreams.end()) { rstreams.erase(it); } diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index f3327d016a..5516283595 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -7,6 +7,66 @@ #include #include +#include +#include +#include +#include +#include +#include +#include + +#include +using namespace std; + +// The RTP payload max size, reserved some paddings for SRTP as such: +// kRtpPacketSize = kRtpMaxPayloadSize + paddings +// For example, if kRtpPacketSize is 1500, recommend to set kRtpMaxPayloadSize to 1400, +// which reserves 100 bytes for SRTP or paddings. +// otherwise, the kRtpPacketSize must less than MTU, in webrtc source code, +// the rtp max size is assigned by kVideoMtu = 1200. +// so we set kRtpMaxPayloadSize = 1200. +// see @doc https://groups.google.com/g/discuss-webrtc/c/gH5ysR3SoZI +const int kRtpMaxPayloadSize = kRtpPacketSize - 300; + +// TODO: Add this function into SrsRtpMux class. +srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFormat* format, char** pbuf, int* pnn_buf) +{ + srs_error_t err = srs_success; + + if (format->is_aac_sequence_header()) { + return err; + } + + // If no audio RAW frame, or not parsed for no sequence header, drop the packet. 
+ if (format->audio->nb_samples == 0) { + srs_warn("RTC: Drop AAC %d bytes for no sample", shared_audio->size); + return err; + } + + if (format->audio->nb_samples != 1) { + return srs_error_new(ERROR_RTC_RTP_MUXER, "adts samples=%d", format->audio->nb_samples); + } + + int nb_buf = format->audio->samples[0].size + 7; + char* buf = new char[nb_buf]; + SrsBuffer stream(buf, nb_buf); + + // TODO: Add comment. + stream.write_1bytes(0xFF); + stream.write_1bytes(0xF9); + stream.write_1bytes(((format->acodec->aac_object - 1) << 6) | ((format->acodec->aac_sample_rate & 0x0F) << 2) | ((format->acodec->aac_channels & 0x04) >> 2)); + stream.write_1bytes(((format->acodec->aac_channels & 0x03) << 6) | ((nb_buf >> 11) & 0x03)); + stream.write_1bytes((nb_buf >> 3) & 0xFF); + stream.write_1bytes(((nb_buf & 0x07) << 5) | 0x1F); + stream.write_1bytes(0xFC); + + stream.write_bytes(format->audio->samples[0].bytes, format->audio->samples[0].size); + + *pbuf = buf; + *pnn_buf = nb_buf; + + return err; +} ISrsStreamBridge::ISrsStreamBridge() { @@ -53,3 +113,623 @@ srs_error_t SrsFrameToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) return source_->on_frame(frame); } +#ifdef SRS_FFMPEG_FIT + +SrsRtmpToRtcBridge::SrsRtmpToRtcBridge(SrsRtcSource* source) +{ + req = NULL; + source_ = source; + format = new SrsRtmpFormat(); + codec_ = new SrsAudioTranscoder(); + latest_codec_ = SrsAudioCodecIdForbidden; + rtmp_to_rtc = false; + keep_bframe = false; + merge_nalus = false; + meta = new SrsMetaCache(); + audio_sequence = 0; + video_sequence = 0; + + // audio track ssrc + if (true) { + std::vector descs = source->get_track_desc("audio", "opus"); + if (!descs.empty()) { + audio_ssrc = descs.at(0)->ssrc_; + } + // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 + audio_payload_type_ = descs.empty() ? 
kAudioPayloadType : descs.front()->media_->pt_; + } + + // video track ssrc + if (true) { + std::vector descs = source->get_track_desc("video", "H264"); + if (!descs.empty()) { + video_ssrc = descs.at(0)->ssrc_; + } + // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 + video_payload_type_ = descs.empty() ? kVideoPayloadType : descs.front()->media_->pt_; + } +} + +SrsRtmpToRtcBridge::~SrsRtmpToRtcBridge() +{ + srs_freep(format); + srs_freep(codec_); + srs_freep(meta); +} + +srs_error_t SrsRtmpToRtcBridge::initialize(SrsRequest* r) +{ + srs_error_t err = srs_success; + + req = r; + rtmp_to_rtc = _srs_config->get_rtc_from_rtmp(req->vhost); + + if (rtmp_to_rtc) { + if ((err = format->initialize()) != srs_success) { + return srs_error_wrap(err, "format initialize"); + } + + // Setup the SPS/PPS parsing strategy. + format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); + } + + keep_bframe = _srs_config->get_rtc_keep_bframe(req->vhost); + merge_nalus = _srs_config->get_rtc_server_merge_nalus(); + srs_trace("RTC bridge from RTMP, rtmp2rtc=%d, keep_bframe=%d, merge_nalus=%d", + rtmp_to_rtc, keep_bframe, merge_nalus); + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::on_publish() +{ + srs_error_t err = srs_success; + + if (!rtmp_to_rtc) { + return err; + } + + // TODO: FIXME: Should sync with bridge? + if ((err = source_->on_publish()) != srs_success) { + return srs_error_wrap(err, "source publish"); + } + + // Reset the metadata cache, to make VLC happy when disable/enable stream. + // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 + meta->clear(); + + return err; +} + +void SrsRtmpToRtcBridge::on_unpublish() +{ + if (!rtmp_to_rtc) { + return; + } + + // Reset the metadata cache, to make VLC happy when disable/enable stream. 
+ // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 + meta->update_previous_vsh(); + meta->update_previous_ash(); + + // @remark This bridge might be disposed here, so never use it. + // TODO: FIXME: Should sync with bridge? + source_->on_unpublish(); +} + +srs_error_t SrsRtmpToRtcBridge::on_frame(SrsSharedPtrMessage* frame) +{ + if (frame->is_audio()) { + return on_audio(frame); + } else if (frame->is_video()) { + return on_video(frame); + } + return srs_success; +} + +srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) +{ + srs_error_t err = srs_success; + + if (!rtmp_to_rtc) { + return err; + } + + // TODO: FIXME: Support parsing OPUS for RTC. + if ((err = format->on_audio(msg)) != srs_success) { + return srs_error_wrap(err, "format consume audio"); + } + + // Try to init codec when startup or codec changed. + if (format->acodec && (err = init_codec(format->acodec->id)) != srs_success) { + return srs_error_wrap(err, "init codec"); + } + + // Ignore if no format->acodec, it means the codec is not parsed, or unknown codec. + // @issue https://github.com/ossrs/srs/issues/1506#issuecomment-562079474 + if (!format->acodec) { + return err; + } + + // ts support audio codec: aac/mp3 + SrsAudioCodecId acodec = format->acodec->id; + if (acodec != SrsAudioCodecIdAAC && acodec != SrsAudioCodecIdMP3) { + return err; + } + + // ignore sequence header + srs_assert(format->audio); + + if (format->acodec->id == SrsAudioCodecIdMP3) { + return transcode(format->audio); + } + + // When drop aac audio packet, never transcode. + if (acodec != SrsAudioCodecIdAAC) { + return err; + } + + char* adts_audio = NULL; + int nn_adts_audio = 0; + // TODO: FIXME: Reserve 7 bytes header when create shared message. 
+ if ((err = aac_raw_append_adts_header(msg, format, &adts_audio, &nn_adts_audio)) != srs_success) { + return srs_error_wrap(err, "aac append header"); + } + + if (!adts_audio) { + return err; + } + + SrsAudioFrame aac; + aac.dts = format->audio->dts; + aac.cts = format->audio->cts; + if ((err = aac.add_sample(adts_audio, nn_adts_audio)) == srs_success) { + // If OK, transcode the AAC to Opus and consume it. + err = transcode(&aac); + } + + srs_freepa(adts_audio); + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::init_codec(SrsAudioCodecId codec) +{ + srs_error_t err = srs_success; + + // Ignore if not changed. + if (latest_codec_ == codec) return err; + + // Create a new codec. + srs_freep(codec_); + codec_ = new SrsAudioTranscoder(); + + // Initialize the codec according to the codec in stream. + int bitrate = 48000; // The output bitrate in bps. + if ((err = codec_->initialize(codec, SrsAudioCodecIdOpus, kAudioChannel, kAudioSamplerate, bitrate)) != srs_success) { + return srs_error_wrap(err, "init codec=%d", codec); + } + + // Update the latest codec in stream. + if (latest_codec_ == SrsAudioCodecIdForbidden) { + srs_trace("RTMP2RTC: Init audio codec to %d(%s)", codec, srs_audio_codec_id2str(codec).c_str()); + } else { + srs_trace("RTMP2RTC: Switch audio codec %d(%s) to %d(%s)", latest_codec_, srs_audio_codec_id2str(latest_codec_).c_str(), + codec, srs_audio_codec_id2str(codec).c_str()); + } + latest_codec_ = codec; + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::transcode(SrsAudioFrame* audio) +{ + srs_error_t err = srs_success; + + std::vector out_audios; + if ((err = codec_->transcode(audio, out_audios)) != srs_success) { + return srs_error_wrap(err, "recode error"); + } + + // Save OPUS packets in shared message. 
+ if (out_audios.empty()) { + return err; + } + + for (std::vector::iterator it = out_audios.begin(); it != out_audios.end(); ++it) { + SrsAudioFrame* out_audio = *it; + + SrsRtpPacket* pkt = new SrsRtpPacket(); + SrsAutoFree(SrsRtpPacket, pkt); + + if ((err = package_opus(out_audio, pkt)) != srs_success) { + err = srs_error_wrap(err, "package opus"); + break; + } + + if ((err = source_->on_rtp(pkt)) != srs_success) { + err = srs_error_wrap(err, "consume opus"); + break; + } + } + + codec_->free_frames(out_audios); + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) +{ + srs_error_t err = srs_success; + + pkt->header.set_payload_type(audio_payload_type_); + pkt->header.set_ssrc(audio_ssrc); + pkt->frame_type = SrsFrameTypeAudio; + pkt->header.set_marker(true); + pkt->header.set_sequence(audio_sequence++); + pkt->header.set_timestamp(audio->dts * 48); + + SrsRtpRawPayload* raw = new SrsRtpRawPayload(); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); + + srs_assert(audio->nb_samples == 1); + raw->payload = pkt->wrap(audio->samples[0].bytes, audio->samples[0].size); + raw->nn_payload = audio->samples[0].size; + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::on_video(SrsSharedPtrMessage* msg) +{ + srs_error_t err = srs_success; + + if (!rtmp_to_rtc) { + return err; + } + + // cache the sequence header if h264 + bool is_sequence_header = SrsFlvVideo::sh(msg->payload, msg->size); + if (is_sequence_header && (err = meta->update_vsh(msg)) != srs_success) { + return srs_error_wrap(err, "meta update video"); + } + + if ((err = format->on_video(msg)) != srs_success) { + return srs_error_wrap(err, "format consume video"); + } + + // Ignore if no format->vcodec, it means the codec is not parsed, or unsupport/unknown codec + // such as H.263 codec + if (!format->vcodec) { + return err; + } + + bool has_idr = false; + vector samples; + if ((err = filter(msg, format, has_idr, samples)) != srs_success) { + return 
srs_error_wrap(err, "filter video"); + } + int nn_samples = (int)samples.size(); + + // Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A. + if (has_idr) { + SrsRtpPacket* pkt = new SrsRtpPacket(); + SrsAutoFree(SrsRtpPacket, pkt); + + if ((err = package_stap_a(source_, msg, pkt)) != srs_success) { + return srs_error_wrap(err, "package stap-a"); + } + + if ((err = source_->on_rtp(pkt)) != srs_success) { + return srs_error_wrap(err, "consume sps/pps"); + } + } + + // If merge Nalus, we pcakges all NALUs(samples) as one NALU, in a RTP or FUA packet. + vector pkts; + if (merge_nalus && nn_samples > 1) { + if ((err = package_nalus(msg, samples, pkts)) != srs_success) { + return srs_error_wrap(err, "package nalus as one"); + } + } else { + // By default, we package each NALU(sample) to a RTP or FUA packet. + for (int i = 0; i < nn_samples; i++) { + SrsSample* sample = samples[i]; + + // We always ignore bframe here, if config to discard bframe, + // the bframe flag will not be set. + if (sample->bframe) { + continue; + } + + if (sample->size <= kRtpMaxPayloadSize) { + if ((err = package_single_nalu(msg, sample, pkts)) != srs_success) { + return srs_error_wrap(err, "package single nalu"); + } + } else { + if ((err = package_fu_a(msg, sample, kRtpMaxPayloadSize, pkts)) != srs_success) { + return srs_error_wrap(err, "package fu-a"); + } + } + } + } + + if (!pkts.empty()) { + pkts.back()->header.set_marker(true); + } + + return consume_packets(pkts); +} + +srs_error_t SrsRtmpToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) +{ + srs_error_t err = srs_success; + + // If IDR, we will insert SPS/PPS before IDR frame. + if (format->video && format->video->has_idr) { + has_idr = true; + } + + // Update samples to shared frame. + for (int i = 0; i < format->video->nb_samples; ++i) { + SrsSample* sample = &format->video->samples[i]; + + // Because RTC does not support B-frame, so we will drop them. 
+ // TODO: Drop B-frame in better way, which not cause picture corruption. + if (!keep_bframe) { + if ((err = sample->parse_bframe()) != srs_success) { + return srs_error_wrap(err, "parse bframe"); + } + if (sample->bframe) { + continue; + } + } + + samples.push_back(sample); + } + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) +{ + srs_error_t err = srs_success; + + SrsFormat* format = meta->vsh_format(); + if (!format || !format->vcodec) { + return err; + } + + // Note that the sps/pps may change, so we should copy it. + const vector& sps = format->vcodec->sequenceParameterSetNALUnit; + const vector& pps = format->vcodec->pictureParameterSetNALUnit; + if (sps.empty() || pps.empty()) { + return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty"); + } + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)kStapA; + pkt->header.set_marker(false); + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpSTAPPayload* stap = new SrsRtpSTAPPayload(); + pkt->set_payload(stap, SrsRtspPacketPayloadTypeSTAP); + + uint8_t header = sps[0]; + stap->nri = (SrsAvcNaluType)header; + + // Copy the SPS/PPS bytes, because it may change. 
+ int size = (int)(sps.size() + pps.size()); + char* payload = pkt->wrap(size); + + if (true) { + SrsSample* sample = new SrsSample(); + sample->bytes = payload; + sample->size = (int)sps.size(); + stap->nalus.push_back(sample); + + memcpy(payload, (char*)&sps[0], sps.size()); + payload += (int)sps.size(); + } + + if (true) { + SrsSample* sample = new SrsSample(); + sample->bytes = payload; + sample->size = (int)pps.size(); + stap->nalus.push_back(sample); + + memcpy(payload, (char*)&pps[0], pps.size()); + payload += (int)pps.size(); + } + + srs_info("RTC STAP-A seq=%u, sps %d, pps %d bytes", pkt->header.get_sequence(), sps.size(), pps.size()); + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) +{ + srs_error_t err = srs_success; + + SrsRtpRawNALUs* raw = new SrsRtpRawNALUs(); + SrsAvcNaluType first_nalu_type = SrsAvcNaluTypeReserved; + + for (int i = 0; i < (int)samples.size(); i++) { + SrsSample* sample = samples[i]; + + // We always ignore bframe here, if config to discard bframe, + // the bframe flag will not be set. + if (sample->bframe) { + continue; + } + + if (!sample->size) { + continue; + } + + if (first_nalu_type == SrsAvcNaluTypeReserved) { + first_nalu_type = SrsAvcNaluType((uint8_t)(sample->bytes[0] & kNalTypeMask)); + } + + raw->push_back(sample->copy()); + } + + // Ignore empty. + int nn_bytes = raw->nb_bytes(); + if (nn_bytes <= 0) { + srs_freep(raw); + return err; + } + + if (nn_bytes < kRtpMaxPayloadSize) { + // Package NALUs in a single RTP packet. 
+ SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)first_nalu_type; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeNALU); + pkt->wrap(msg); + } else { + // We must free it, should never use RTP packets to free it, + // because more than one RTP packet will refer to it. + SrsAutoFree(SrsRtpRawNALUs, raw); + + // Package NALUs in FU-A RTP packets. + int fu_payload_size = kRtpMaxPayloadSize; + + // The first byte is store in FU-A header. + uint8_t header = raw->skip_first_byte(); + uint8_t nal_type = header & kNalTypeMask; + int nb_left = nn_bytes - 1; + + int num_of_packet = 1 + (nn_bytes - 1) / fu_payload_size; + for (int i = 0; i < num_of_packet; ++i) { + int packet_size = srs_min(nb_left, fu_payload_size); + + SrsRtpFUAPayload* fua = new SrsRtpFUAPayload(); + if ((err = raw->read_samples(fua->nalus, packet_size)) != srs_success) { + srs_freep(fua); + return srs_error_wrap(err, "read samples %d bytes, left %d, total %d", packet_size, nb_left, nn_bytes); + } + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)kFuA; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + fua->nri = (SrsAvcNaluType)header; + fua->nalu_type = (SrsAvcNaluType)nal_type; + fua->start = bool(i == 0); + fua->end = bool(i == num_of_packet - 1); + + pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA); + pkt->wrap(msg); + + nb_left -= packet_size; + } + } + + return err; +} + +// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 +srs_error_t 
SrsRtmpToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) +{ + srs_error_t err = srs_success; + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc); + pkt->frame_type = SrsFrameTypeVideo; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpRawPayload* raw = new SrsRtpRawPayload(); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); + + raw->payload = sample->bytes; + raw->nn_payload = sample->size; + + pkt->wrap(msg); + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) +{ + srs_error_t err = srs_success; + + char* p = sample->bytes + 1; + int nb_left = sample->size - 1; + uint8_t header = sample->bytes[0]; + uint8_t nal_type = header & kNalTypeMask; + + int num_of_packet = 1 + (nb_left - 1) / fu_payload_size; + for (int i = 0; i < num_of_packet; ++i) { + int packet_size = srs_min(nb_left, fu_payload_size); + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc); + pkt->frame_type = SrsFrameTypeVideo; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpFUAPayload2* fua = new SrsRtpFUAPayload2(); + pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA2); + + fua->nri = (SrsAvcNaluType)header; + fua->nalu_type = (SrsAvcNaluType)nal_type; + fua->start = bool(i == 0); + fua->end = bool(i == num_of_packet - 1); + + fua->payload = p; + fua->size = packet_size; + + pkt->wrap(msg); + + p += packet_size; + nb_left -= packet_size; + } + + return err; +} + +srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) +{ + srs_error_t err = srs_success; + + // TODO: FIXME: Consume a range of packets. 
+ for (int i = 0; i < (int)pkts.size(); i++) { + SrsRtpPacket* pkt = pkts[i]; + if ((err = source_->on_rtp(pkt)) != srs_success) { + err = srs_error_wrap(err, "consume sps/pps"); + break; + } + } + + for (int i = 0; i < (int)pkts.size(); i++) { + SrsRtpPacket* pkt = pkts[i]; + srs_freep(pkt); + } + + return err; +} +#endif + diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp index 8abd098ef4..0f4c6800a9 100644 --- a/trunk/src/app/srs_app_stream_bridge.hpp +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -9,9 +9,18 @@ #include +#include + +#include + class SrsRequest; class SrsSharedPtrMessage; class SrsLiveSource; +class SrsRtcSource; +class SrsRtmpFormat; +class SrsMetaCache; +class SrsAudioTranscoder; +class SrsRtpPacket; // A stream bridge is used to convert stream via different protocols, such as bridge for RTMP and RTC. Generally, we use // frame as message for bridge. A frame is a audio or video frame, such as an I/B/P frame, a general frame for decoder. @@ -45,5 +54,55 @@ class SrsFrameToRtmpBridge : public ISrsStreamBridge virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); }; +#ifdef SRS_FFMPEG_FIT + +// A bridge to covert RTMP to WebRTC stream. +class SrsRtmpToRtcBridge : public ISrsStreamBridge +{ +private: + SrsRequest* req; + SrsRtcSource* source_; + // The format, codec information. + SrsRtmpFormat* format; + // The metadata cache. 
+ SrsMetaCache* meta; +private: + bool rtmp_to_rtc; + SrsAudioCodecId latest_codec_; + SrsAudioTranscoder* codec_; + bool keep_bframe; + bool merge_nalus; + uint16_t audio_sequence; + uint16_t video_sequence; + uint32_t audio_ssrc; + uint32_t video_ssrc; + uint8_t audio_payload_type_; + uint8_t video_payload_type_; +public: + SrsRtmpToRtcBridge(SrsRtcSource* source); + virtual ~SrsRtmpToRtcBridge(); +public: + virtual srs_error_t initialize(SrsRequest* r); + virtual srs_error_t on_publish(); + virtual void on_unpublish(); + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +private: + virtual srs_error_t on_audio(SrsSharedPtrMessage* msg); +private: + srs_error_t init_codec(SrsAudioCodecId codec); + srs_error_t transcode(SrsAudioFrame* audio); + srs_error_t package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt); +private: + virtual srs_error_t on_video(SrsSharedPtrMessage* msg); +private: + srs_error_t filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, std::vector& samples); + srs_error_t package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt); + srs_error_t package_nalus(SrsSharedPtrMessage* msg, const std::vector& samples, std::vector& pkts); + srs_error_t package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, std::vector& pkts); + srs_error_t package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, std::vector& pkts); + srs_error_t consume_packets(std::vector& pkts); +}; +#endif + #endif From e9dfe3d9a386cec18378a30b39aac03ecc910627 Mon Sep 17 00:00:00 2001 From: winlin Date: Sun, 29 Jan 2023 00:14:53 +0800 Subject: [PATCH 10/18] Rename SrsRtmpToRtcBridge to SrsFrameToRtcBridge --- trunk/src/app/srs_app_rtc_source.hpp | 2 +- trunk/src/app/srs_app_rtmp_conn.cpp | 6 +-- trunk/src/app/srs_app_srt_conn.cpp | 6 +-- trunk/src/app/srs_app_stream_bridge.cpp | 69 +++++++++---------------- trunk/src/app/srs_app_stream_bridge.hpp | 9 ++-- 5 files changed, 35 insertions(+), 57 deletions(-) diff 
--git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index a593f4124f..66a1ffffe4 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -29,7 +29,7 @@ class SrsSharedPtrMessage; class SrsCommonMessage; class SrsMessageArray; class SrsRtcSource; -class SrsRtmpToRtcBridge; +class SrsFrameToRtcBridge; class SrsAudioTranscoder; class SrsRtpPacket; class SrsSample; diff --git a/trunk/src/app/srs_app_rtmp_conn.cpp b/trunk/src/app/srs_app_rtmp_conn.cpp index d0987ce4b5..f532cf1cff 100644 --- a/trunk/src/app/srs_app_rtmp_conn.cpp +++ b/trunk/src/app/srs_app_rtmp_conn.cpp @@ -1075,7 +1075,7 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) // Check whether RTC stream is busy. #ifdef SRS_RTC - SrsRtcSource *rtc = NULL; + SrsRtcSource* rtc = NULL; bool rtc_server_enabled = _srs_config->get_rtc_server_enabled(); bool rtc_enabled = _srs_config->get_rtc_enabled(req->vhost); if (rtc_server_enabled && rtc_enabled && !info->edge) { @@ -1091,8 +1091,8 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) // Bridge to RTC streaming. #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) - if (rtc) { - SrsRtmpToRtcBridge* bridge = new SrsRtmpToRtcBridge(rtc); + if (rtc && _srs_config->get_rtc_from_rtmp(req->vhost)) { + SrsFrameToRtcBridge* bridge = new SrsFrameToRtcBridge(rtc); if ((err = bridge->initialize(req)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index c16809801d..6fae11d427 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -380,7 +380,7 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() // Check whether RTC stream is busy. 
#ifdef SRS_RTC - SrsRtcSource *rtc = NULL; + SrsRtcSource* rtc = NULL; bool rtc_server_enabled = _srs_config->get_rtc_server_enabled(); bool rtc_enabled = _srs_config->get_rtc_enabled(req_->vhost); bool edge = _srs_config->get_vhost_is_edge(req_->vhost); @@ -397,8 +397,8 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() // Bridge to RTC streaming. #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) - if (rtc) { - SrsRtmpToRtcBridge* bridge = new SrsRtmpToRtcBridge(rtc); + if (rtc && _srs_config->get_rtc_from_rtmp(req_->vhost)) { + SrsFrameToRtcBridge* bridge = new SrsFrameToRtcBridge(rtc); if ((err = bridge->initialize(req_)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index 5516283595..d0e52e63c7 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -115,14 +115,13 @@ srs_error_t SrsFrameToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) #ifdef SRS_FFMPEG_FIT -SrsRtmpToRtcBridge::SrsRtmpToRtcBridge(SrsRtcSource* source) +SrsFrameToRtcBridge::SrsFrameToRtcBridge(SrsRtcSource* source) { req = NULL; source_ = source; format = new SrsRtmpFormat(); codec_ = new SrsAudioTranscoder(); latest_codec_ = SrsAudioCodecIdForbidden; - rtmp_to_rtc = false; keep_bframe = false; merge_nalus = false; meta = new SrsMetaCache(); @@ -150,45 +149,37 @@ SrsRtmpToRtcBridge::SrsRtmpToRtcBridge(SrsRtcSource* source) } } -SrsRtmpToRtcBridge::~SrsRtmpToRtcBridge() +SrsFrameToRtcBridge::~SrsFrameToRtcBridge() { srs_freep(format); srs_freep(codec_); srs_freep(meta); } -srs_error_t SrsRtmpToRtcBridge::initialize(SrsRequest* r) +srs_error_t SrsFrameToRtcBridge::initialize(SrsRequest* r) { srs_error_t err = srs_success; req = r; - rtmp_to_rtc = _srs_config->get_rtc_from_rtmp(req->vhost); - if (rtmp_to_rtc) { - if ((err = format->initialize()) != srs_success) { - return srs_error_wrap(err, "format initialize"); - } - - // 
Setup the SPS/PPS parsing strategy. - format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); + if ((err = format->initialize()) != srs_success) { + return srs_error_wrap(err, "format initialize"); } + // Setup the SPS/PPS parsing strategy. + format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); + keep_bframe = _srs_config->get_rtc_keep_bframe(req->vhost); merge_nalus = _srs_config->get_rtc_server_merge_nalus(); - srs_trace("RTC bridge from RTMP, rtmp2rtc=%d, keep_bframe=%d, merge_nalus=%d", - rtmp_to_rtc, keep_bframe, merge_nalus); + srs_trace("RTC bridge from RTMP, keep_bframe=%d, merge_nalus=%d", keep_bframe, merge_nalus); return err; } -srs_error_t SrsRtmpToRtcBridge::on_publish() +srs_error_t SrsFrameToRtcBridge::on_publish() { srs_error_t err = srs_success; - if (!rtmp_to_rtc) { - return err; - } - // TODO: FIXME: Should sync with bridge? if ((err = source_->on_publish()) != srs_success) { return srs_error_wrap(err, "source publish"); @@ -201,12 +192,8 @@ srs_error_t SrsRtmpToRtcBridge::on_publish() return err; } -void SrsRtmpToRtcBridge::on_unpublish() +void SrsFrameToRtcBridge::on_unpublish() { - if (!rtmp_to_rtc) { - return; - } - // Reset the metadata cache, to make VLC happy when disable/enable stream. 
// @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 meta->update_previous_vsh(); @@ -217,7 +204,7 @@ void SrsRtmpToRtcBridge::on_unpublish() source_->on_unpublish(); } -srs_error_t SrsRtmpToRtcBridge::on_frame(SrsSharedPtrMessage* frame) +srs_error_t SrsFrameToRtcBridge::on_frame(SrsSharedPtrMessage* frame) { if (frame->is_audio()) { return on_audio(frame); @@ -227,14 +214,10 @@ srs_error_t SrsRtmpToRtcBridge::on_frame(SrsSharedPtrMessage* frame) return srs_success; } -srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) +srs_error_t SrsFrameToRtcBridge::on_audio(SrsSharedPtrMessage* msg) { srs_error_t err = srs_success; - if (!rtmp_to_rtc) { - return err; - } - // TODO: FIXME: Support parsing OPUS for RTC. if ((err = format->on_audio(msg)) != srs_success) { return srs_error_wrap(err, "format consume audio"); @@ -293,7 +276,7 @@ srs_error_t SrsRtmpToRtcBridge::on_audio(SrsSharedPtrMessage* msg) return err; } -srs_error_t SrsRtmpToRtcBridge::init_codec(SrsAudioCodecId codec) +srs_error_t SrsFrameToRtcBridge::init_codec(SrsAudioCodecId codec) { srs_error_t err = srs_success; @@ -315,14 +298,14 @@ srs_error_t SrsRtmpToRtcBridge::init_codec(SrsAudioCodecId codec) srs_trace("RTMP2RTC: Init audio codec to %d(%s)", codec, srs_audio_codec_id2str(codec).c_str()); } else { srs_trace("RTMP2RTC: Switch audio codec %d(%s) to %d(%s)", latest_codec_, srs_audio_codec_id2str(latest_codec_).c_str(), - codec, srs_audio_codec_id2str(codec).c_str()); + codec, srs_audio_codec_id2str(codec).c_str()); } latest_codec_ = codec; return err; } -srs_error_t SrsRtmpToRtcBridge::transcode(SrsAudioFrame* audio) +srs_error_t SrsFrameToRtcBridge::transcode(SrsAudioFrame* audio) { srs_error_t err = srs_success; @@ -358,7 +341,7 @@ srs_error_t SrsRtmpToRtcBridge::transcode(SrsAudioFrame* audio) return err; } -srs_error_t SrsRtmpToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) +srs_error_t SrsFrameToRtcBridge::package_opus(SrsAudioFrame* audio, 
SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -379,14 +362,10 @@ srs_error_t SrsRtmpToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* return err; } -srs_error_t SrsRtmpToRtcBridge::on_video(SrsSharedPtrMessage* msg) +srs_error_t SrsFrameToRtcBridge::on_video(SrsSharedPtrMessage* msg) { srs_error_t err = srs_success; - if (!rtmp_to_rtc) { - return err; - } - // cache the sequence header if h264 bool is_sequence_header = SrsFlvVideo::sh(msg->payload, msg->size); if (is_sequence_header && (err = meta->update_vsh(msg)) != srs_success) { @@ -460,7 +439,7 @@ srs_error_t SrsRtmpToRtcBridge::on_video(SrsSharedPtrMessage* msg) return consume_packets(pkts); } -srs_error_t SrsRtmpToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) +srs_error_t SrsFrameToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) { srs_error_t err = srs_success; @@ -490,7 +469,7 @@ srs_error_t SrsRtmpToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* form return err; } -srs_error_t SrsRtmpToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) +srs_error_t SrsFrameToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) { srs_error_t err = srs_success; @@ -549,7 +528,7 @@ srs_error_t SrsRtmpToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPt return err; } -srs_error_t SrsRtmpToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) +srs_error_t SrsFrameToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) { srs_error_t err = srs_success; @@ -645,7 +624,7 @@ srs_error_t SrsRtmpToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const ve } // Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 -srs_error_t SrsRtmpToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) 
+srs_error_t SrsFrameToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) { srs_error_t err = srs_success; @@ -669,7 +648,7 @@ srs_error_t SrsRtmpToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, Sr return err; } -srs_error_t SrsRtmpToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) +srs_error_t SrsFrameToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) { srs_error_t err = srs_success; @@ -711,7 +690,7 @@ srs_error_t SrsRtmpToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample return err; } -srs_error_t SrsRtmpToRtcBridge::consume_packets(vector& pkts) +srs_error_t SrsFrameToRtcBridge::consume_packets(vector& pkts) { srs_error_t err = srs_success; diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp index 0f4c6800a9..fc1bc9f9b3 100644 --- a/trunk/src/app/srs_app_stream_bridge.hpp +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -56,8 +56,8 @@ class SrsFrameToRtmpBridge : public ISrsStreamBridge #ifdef SRS_FFMPEG_FIT -// A bridge to covert RTMP to WebRTC stream. -class SrsRtmpToRtcBridge : public ISrsStreamBridge +// A bridge to covert AV frame to WebRTC stream. +class SrsFrameToRtcBridge : public ISrsStreamBridge { private: SrsRequest* req; @@ -67,7 +67,6 @@ class SrsRtmpToRtcBridge : public ISrsStreamBridge // The metadata cache. 
SrsMetaCache* meta; private: - bool rtmp_to_rtc; SrsAudioCodecId latest_codec_; SrsAudioTranscoder* codec_; bool keep_bframe; @@ -79,8 +78,8 @@ class SrsRtmpToRtcBridge : public ISrsStreamBridge uint8_t audio_payload_type_; uint8_t video_payload_type_; public: - SrsRtmpToRtcBridge(SrsRtcSource* source); - virtual ~SrsRtmpToRtcBridge(); + SrsFrameToRtcBridge(SrsRtcSource* source); + virtual ~SrsFrameToRtcBridge(); public: virtual srs_error_t initialize(SrsRequest* r); virtual srs_error_t on_publish(); From 368af2913bdf05f83d92113b14ca6136379190fb Mon Sep 17 00:00:00 2001 From: winlin Date: Sun, 29 Jan 2023 01:03:12 +0800 Subject: [PATCH 11/18] Extract rtc rtp builder from AV frame. --- trunk/src/app/srs_app_rtc_source.cpp | 628 ++++++++++++++++++++++++ trunk/src/app/srs_app_rtc_source.hpp | 51 +- trunk/src/app/srs_app_stream_bridge.cpp | 622 ++--------------------- trunk/src/app/srs_app_stream_bridge.hpp | 37 +- 4 files changed, 711 insertions(+), 627 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_source.cpp b/trunk/src/app/srs_app_rtc_source.cpp index 4efe9a5fe5..56879d460d 100644 --- a/trunk/src/app/srs_app_rtc_source.cpp +++ b/trunk/src/app/srs_app_rtc_source.cpp @@ -52,8 +52,63 @@ SrsPps* _srs_pps_rmnack = NULL; extern SrsPps* _srs_pps_aloss2; +const int kAudioChannel = 2; +const int kAudioSamplerate = 48000; + +const int kVideoSamplerate = 90000; + using namespace std; +// The RTP payload max size, reserved some paddings for SRTP as such: +// kRtpPacketSize = kRtpMaxPayloadSize + paddings +// For example, if kRtpPacketSize is 1500, recommend to set kRtpMaxPayloadSize to 1400, +// which reserves 100 bytes for SRTP or paddings. +// otherwise, the kRtpPacketSize must less than MTU, in webrtc source code, +// the rtp max size is assigned by kVideoMtu = 1200. +// so we set kRtpMaxPayloadSize = 1200. 
+// see @doc https://groups.google.com/g/discuss-webrtc/c/gH5ysR3SoZI +const int kRtpMaxPayloadSize = kRtpPacketSize - 300; + +// TODO: Add this function into SrsRtpMux class. +srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFormat* format, char** pbuf, int* pnn_buf) +{ + srs_error_t err = srs_success; + + if (format->is_aac_sequence_header()) { + return err; + } + + // If no audio RAW frame, or not parsed for no sequence header, drop the packet. + if (format->audio->nb_samples == 0) { + srs_warn("RTC: Drop AAC %d bytes for no sample", shared_audio->size); + return err; + } + + if (format->audio->nb_samples != 1) { + return srs_error_new(ERROR_RTC_RTP_MUXER, "adts samples=%d", format->audio->nb_samples); + } + + int nb_buf = format->audio->samples[0].size + 7; + char* buf = new char[nb_buf]; + SrsBuffer stream(buf, nb_buf); + + // TODO: Add comment. + stream.write_1bytes(0xFF); + stream.write_1bytes(0xF9); + stream.write_1bytes(((format->acodec->aac_object - 1) << 6) | ((format->acodec->aac_sample_rate & 0x0F) << 2) | ((format->acodec->aac_channels & 0x04) >> 2)); + stream.write_1bytes(((format->acodec->aac_channels & 0x03) << 6) | ((nb_buf >> 11) & 0x03)); + stream.write_1bytes((nb_buf >> 3) & 0xFF); + stream.write_1bytes(((nb_buf & 0x07) << 5) | 0x1F); + stream.write_1bytes(0xFC); + + stream.write_bytes(format->audio->samples[0].bytes, format->audio->samples[0].size); + + *pbuf = buf; + *pnn_buf = nb_buf; + + return err; +} + uint64_t SrsNtp::kMagicNtpFractionalUnit = 1ULL << 32; SrsNtp::SrsNtp() @@ -673,6 +728,578 @@ srs_error_t SrsRtcSource::on_timer(srs_utime_t interval) #ifdef SRS_FFMPEG_FIT +SrsRtcRtpBuilder::SrsRtcRtpBuilder(SrsFrameToRtcBridge* bridge, uint32_t assrc, uint8_t apt, uint32_t vssrc, uint8_t vpt) +{ + req = NULL; + bridge_ = bridge; + format = new SrsRtmpFormat(); + codec_ = new SrsAudioTranscoder(); + latest_codec_ = SrsAudioCodecIdForbidden; + keep_bframe = false; + merge_nalus = false; + meta = new 
SrsMetaCache(); + audio_sequence = 0; + video_sequence = 0; + + audio_ssrc_ = assrc; + audio_payload_type_ = apt; + video_ssrc_ = vssrc; + video_payload_type_ = vpt; +} + +SrsRtcRtpBuilder::~SrsRtcRtpBuilder() +{ + srs_freep(format); + srs_freep(codec_); + srs_freep(meta); +} + +srs_error_t SrsRtcRtpBuilder::initialize(SrsRequest* r) +{ + srs_error_t err = srs_success; + + req = r; + + if ((err = format->initialize()) != srs_success) { + return srs_error_wrap(err, "format initialize"); + } + + // Setup the SPS/PPS parsing strategy. + format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); + + keep_bframe = _srs_config->get_rtc_keep_bframe(req->vhost); + merge_nalus = _srs_config->get_rtc_server_merge_nalus(); + srs_trace("RTC bridge from RTMP, keep_bframe=%d, merge_nalus=%d", keep_bframe, merge_nalus); + + return err; +} + +srs_error_t SrsRtcRtpBuilder::on_publish() +{ + srs_error_t err = srs_success; + + // Reset the metadata cache, to make VLC happy when disable/enable stream. + // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 + meta->clear(); + + return err; +} + +void SrsRtcRtpBuilder::on_unpublish() +{ + // Reset the metadata cache, to make VLC happy when disable/enable stream. + // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 + meta->update_previous_vsh(); + meta->update_previous_ash(); +} + +srs_error_t SrsRtcRtpBuilder::on_frame(SrsSharedPtrMessage* frame) +{ + if (frame->is_audio()) { + return on_audio(frame); + } else if (frame->is_video()) { + return on_video(frame); + } + return srs_success; +} + +srs_error_t SrsRtcRtpBuilder::on_audio(SrsSharedPtrMessage* msg) +{ + srs_error_t err = srs_success; + + // TODO: FIXME: Support parsing OPUS for RTC. + if ((err = format->on_audio(msg)) != srs_success) { + return srs_error_wrap(err, "format consume audio"); + } + + // Try to init codec when startup or codec changed. 
+ if (format->acodec && (err = init_codec(format->acodec->id)) != srs_success) { + return srs_error_wrap(err, "init codec"); + } + + // Ignore if no format->acodec, it means the codec is not parsed, or unknown codec. + // @issue https://github.com/ossrs/srs/issues/1506#issuecomment-562079474 + if (!format->acodec) { + return err; + } + + // ts support audio codec: aac/mp3 + SrsAudioCodecId acodec = format->acodec->id; + if (acodec != SrsAudioCodecIdAAC && acodec != SrsAudioCodecIdMP3) { + return err; + } + + // ignore sequence header + srs_assert(format->audio); + + if (format->acodec->id == SrsAudioCodecIdMP3) { + return transcode(format->audio); + } + + // When drop aac audio packet, never transcode. + if (acodec != SrsAudioCodecIdAAC) { + return err; + } + + char* adts_audio = NULL; + int nn_adts_audio = 0; + // TODO: FIXME: Reserve 7 bytes header when create shared message. + if ((err = aac_raw_append_adts_header(msg, format, &adts_audio, &nn_adts_audio)) != srs_success) { + return srs_error_wrap(err, "aac append header"); + } + + if (!adts_audio) { + return err; + } + + SrsAudioFrame aac; + aac.dts = format->audio->dts; + aac.cts = format->audio->cts; + if ((err = aac.add_sample(adts_audio, nn_adts_audio)) == srs_success) { + // If OK, transcode the AAC to Opus and consume it. + err = transcode(&aac); + } + + srs_freepa(adts_audio); + + return err; +} + +srs_error_t SrsRtcRtpBuilder::init_codec(SrsAudioCodecId codec) +{ + srs_error_t err = srs_success; + + // Ignore if not changed. + if (latest_codec_ == codec) return err; + + // Create a new codec. + srs_freep(codec_); + codec_ = new SrsAudioTranscoder(); + + // Initialize the codec according to the codec in stream. + int bitrate = 48000; // The output bitrate in bps. + if ((err = codec_->initialize(codec, SrsAudioCodecIdOpus, kAudioChannel, kAudioSamplerate, bitrate)) != srs_success) { + return srs_error_wrap(err, "init codec=%d", codec); + } + + // Update the latest codec in stream. 
+ if (latest_codec_ == SrsAudioCodecIdForbidden) { + srs_trace("RTMP2RTC: Init audio codec to %d(%s)", codec, srs_audio_codec_id2str(codec).c_str()); + } else { + srs_trace("RTMP2RTC: Switch audio codec %d(%s) to %d(%s)", latest_codec_, srs_audio_codec_id2str(latest_codec_).c_str(), + codec, srs_audio_codec_id2str(codec).c_str()); + } + latest_codec_ = codec; + + return err; +} + +srs_error_t SrsRtcRtpBuilder::transcode(SrsAudioFrame* audio) +{ + srs_error_t err = srs_success; + + std::vector out_audios; + if ((err = codec_->transcode(audio, out_audios)) != srs_success) { + return srs_error_wrap(err, "recode error"); + } + + // Save OPUS packets in shared message. + if (out_audios.empty()) { + return err; + } + + for (std::vector::iterator it = out_audios.begin(); it != out_audios.end(); ++it) { + SrsAudioFrame* out_audio = *it; + + SrsRtpPacket* pkt = new SrsRtpPacket(); + SrsAutoFree(SrsRtpPacket, pkt); + + if ((err = package_opus(out_audio, pkt)) != srs_success) { + err = srs_error_wrap(err, "package opus"); + break; + } + + if ((err = bridge_->on_rtp(pkt)) != srs_success) { + err = srs_error_wrap(err, "consume opus"); + break; + } + } + + codec_->free_frames(out_audios); + + return err; +} + +srs_error_t SrsRtcRtpBuilder::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) +{ + srs_error_t err = srs_success; + + pkt->header.set_payload_type(audio_payload_type_); + pkt->header.set_ssrc(audio_ssrc_); + pkt->frame_type = SrsFrameTypeAudio; + pkt->header.set_marker(true); + pkt->header.set_sequence(audio_sequence++); + pkt->header.set_timestamp(audio->dts * 48); + + SrsRtpRawPayload* raw = new SrsRtpRawPayload(); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); + + srs_assert(audio->nb_samples == 1); + raw->payload = pkt->wrap(audio->samples[0].bytes, audio->samples[0].size); + raw->nn_payload = audio->samples[0].size; + + return err; +} + +srs_error_t SrsRtcRtpBuilder::on_video(SrsSharedPtrMessage* msg) +{ + srs_error_t err = srs_success; + + // cache the 
sequence header if h264 + bool is_sequence_header = SrsFlvVideo::sh(msg->payload, msg->size); + if (is_sequence_header && (err = meta->update_vsh(msg)) != srs_success) { + return srs_error_wrap(err, "meta update video"); + } + + if ((err = format->on_video(msg)) != srs_success) { + return srs_error_wrap(err, "format consume video"); + } + + // Ignore if no format->vcodec, it means the codec is not parsed, or unsupport/unknown codec + // such as H.263 codec + if (!format->vcodec) { + return err; + } + + bool has_idr = false; + vector samples; + if ((err = filter(msg, format, has_idr, samples)) != srs_success) { + return srs_error_wrap(err, "filter video"); + } + int nn_samples = (int)samples.size(); + + // Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A. + if (has_idr) { + SrsRtpPacket* pkt = new SrsRtpPacket(); + SrsAutoFree(SrsRtpPacket, pkt); + + if ((err = package_stap_a(msg, pkt)) != srs_success) { + return srs_error_wrap(err, "package stap-a"); + } + + if ((err = bridge_->on_rtp(pkt)) != srs_success) { + return srs_error_wrap(err, "consume sps/pps"); + } + } + + // If merge Nalus, we pcakges all NALUs(samples) as one NALU, in a RTP or FUA packet. + vector pkts; + if (merge_nalus && nn_samples > 1) { + if ((err = package_nalus(msg, samples, pkts)) != srs_success) { + return srs_error_wrap(err, "package nalus as one"); + } + } else { + // By default, we package each NALU(sample) to a RTP or FUA packet. + for (int i = 0; i < nn_samples; i++) { + SrsSample* sample = samples[i]; + + // We always ignore bframe here, if config to discard bframe, + // the bframe flag will not be set. 
+ if (sample->bframe) { + continue; + } + + if (sample->size <= kRtpMaxPayloadSize) { + if ((err = package_single_nalu(msg, sample, pkts)) != srs_success) { + return srs_error_wrap(err, "package single nalu"); + } + } else { + if ((err = package_fu_a(msg, sample, kRtpMaxPayloadSize, pkts)) != srs_success) { + return srs_error_wrap(err, "package fu-a"); + } + } + } + } + + if (!pkts.empty()) { + pkts.back()->header.set_marker(true); + } + + return consume_packets(pkts); +} + +srs_error_t SrsRtcRtpBuilder::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) +{ + srs_error_t err = srs_success; + + // If IDR, we will insert SPS/PPS before IDR frame. + if (format->video && format->video->has_idr) { + has_idr = true; + } + + // Update samples to shared frame. + for (int i = 0; i < format->video->nb_samples; ++i) { + SrsSample* sample = &format->video->samples[i]; + + // Because RTC does not support B-frame, so we will drop them. + // TODO: Drop B-frame in better way, which not cause picture corruption. + if (!keep_bframe) { + if ((err = sample->parse_bframe()) != srs_success) { + return srs_error_wrap(err, "parse bframe"); + } + if (sample->bframe) { + continue; + } + } + + samples.push_back(sample); + } + + return err; +} + +srs_error_t SrsRtcRtpBuilder::package_stap_a(SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) +{ + srs_error_t err = srs_success; + + SrsFormat* format = meta->vsh_format(); + if (!format || !format->vcodec) { + return err; + } + + // Note that the sps/pps may change, so we should copy it. 
+ const vector& sps = format->vcodec->sequenceParameterSetNALUnit; + const vector& pps = format->vcodec->pictureParameterSetNALUnit; + if (sps.empty() || pps.empty()) { + return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty"); + } + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc_); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)kStapA; + pkt->header.set_marker(false); + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpSTAPPayload* stap = new SrsRtpSTAPPayload(); + pkt->set_payload(stap, SrsRtspPacketPayloadTypeSTAP); + + uint8_t header = sps[0]; + stap->nri = (SrsAvcNaluType)header; + + // Copy the SPS/PPS bytes, because it may change. + int size = (int)(sps.size() + pps.size()); + char* payload = pkt->wrap(size); + + if (true) { + SrsSample* sample = new SrsSample(); + sample->bytes = payload; + sample->size = (int)sps.size(); + stap->nalus.push_back(sample); + + memcpy(payload, (char*)&sps[0], sps.size()); + payload += (int)sps.size(); + } + + if (true) { + SrsSample* sample = new SrsSample(); + sample->bytes = payload; + sample->size = (int)pps.size(); + stap->nalus.push_back(sample); + + memcpy(payload, (char*)&pps[0], pps.size()); + payload += (int)pps.size(); + } + + srs_info("RTC STAP-A seq=%u, sps %d, pps %d bytes", pkt->header.get_sequence(), sps.size(), pps.size()); + + return err; +} + +srs_error_t SrsRtcRtpBuilder::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) +{ + srs_error_t err = srs_success; + + SrsRtpRawNALUs* raw = new SrsRtpRawNALUs(); + SrsAvcNaluType first_nalu_type = SrsAvcNaluTypeReserved; + + for (int i = 0; i < (int)samples.size(); i++) { + SrsSample* sample = samples[i]; + + // We always ignore bframe here, if config to discard bframe, + // the bframe flag will not be set. 
+ if (sample->bframe) { + continue; + } + + if (!sample->size) { + continue; + } + + if (first_nalu_type == SrsAvcNaluTypeReserved) { + first_nalu_type = SrsAvcNaluType((uint8_t)(sample->bytes[0] & kNalTypeMask)); + } + + raw->push_back(sample->copy()); + } + + // Ignore empty. + int nn_bytes = raw->nb_bytes(); + if (nn_bytes <= 0) { + srs_freep(raw); + return err; + } + + if (nn_bytes < kRtpMaxPayloadSize) { + // Package NALUs in a single RTP packet. + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc_); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)first_nalu_type; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeNALU); + pkt->wrap(msg); + } else { + // We must free it, should never use RTP packets to free it, + // because more than one RTP packet will refer to it. + SrsAutoFree(SrsRtpRawNALUs, raw); + + // Package NALUs in FU-A RTP packets. + int fu_payload_size = kRtpMaxPayloadSize; + + // The first byte is store in FU-A header. 
+ uint8_t header = raw->skip_first_byte(); + uint8_t nal_type = header & kNalTypeMask; + int nb_left = nn_bytes - 1; + + int num_of_packet = 1 + (nn_bytes - 1) / fu_payload_size; + for (int i = 0; i < num_of_packet; ++i) { + int packet_size = srs_min(nb_left, fu_payload_size); + + SrsRtpFUAPayload* fua = new SrsRtpFUAPayload(); + if ((err = raw->read_samples(fua->nalus, packet_size)) != srs_success) { + srs_freep(fua); + return srs_error_wrap(err, "read samples %d bytes, left %d, total %d", packet_size, nb_left, nn_bytes); + } + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc_); + pkt->frame_type = SrsFrameTypeVideo; + pkt->nalu_type = (SrsAvcNaluType)kFuA; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + fua->nri = (SrsAvcNaluType)header; + fua->nalu_type = (SrsAvcNaluType)nal_type; + fua->start = bool(i == 0); + fua->end = bool(i == num_of_packet - 1); + + pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA); + pkt->wrap(msg); + + nb_left -= packet_size; + } + } + + return err; +} + +// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 +srs_error_t SrsRtcRtpBuilder::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) +{ + srs_error_t err = srs_success; + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc_); + pkt->frame_type = SrsFrameTypeVideo; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpRawPayload* raw = new SrsRtpRawPayload(); + pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); + + raw->payload = sample->bytes; + raw->nn_payload = sample->size; + + pkt->wrap(msg); + + return err; +} + +srs_error_t SrsRtcRtpBuilder::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, 
vector& pkts) +{ + srs_error_t err = srs_success; + + char* p = sample->bytes + 1; + int nb_left = sample->size - 1; + uint8_t header = sample->bytes[0]; + uint8_t nal_type = header & kNalTypeMask; + + int num_of_packet = 1 + (nb_left - 1) / fu_payload_size; + for (int i = 0; i < num_of_packet; ++i) { + int packet_size = srs_min(nb_left, fu_payload_size); + + SrsRtpPacket* pkt = new SrsRtpPacket(); + pkts.push_back(pkt); + + pkt->header.set_payload_type(video_payload_type_); + pkt->header.set_ssrc(video_ssrc_); + pkt->frame_type = SrsFrameTypeVideo; + pkt->header.set_sequence(video_sequence++); + pkt->header.set_timestamp(msg->timestamp * 90); + + SrsRtpFUAPayload2* fua = new SrsRtpFUAPayload2(); + pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA2); + + fua->nri = (SrsAvcNaluType)header; + fua->nalu_type = (SrsAvcNaluType)nal_type; + fua->start = bool(i == 0); + fua->end = bool(i == num_of_packet - 1); + + fua->payload = p; + fua->size = packet_size; + + pkt->wrap(msg); + + p += packet_size; + nb_left -= packet_size; + } + + return err; +} + +srs_error_t SrsRtcRtpBuilder::consume_packets(vector& pkts) +{ + srs_error_t err = srs_success; + + // TODO: FIXME: Consume a range of packets. 
+ for (int i = 0; i < (int)pkts.size(); i++) { + SrsRtpPacket* pkt = pkts[i]; + if ((err = bridge_->on_rtp(pkt)) != srs_success) { + err = srs_error_wrap(err, "consume sps/pps"); + break; + } + } + + for (int i = 0; i < (int)pkts.size(); i++) { + SrsRtpPacket* pkt = pkts[i]; + srs_freep(pkt); + } + + return err; +} + SrsRtcFrameBuilder::SrsRtcFrameBuilder(ISrsStreamBridge* bridge) { bridge_ = bridge; @@ -1180,6 +1807,7 @@ bool SrsRtcFrameBuilder::check_frame_complete(const uint16_t start, const uint16 return fu_s_c == fu_e_c; } + #endif SrsCodecPayload::SrsCodecPayload() diff --git a/trunk/src/app/srs_app_rtc_source.hpp b/trunk/src/app/srs_app_rtc_source.hpp index 66a1ffffe4..8e8453a4e4 100644 --- a/trunk/src/app/srs_app_rtc_source.hpp +++ b/trunk/src/app/srs_app_rtc_source.hpp @@ -45,12 +45,8 @@ class SrsLiveSource; // Firefox defaults as 109, Chrome is 111. const int kAudioPayloadType = 111; -const int kAudioChannel = 2; -const int kAudioSamplerate = 48000; - // Firefox defaults as 126, Chrome is 102. const int kVideoPayloadType = 102; -const int kVideoSamplerate = 90000; class SrsNtp { @@ -252,6 +248,53 @@ class SrsRtcSource : public ISrsFastTimer #ifdef SRS_FFMPEG_FIT +// Convert AV frame to RTC RTP packets. +class SrsRtcRtpBuilder +{ +private: + SrsRequest* req; + SrsFrameToRtcBridge* bridge_; + // The format, codec information. + SrsRtmpFormat* format; + // The metadata cache. 
+ SrsMetaCache* meta; +private: + SrsAudioCodecId latest_codec_; + SrsAudioTranscoder* codec_; + bool keep_bframe; + bool merge_nalus; + uint16_t audio_sequence; + uint16_t video_sequence; +private: + uint32_t audio_ssrc_; + uint32_t video_ssrc_; + uint8_t audio_payload_type_; + uint8_t video_payload_type_; +public: + SrsRtcRtpBuilder(SrsFrameToRtcBridge* bridge, uint32_t assrc, uint8_t apt, uint32_t vssrc, uint8_t vpt); + virtual ~SrsRtcRtpBuilder(); +public: + virtual srs_error_t initialize(SrsRequest* r); + virtual srs_error_t on_publish(); + virtual void on_unpublish(); + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +private: + virtual srs_error_t on_audio(SrsSharedPtrMessage* msg); +private: + srs_error_t init_codec(SrsAudioCodecId codec); + srs_error_t transcode(SrsAudioFrame* audio); + srs_error_t package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt); +private: + virtual srs_error_t on_video(SrsSharedPtrMessage* msg); +private: + srs_error_t filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, std::vector& samples); + srs_error_t package_stap_a(SrsSharedPtrMessage* msg, SrsRtpPacket* pkt); + srs_error_t package_nalus(SrsSharedPtrMessage* msg, const std::vector& samples, std::vector& pkts); + srs_error_t package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, std::vector& pkts); + srs_error_t package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, std::vector& pkts); + srs_error_t consume_packets(std::vector& pkts); +}; + // Collect and build WebRTC RTP packets to AV frames. 
class SrsRtcFrameBuilder { diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index d0e52e63c7..0a4e786982 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -18,56 +18,6 @@ #include using namespace std; -// The RTP payload max size, reserved some paddings for SRTP as such: -// kRtpPacketSize = kRtpMaxPayloadSize + paddings -// For example, if kRtpPacketSize is 1500, recommend to set kRtpMaxPayloadSize to 1400, -// which reserves 100 bytes for SRTP or paddings. -// otherwise, the kRtpPacketSize must less than MTU, in webrtc source code, -// the rtp max size is assigned by kVideoMtu = 1200. -// so we set kRtpMaxPayloadSize = 1200. -// see @doc https://groups.google.com/g/discuss-webrtc/c/gH5ysR3SoZI -const int kRtpMaxPayloadSize = kRtpPacketSize - 300; - -// TODO: Add this function into SrsRtpMux class. -srs_error_t aac_raw_append_adts_header(SrsSharedPtrMessage* shared_audio, SrsFormat* format, char** pbuf, int* pnn_buf) -{ - srs_error_t err = srs_success; - - if (format->is_aac_sequence_header()) { - return err; - } - - // If no audio RAW frame, or not parsed for no sequence header, drop the packet. - if (format->audio->nb_samples == 0) { - srs_warn("RTC: Drop AAC %d bytes for no sample", shared_audio->size); - return err; - } - - if (format->audio->nb_samples != 1) { - return srs_error_new(ERROR_RTC_RTP_MUXER, "adts samples=%d", format->audio->nb_samples); - } - - int nb_buf = format->audio->samples[0].size + 7; - char* buf = new char[nb_buf]; - SrsBuffer stream(buf, nb_buf); - - // TODO: Add comment. 
- stream.write_1bytes(0xFF); - stream.write_1bytes(0xF9); - stream.write_1bytes(((format->acodec->aac_object - 1) << 6) | ((format->acodec->aac_sample_rate & 0x0F) << 2) | ((format->acodec->aac_channels & 0x04) >> 2)); - stream.write_1bytes(((format->acodec->aac_channels & 0x03) << 6) | ((nb_buf >> 11) & 0x03)); - stream.write_1bytes((nb_buf >> 3) & 0xFF); - stream.write_1bytes(((nb_buf & 0x07) << 5) | 0x1F); - stream.write_1bytes(0xFC); - - stream.write_bytes(format->audio->samples[0].bytes, format->audio->samples[0].size); - - *pbuf = buf; - *pnn_buf = nb_buf; - - return err; -} - ISrsStreamBridge::ISrsStreamBridge() { } @@ -113,20 +63,15 @@ srs_error_t SrsFrameToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) return source_->on_frame(frame); } -#ifdef SRS_FFMPEG_FIT - SrsFrameToRtcBridge::SrsFrameToRtcBridge(SrsRtcSource* source) { - req = NULL; source_ = source; - format = new SrsRtmpFormat(); - codec_ = new SrsAudioTranscoder(); - latest_codec_ = SrsAudioCodecIdForbidden; - keep_bframe = false; - merge_nalus = false; - meta = new SrsMetaCache(); - audio_sequence = 0; - video_sequence = 0; + +#ifdef SRS_FFMPEG_FIT + uint32_t audio_ssrc = 0; + uint8_t audio_payload_type = 0; + uint32_t video_ssrc = 0; + uint8_t video_payload_type = 0; // audio track ssrc if (true) { @@ -135,7 +80,7 @@ SrsFrameToRtcBridge::SrsFrameToRtcBridge(SrsRtcSource* source) audio_ssrc = descs.at(0)->ssrc_; } // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 - audio_payload_type_ = descs.empty() ? kAudioPayloadType : descs.front()->media_->pt_; + audio_payload_type = descs.empty() ? kAudioPayloadType : descs.front()->media_->pt_; } // video track ssrc @@ -145,35 +90,27 @@ SrsFrameToRtcBridge::SrsFrameToRtcBridge(SrsRtcSource* source) video_ssrc = descs.at(0)->ssrc_; } // Note we must use the PT of source, see https://github.com/ossrs/srs/pull/3079 - video_payload_type_ = descs.empty() ? 
kVideoPayloadType : descs.front()->media_->pt_; + video_payload_type = descs.empty() ? kVideoPayloadType : descs.front()->media_->pt_; } + + rtp_builder_ = new SrsRtcRtpBuilder(this, audio_ssrc, audio_payload_type, video_ssrc, video_payload_type); +#endif } SrsFrameToRtcBridge::~SrsFrameToRtcBridge() { - srs_freep(format); - srs_freep(codec_); - srs_freep(meta); +#ifdef SRS_FFMPEG_FIT + srs_freep(rtp_builder_); +#endif } srs_error_t SrsFrameToRtcBridge::initialize(SrsRequest* r) { - srs_error_t err = srs_success; - - req = r; - - if ((err = format->initialize()) != srs_success) { - return srs_error_wrap(err, "format initialize"); - } - - // Setup the SPS/PPS parsing strategy. - format->try_annexb_first = _srs_config->try_annexb_first(r->vhost); - - keep_bframe = _srs_config->get_rtc_keep_bframe(req->vhost); - merge_nalus = _srs_config->get_rtc_server_merge_nalus(); - srs_trace("RTC bridge from RTMP, keep_bframe=%d, merge_nalus=%d", keep_bframe, merge_nalus); - - return err; +#ifdef SRS_FFMPEG_FIT + return rtp_builder_->initialize(r); +#else + return srs_success; +#endif } srs_error_t SrsFrameToRtcBridge::on_publish() @@ -185,19 +122,20 @@ srs_error_t SrsFrameToRtcBridge::on_publish() return srs_error_wrap(err, "source publish"); } - // Reset the metadata cache, to make VLC happy when disable/enable stream. - // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 - meta->clear(); +#ifdef SRS_FFMPEG_FIT + if ((err = rtp_builder_->on_publish()) != srs_success) { + return srs_error_wrap(err, "rtp builder publish"); + } +#endif return err; } void SrsFrameToRtcBridge::on_unpublish() { - // Reset the metadata cache, to make VLC happy when disable/enable stream. - // @see https://github.com/ossrs/srs/issues/1630#issuecomment-597979448 - meta->update_previous_vsh(); - meta->update_previous_ash(); +#ifdef SRS_FFMPEG_FIT + rtp_builder_->on_unpublish(); +#endif // @remark This bridge might be disposed here, so never use it. 
// TODO: FIXME: Should sync with bridge? @@ -206,509 +144,15 @@ void SrsFrameToRtcBridge::on_unpublish() srs_error_t SrsFrameToRtcBridge::on_frame(SrsSharedPtrMessage* frame) { - if (frame->is_audio()) { - return on_audio(frame); - } else if (frame->is_video()) { - return on_video(frame); - } +#ifdef SRS_FFMPEG_FIT + return rtp_builder_->on_frame(frame); +#else return srs_success; +#endif } -srs_error_t SrsFrameToRtcBridge::on_audio(SrsSharedPtrMessage* msg) -{ - srs_error_t err = srs_success; - - // TODO: FIXME: Support parsing OPUS for RTC. - if ((err = format->on_audio(msg)) != srs_success) { - return srs_error_wrap(err, "format consume audio"); - } - - // Try to init codec when startup or codec changed. - if (format->acodec && (err = init_codec(format->acodec->id)) != srs_success) { - return srs_error_wrap(err, "init codec"); - } - - // Ignore if no format->acodec, it means the codec is not parsed, or unknown codec. - // @issue https://github.com/ossrs/srs/issues/1506#issuecomment-562079474 - if (!format->acodec) { - return err; - } - - // ts support audio codec: aac/mp3 - SrsAudioCodecId acodec = format->acodec->id; - if (acodec != SrsAudioCodecIdAAC && acodec != SrsAudioCodecIdMP3) { - return err; - } - - // ignore sequence header - srs_assert(format->audio); - - if (format->acodec->id == SrsAudioCodecIdMP3) { - return transcode(format->audio); - } - - // When drop aac audio packet, never transcode. - if (acodec != SrsAudioCodecIdAAC) { - return err; - } - - char* adts_audio = NULL; - int nn_adts_audio = 0; - // TODO: FIXME: Reserve 7 bytes header when create shared message. 
- if ((err = aac_raw_append_adts_header(msg, format, &adts_audio, &nn_adts_audio)) != srs_success) { - return srs_error_wrap(err, "aac append header"); - } - - if (!adts_audio) { - return err; - } - - SrsAudioFrame aac; - aac.dts = format->audio->dts; - aac.cts = format->audio->cts; - if ((err = aac.add_sample(adts_audio, nn_adts_audio)) == srs_success) { - // If OK, transcode the AAC to Opus and consume it. - err = transcode(&aac); - } - - srs_freepa(adts_audio); - - return err; -} - -srs_error_t SrsFrameToRtcBridge::init_codec(SrsAudioCodecId codec) -{ - srs_error_t err = srs_success; - - // Ignore if not changed. - if (latest_codec_ == codec) return err; - - // Create a new codec. - srs_freep(codec_); - codec_ = new SrsAudioTranscoder(); - - // Initialize the codec according to the codec in stream. - int bitrate = 48000; // The output bitrate in bps. - if ((err = codec_->initialize(codec, SrsAudioCodecIdOpus, kAudioChannel, kAudioSamplerate, bitrate)) != srs_success) { - return srs_error_wrap(err, "init codec=%d", codec); - } - - // Update the latest codec in stream. - if (latest_codec_ == SrsAudioCodecIdForbidden) { - srs_trace("RTMP2RTC: Init audio codec to %d(%s)", codec, srs_audio_codec_id2str(codec).c_str()); - } else { - srs_trace("RTMP2RTC: Switch audio codec %d(%s) to %d(%s)", latest_codec_, srs_audio_codec_id2str(latest_codec_).c_str(), - codec, srs_audio_codec_id2str(codec).c_str()); - } - latest_codec_ = codec; - - return err; -} - -srs_error_t SrsFrameToRtcBridge::transcode(SrsAudioFrame* audio) -{ - srs_error_t err = srs_success; - - std::vector out_audios; - if ((err = codec_->transcode(audio, out_audios)) != srs_success) { - return srs_error_wrap(err, "recode error"); - } - - // Save OPUS packets in shared message. 
- if (out_audios.empty()) { - return err; - } - - for (std::vector::iterator it = out_audios.begin(); it != out_audios.end(); ++it) { - SrsAudioFrame* out_audio = *it; - - SrsRtpPacket* pkt = new SrsRtpPacket(); - SrsAutoFree(SrsRtpPacket, pkt); - - if ((err = package_opus(out_audio, pkt)) != srs_success) { - err = srs_error_wrap(err, "package opus"); - break; - } - - if ((err = source_->on_rtp(pkt)) != srs_success) { - err = srs_error_wrap(err, "consume opus"); - break; - } - } - - codec_->free_frames(out_audios); - - return err; -} - -srs_error_t SrsFrameToRtcBridge::package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt) -{ - srs_error_t err = srs_success; - - pkt->header.set_payload_type(audio_payload_type_); - pkt->header.set_ssrc(audio_ssrc); - pkt->frame_type = SrsFrameTypeAudio; - pkt->header.set_marker(true); - pkt->header.set_sequence(audio_sequence++); - pkt->header.set_timestamp(audio->dts * 48); - - SrsRtpRawPayload* raw = new SrsRtpRawPayload(); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); - - srs_assert(audio->nb_samples == 1); - raw->payload = pkt->wrap(audio->samples[0].bytes, audio->samples[0].size); - raw->nn_payload = audio->samples[0].size; - - return err; -} - -srs_error_t SrsFrameToRtcBridge::on_video(SrsSharedPtrMessage* msg) -{ - srs_error_t err = srs_success; - - // cache the sequence header if h264 - bool is_sequence_header = SrsFlvVideo::sh(msg->payload, msg->size); - if (is_sequence_header && (err = meta->update_vsh(msg)) != srs_success) { - return srs_error_wrap(err, "meta update video"); - } - - if ((err = format->on_video(msg)) != srs_success) { - return srs_error_wrap(err, "format consume video"); - } - - // Ignore if no format->vcodec, it means the codec is not parsed, or unsupport/unknown codec - // such as H.263 codec - if (!format->vcodec) { - return err; - } - - bool has_idr = false; - vector samples; - if ((err = filter(msg, format, has_idr, samples)) != srs_success) { - return srs_error_wrap(err, "filter video"); - } - 
int nn_samples = (int)samples.size(); - - // Well, for each IDR, we append a SPS/PPS before it, which is packaged in STAP-A. - if (has_idr) { - SrsRtpPacket* pkt = new SrsRtpPacket(); - SrsAutoFree(SrsRtpPacket, pkt); - - if ((err = package_stap_a(source_, msg, pkt)) != srs_success) { - return srs_error_wrap(err, "package stap-a"); - } - - if ((err = source_->on_rtp(pkt)) != srs_success) { - return srs_error_wrap(err, "consume sps/pps"); - } - } - - // If merge Nalus, we pcakges all NALUs(samples) as one NALU, in a RTP or FUA packet. - vector pkts; - if (merge_nalus && nn_samples > 1) { - if ((err = package_nalus(msg, samples, pkts)) != srs_success) { - return srs_error_wrap(err, "package nalus as one"); - } - } else { - // By default, we package each NALU(sample) to a RTP or FUA packet. - for (int i = 0; i < nn_samples; i++) { - SrsSample* sample = samples[i]; - - // We always ignore bframe here, if config to discard bframe, - // the bframe flag will not be set. - if (sample->bframe) { - continue; - } - - if (sample->size <= kRtpMaxPayloadSize) { - if ((err = package_single_nalu(msg, sample, pkts)) != srs_success) { - return srs_error_wrap(err, "package single nalu"); - } - } else { - if ((err = package_fu_a(msg, sample, kRtpMaxPayloadSize, pkts)) != srs_success) { - return srs_error_wrap(err, "package fu-a"); - } - } - } - } - - if (!pkts.empty()) { - pkts.back()->header.set_marker(true); - } - - return consume_packets(pkts); -} - -srs_error_t SrsFrameToRtcBridge::filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, vector& samples) -{ - srs_error_t err = srs_success; - - // If IDR, we will insert SPS/PPS before IDR frame. - if (format->video && format->video->has_idr) { - has_idr = true; - } - - // Update samples to shared frame. - for (int i = 0; i < format->video->nb_samples; ++i) { - SrsSample* sample = &format->video->samples[i]; - - // Because RTC does not support B-frame, so we will drop them. 
- // TODO: Drop B-frame in better way, which not cause picture corruption. - if (!keep_bframe) { - if ((err = sample->parse_bframe()) != srs_success) { - return srs_error_wrap(err, "parse bframe"); - } - if (sample->bframe) { - continue; - } - } - - samples.push_back(sample); - } - - return err; -} - -srs_error_t SrsFrameToRtcBridge::package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt) -{ - srs_error_t err = srs_success; - - SrsFormat* format = meta->vsh_format(); - if (!format || !format->vcodec) { - return err; - } - - // Note that the sps/pps may change, so we should copy it. - const vector& sps = format->vcodec->sequenceParameterSetNALUnit; - const vector& pps = format->vcodec->pictureParameterSetNALUnit; - if (sps.empty() || pps.empty()) { - return srs_error_new(ERROR_RTC_RTP_MUXER, "sps/pps empty"); - } - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)kStapA; - pkt->header.set_marker(false); - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpSTAPPayload* stap = new SrsRtpSTAPPayload(); - pkt->set_payload(stap, SrsRtspPacketPayloadTypeSTAP); - - uint8_t header = sps[0]; - stap->nri = (SrsAvcNaluType)header; - - // Copy the SPS/PPS bytes, because it may change. 
- int size = (int)(sps.size() + pps.size()); - char* payload = pkt->wrap(size); - - if (true) { - SrsSample* sample = new SrsSample(); - sample->bytes = payload; - sample->size = (int)sps.size(); - stap->nalus.push_back(sample); - - memcpy(payload, (char*)&sps[0], sps.size()); - payload += (int)sps.size(); - } - - if (true) { - SrsSample* sample = new SrsSample(); - sample->bytes = payload; - sample->size = (int)pps.size(); - stap->nalus.push_back(sample); - - memcpy(payload, (char*)&pps[0], pps.size()); - payload += (int)pps.size(); - } - - srs_info("RTC STAP-A seq=%u, sps %d, pps %d bytes", pkt->header.get_sequence(), sps.size(), pps.size()); - - return err; -} - -srs_error_t SrsFrameToRtcBridge::package_nalus(SrsSharedPtrMessage* msg, const vector& samples, vector& pkts) -{ - srs_error_t err = srs_success; - - SrsRtpRawNALUs* raw = new SrsRtpRawNALUs(); - SrsAvcNaluType first_nalu_type = SrsAvcNaluTypeReserved; - - for (int i = 0; i < (int)samples.size(); i++) { - SrsSample* sample = samples[i]; - - // We always ignore bframe here, if config to discard bframe, - // the bframe flag will not be set. - if (sample->bframe) { - continue; - } - - if (!sample->size) { - continue; - } - - if (first_nalu_type == SrsAvcNaluTypeReserved) { - first_nalu_type = SrsAvcNaluType((uint8_t)(sample->bytes[0] & kNalTypeMask)); - } - - raw->push_back(sample->copy()); - } - - // Ignore empty. - int nn_bytes = raw->nb_bytes(); - if (nn_bytes <= 0) { - srs_freep(raw); - return err; - } - - if (nn_bytes < kRtpMaxPayloadSize) { - // Package NALUs in a single RTP packet. 
- SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)first_nalu_type; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeNALU); - pkt->wrap(msg); - } else { - // We must free it, should never use RTP packets to free it, - // because more than one RTP packet will refer to it. - SrsAutoFree(SrsRtpRawNALUs, raw); - - // Package NALUs in FU-A RTP packets. - int fu_payload_size = kRtpMaxPayloadSize; - - // The first byte is store in FU-A header. - uint8_t header = raw->skip_first_byte(); - uint8_t nal_type = header & kNalTypeMask; - int nb_left = nn_bytes - 1; - - int num_of_packet = 1 + (nn_bytes - 1) / fu_payload_size; - for (int i = 0; i < num_of_packet; ++i) { - int packet_size = srs_min(nb_left, fu_payload_size); - - SrsRtpFUAPayload* fua = new SrsRtpFUAPayload(); - if ((err = raw->read_samples(fua->nalus, packet_size)) != srs_success) { - srs_freep(fua); - return srs_error_wrap(err, "read samples %d bytes, left %d, total %d", packet_size, nb_left, nn_bytes); - } - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->nalu_type = (SrsAvcNaluType)kFuA; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - fua->nri = (SrsAvcNaluType)header; - fua->nalu_type = (SrsAvcNaluType)nal_type; - fua->start = bool(i == 0); - fua->end = bool(i == num_of_packet - 1); - - pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA); - pkt->wrap(msg); - - nb_left -= packet_size; - } - } - - return err; -} - -// Single NAL Unit Packet @see https://tools.ietf.org/html/rfc6184#section-5.6 -srs_error_t 
SrsFrameToRtcBridge::package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, vector& pkts) -{ - srs_error_t err = srs_success; - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpRawPayload* raw = new SrsRtpRawPayload(); - pkt->set_payload(raw, SrsRtspPacketPayloadTypeRaw); - - raw->payload = sample->bytes; - raw->nn_payload = sample->size; - - pkt->wrap(msg); - - return err; -} - -srs_error_t SrsFrameToRtcBridge::package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, vector& pkts) -{ - srs_error_t err = srs_success; - - char* p = sample->bytes + 1; - int nb_left = sample->size - 1; - uint8_t header = sample->bytes[0]; - uint8_t nal_type = header & kNalTypeMask; - - int num_of_packet = 1 + (nb_left - 1) / fu_payload_size; - for (int i = 0; i < num_of_packet; ++i) { - int packet_size = srs_min(nb_left, fu_payload_size); - - SrsRtpPacket* pkt = new SrsRtpPacket(); - pkts.push_back(pkt); - - pkt->header.set_payload_type(video_payload_type_); - pkt->header.set_ssrc(video_ssrc); - pkt->frame_type = SrsFrameTypeVideo; - pkt->header.set_sequence(video_sequence++); - pkt->header.set_timestamp(msg->timestamp * 90); - - SrsRtpFUAPayload2* fua = new SrsRtpFUAPayload2(); - pkt->set_payload(fua, SrsRtspPacketPayloadTypeFUA2); - - fua->nri = (SrsAvcNaluType)header; - fua->nalu_type = (SrsAvcNaluType)nal_type; - fua->start = bool(i == 0); - fua->end = bool(i == num_of_packet - 1); - - fua->payload = p; - fua->size = packet_size; - - pkt->wrap(msg); - - p += packet_size; - nb_left -= packet_size; - } - - return err; -} - -srs_error_t SrsFrameToRtcBridge::consume_packets(vector& pkts) +srs_error_t SrsFrameToRtcBridge::on_rtp(SrsRtpPacket* pkt) { - srs_error_t err = srs_success; - - // TODO: FIXME: 
Consume a range of packets. - for (int i = 0; i < (int)pkts.size(); i++) { - SrsRtpPacket* pkt = pkts[i]; - if ((err = source_->on_rtp(pkt)) != srs_success) { - err = srs_error_wrap(err, "consume sps/pps"); - break; - } - } - - for (int i = 0; i < (int)pkts.size(); i++) { - SrsRtpPacket* pkt = pkts[i]; - srs_freep(pkt); - } - - return err; + return source_->on_rtp(pkt); } -#endif diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp index fc1bc9f9b3..def547dd46 100644 --- a/trunk/src/app/srs_app_stream_bridge.hpp +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -21,6 +21,7 @@ class SrsRtmpFormat; class SrsMetaCache; class SrsAudioTranscoder; class SrsRtpPacket; +class SrsRtcRtpBuilder; // A stream bridge is used to convert stream via different protocols, such as bridge for RTMP and RTC. Generally, we use // frame as message for bridge. A frame is a audio or video frame, such as an I/B/P frame, a general frame for decoder. @@ -54,29 +55,12 @@ class SrsFrameToRtmpBridge : public ISrsStreamBridge virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); }; -#ifdef SRS_FFMPEG_FIT - // A bridge to covert AV frame to WebRTC stream. class SrsFrameToRtcBridge : public ISrsStreamBridge { private: - SrsRequest* req; SrsRtcSource* source_; - // The format, codec information. - SrsRtmpFormat* format; - // The metadata cache. 
- SrsMetaCache* meta; -private: - SrsAudioCodecId latest_codec_; - SrsAudioTranscoder* codec_; - bool keep_bframe; - bool merge_nalus; - uint16_t audio_sequence; - uint16_t video_sequence; - uint32_t audio_ssrc; - uint32_t video_ssrc; - uint8_t audio_payload_type_; - uint8_t video_payload_type_; + SrsRtcRtpBuilder* rtp_builder_; public: SrsFrameToRtcBridge(SrsRtcSource* source); virtual ~SrsFrameToRtcBridge(); @@ -85,23 +69,8 @@ class SrsFrameToRtcBridge : public ISrsStreamBridge virtual srs_error_t on_publish(); virtual void on_unpublish(); virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); -private: - virtual srs_error_t on_audio(SrsSharedPtrMessage* msg); -private: - srs_error_t init_codec(SrsAudioCodecId codec); - srs_error_t transcode(SrsAudioFrame* audio); - srs_error_t package_opus(SrsAudioFrame* audio, SrsRtpPacket* pkt); -private: - virtual srs_error_t on_video(SrsSharedPtrMessage* msg); -private: - srs_error_t filter(SrsSharedPtrMessage* msg, SrsFormat* format, bool& has_idr, std::vector& samples); - srs_error_t package_stap_a(SrsRtcSource* source, SrsSharedPtrMessage* msg, SrsRtpPacket* pkt); - srs_error_t package_nalus(SrsSharedPtrMessage* msg, const std::vector& samples, std::vector& pkts); - srs_error_t package_single_nalu(SrsSharedPtrMessage* msg, SrsSample* sample, std::vector& pkts); - srs_error_t package_fu_a(SrsSharedPtrMessage* msg, SrsSample* sample, int fu_payload_size, std::vector& pkts); - srs_error_t consume_packets(std::vector& pkts); + srs_error_t on_rtp(SrsRtpPacket* pkt); }; -#endif #endif From f139e6c6c6169eae971bf54de2fa51a447228237 Mon Sep 17 00:00:00 2001 From: winlin Date: Sun, 29 Jan 2023 01:25:25 +0800 Subject: [PATCH 12/18] Use composite bridge for coverting SRT to RTMP and RTC. 
--- trunk/src/app/srs_app_srt_conn.cpp | 14 ++--- trunk/src/app/srs_app_stream_bridge.cpp | 68 +++++++++++++++++++++++++ trunk/src/app/srs_app_stream_bridge.hpp | 19 +++++++ 3 files changed, 92 insertions(+), 9 deletions(-) diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index 6fae11d427..5d5a43f210 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -395,20 +395,16 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() } #endif - // Bridge to RTC streaming. + // Bridge to RTMP and RTC streaming. + SrsCompositeBridge* bridge = new SrsCompositeBridge(); + bridge->append(new SrsFrameToRtmpBridge(live_source)); + #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc && _srs_config->get_rtc_from_rtmp(req_->vhost)) { - SrsFrameToRtcBridge* bridge = new SrsFrameToRtcBridge(rtc); - if ((err = bridge->initialize(req_)) != srs_success) { - srs_freep(bridge); - return srs_error_wrap(err, "bridge init"); - } - - live_source->set_bridge(bridge); + bridge->append(new SrsFrameToRtcBridge(rtc)); } #endif - SrsFrameToRtmpBridge* bridge = new SrsFrameToRtmpBridge(live_source); if ((err = bridge->initialize(req_)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index 0a4e786982..bbd4057214 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -156,3 +156,71 @@ srs_error_t SrsFrameToRtcBridge::on_rtp(SrsRtpPacket* pkt) return source_->on_rtp(pkt); } +SrsCompositeBridge::SrsCompositeBridge() +{ +} + +SrsCompositeBridge::~SrsCompositeBridge() +{ + for (vector::iterator it = bridges_.begin(); it != bridges_.end(); ++it) { + ISrsStreamBridge* bridge = *it; + srs_freep(bridge); + } +} + +srs_error_t SrsCompositeBridge::initialize(SrsRequest* r) +{ + srs_error_t err = srs_success; + + for (vector::iterator it = bridges_.begin(); it != 
bridges_.end(); ++it) { + ISrsStreamBridge* bridge = *it; + if ((err = bridge->initialize(r)) != srs_success) { + return err; + } + } + + return err; +} + +srs_error_t SrsCompositeBridge::on_publish() +{ + srs_error_t err = srs_success; + + for (vector::iterator it = bridges_.begin(); it != bridges_.end(); ++it) { + ISrsStreamBridge* bridge = *it; + if ((err = bridge->on_publish()) != srs_success) { + return err; + } + } + + return err; +} + +void SrsCompositeBridge::on_unpublish() +{ + for (vector::iterator it = bridges_.begin(); it != bridges_.end(); ++it) { + ISrsStreamBridge* bridge = *it; + bridge->on_unpublish(); + } +} + +srs_error_t SrsCompositeBridge::on_frame(SrsSharedPtrMessage* frame) +{ + srs_error_t err = srs_success; + + for (vector::iterator it = bridges_.begin(); it != bridges_.end(); ++it) { + ISrsStreamBridge* bridge = *it; + if ((err = bridge->on_frame(frame)) != srs_success) { + return err; + } + } + + return err; +} + +SrsCompositeBridge* SrsCompositeBridge::append(ISrsStreamBridge* bridge) +{ + bridges_.push_back(bridge); + return this; +} + diff --git a/trunk/src/app/srs_app_stream_bridge.hpp b/trunk/src/app/srs_app_stream_bridge.hpp index def547dd46..5af2ead10f 100644 --- a/trunk/src/app/srs_app_stream_bridge.hpp +++ b/trunk/src/app/srs_app_stream_bridge.hpp @@ -72,5 +72,24 @@ class SrsFrameToRtcBridge : public ISrsStreamBridge srs_error_t on_rtp(SrsRtpPacket* pkt); }; +// A bridge chain, a set of bridges. 
+class SrsCompositeBridge : public ISrsStreamBridge +{ +public: + SrsCompositeBridge(); + virtual ~SrsCompositeBridge(); +public: + srs_error_t initialize(SrsRequest* r); +public: + virtual srs_error_t on_publish(); + virtual void on_unpublish(); +public: + virtual srs_error_t on_frame(SrsSharedPtrMessage* frame); +public: + SrsCompositeBridge* append(ISrsStreamBridge* bridge); +private: + std::vector bridges_; +}; + #endif From 75f6cd56845b20ed225de843997b849cfc78d136 Mon Sep 17 00:00:00 2001 From: winlin Date: Sun, 29 Jan 2023 01:33:00 +0800 Subject: [PATCH 13/18] Fix build fail. --- trunk/src/app/srs_app_stream_bridge.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/trunk/src/app/srs_app_stream_bridge.cpp b/trunk/src/app/srs_app_stream_bridge.cpp index bbd4057214..82eee47529 100644 --- a/trunk/src/app/srs_app_stream_bridge.cpp +++ b/trunk/src/app/srs_app_stream_bridge.cpp @@ -65,9 +65,11 @@ srs_error_t SrsFrameToRtmpBridge::on_frame(SrsSharedPtrMessage* frame) SrsFrameToRtcBridge::SrsFrameToRtcBridge(SrsRtcSource* source) { +#ifdef SRS_RTC source_ = source; +#endif -#ifdef SRS_FFMPEG_FIT +#if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) uint32_t audio_ssrc = 0; uint8_t audio_payload_type = 0; uint32_t video_ssrc = 0; @@ -117,10 +119,12 @@ srs_error_t SrsFrameToRtcBridge::on_publish() { srs_error_t err = srs_success; +#ifdef SRS_RTC // TODO: FIXME: Should sync with bridge? if ((err = source_->on_publish()) != srs_success) { return srs_error_wrap(err, "source publish"); } +#endif #ifdef SRS_FFMPEG_FIT if ((err = rtp_builder_->on_publish()) != srs_success) { @@ -137,9 +141,11 @@ void SrsFrameToRtcBridge::on_unpublish() rtp_builder_->on_unpublish(); #endif +#ifdef SRS_RTC // @remark This bridge might be disposed here, so never use it. // TODO: FIXME: Should sync with bridge? 
source_->on_unpublish(); +#endif } srs_error_t SrsFrameToRtcBridge::on_frame(SrsSharedPtrMessage* frame) @@ -153,7 +159,11 @@ srs_error_t SrsFrameToRtcBridge::on_frame(SrsSharedPtrMessage* frame) srs_error_t SrsFrameToRtcBridge::on_rtp(SrsRtpPacket* pkt) { +#ifdef SRS_RTC return source_->on_rtp(pkt); +#else + return srs_success; +#endif } SrsCompositeBridge::SrsCompositeBridge() From d5efe6ae1690684be6208ef67bd90ae7f9dad76c Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 25 Feb 2023 09:02:25 +0800 Subject: [PATCH 14/18] Always use composite bridge. --- trunk/src/app/srs_app_rtc_conn.cpp | 4 +++- trunk/src/app/srs_app_rtmp_conn.cpp | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/trunk/src/app/srs_app_rtc_conn.cpp b/trunk/src/app/srs_app_rtc_conn.cpp index eb5ea2aa46..56d9c63ac3 100644 --- a/trunk/src/app/srs_app_rtc_conn.cpp +++ b/trunk/src/app/srs_app_rtc_conn.cpp @@ -1197,7 +1197,9 @@ srs_error_t SrsRtcPublishStream::initialize(SrsRequest* r, SrsRtcSourceDescripti // especially for stream merging. rtmp->set_cache(false); - SrsFrameToRtmpBridge* bridge = new SrsFrameToRtmpBridge(rtmp); + SrsCompositeBridge* bridge = new SrsCompositeBridge(); + bridge->append(new SrsFrameToRtmpBridge(rtmp)); + if ((err = bridge->initialize(r)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "create bridge"); diff --git a/trunk/src/app/srs_app_rtmp_conn.cpp b/trunk/src/app/srs_app_rtmp_conn.cpp index f532cf1cff..e3b6664047 100644 --- a/trunk/src/app/srs_app_rtmp_conn.cpp +++ b/trunk/src/app/srs_app_rtmp_conn.cpp @@ -1092,7 +1092,9 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) // Bridge to RTC streaming. 
#if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc && _srs_config->get_rtc_from_rtmp(req->vhost)) { - SrsFrameToRtcBridge* bridge = new SrsFrameToRtcBridge(rtc); + SrsCompositeBridge* bridge = new SrsCompositeBridge(); + bridge->append(new SrsFrameToRtcBridge(rtc)); + if ((err = bridge->initialize(req)) != srs_success) { srs_freep(bridge); return srs_error_wrap(err, "bridge init"); From 1f8f06da13932167beaaa8926beec49dcab73fd9 Mon Sep 17 00:00:00 2001 From: winlin Date: Tue, 28 Mar 2023 09:59:19 +0800 Subject: [PATCH 15/18] Check whether SRT stream is busy when publishing RTMP or RTC. --- trunk/src/app/srs_app_config.hpp | 1 + trunk/src/app/srs_app_rtc_conn.cpp | 17 +++++++++ trunk/src/app/srs_app_rtmp_conn.cpp | 17 +++++++++ trunk/src/app/srs_app_srt_conn.cpp | 58 ++++++++++++++--------------- 4 files changed, 64 insertions(+), 29 deletions(-) diff --git a/trunk/src/app/srs_app_config.hpp b/trunk/src/app/srs_app_config.hpp index f0028b357b..1e83e97565 100644 --- a/trunk/src/app/srs_app_config.hpp +++ b/trunk/src/app/srs_app_config.hpp @@ -692,6 +692,7 @@ class SrsConfig private: SrsConfDirective* get_srt(std::string vhost); public: + // TODO: FIXME: Rename to get_vhost_srt_enabled. bool get_srt_enabled(std::string vhost); bool get_srt_to_rtmp(std::string vhost); diff --git a/trunk/src/app/srs_app_rtc_conn.cpp b/trunk/src/app/srs_app_rtc_conn.cpp index dd2db782dc..3ca1ac1640 100644 --- a/trunk/src/app/srs_app_rtc_conn.cpp +++ b/trunk/src/app/srs_app_rtc_conn.cpp @@ -48,6 +48,7 @@ using namespace std; #include #include #include +#include SrsPps* _srs_pps_sstuns = NULL; SrsPps* _srs_pps_srtcps = NULL; @@ -1185,6 +1186,22 @@ srs_error_t SrsRtcPublishStream::initialize(SrsRequest* r, SrsRtcSourceDescripti return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "rtmp stream %s busy", r->get_stream_url().c_str()); } + // Check whether SRT stream is busy. 
+#ifdef SRS_SRT + SrsSrtSource* srt = NULL; + bool srt_server_enabled = _srs_config->get_srt_enabled(); + bool srt_enabled = _srs_config->get_srt_enabled(r->vhost); + if (srt_server_enabled && srt_enabled) { + if ((err = _srs_srt_sources->fetch_or_create(r, &srt)) != srs_success) { + return srs_error_wrap(err, "create source"); + } + + if (!srt->can_publish()) { + return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "srt stream %s busy", r->get_stream_url().c_str()); + } + } +#endif + // Bridge to rtmp #if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) bool rtc_to_rtmp = _srs_config->get_rtc_to_rtmp(req_->vhost); diff --git a/trunk/src/app/srs_app_rtmp_conn.cpp b/trunk/src/app/srs_app_rtmp_conn.cpp index 54bd187274..54a231168c 100644 --- a/trunk/src/app/srs_app_rtmp_conn.cpp +++ b/trunk/src/app/srs_app_rtmp_conn.cpp @@ -39,6 +39,7 @@ using namespace std; #include #include #include +#include // the timeout in srs_utime_t to wait encoder to republish // if timeout, close the connection. @@ -1095,6 +1096,22 @@ srs_error_t SrsRtmpConn::acquire_publish(SrsLiveSource* source) } #endif + // Check whether SRT stream is busy. +#ifdef SRS_SRT + SrsSrtSource* srt = NULL; + bool srt_server_enabled = _srs_config->get_srt_enabled(); + bool srt_enabled = _srs_config->get_srt_enabled(req->vhost); + if (srt_server_enabled && srt_enabled && !info->edge) { + if ((err = _srs_srt_sources->fetch_or_create(req, &srt)) != srs_success) { + return srs_error_wrap(err, "create source"); + } + + if (!srt->can_publish()) { + return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "srt stream %s busy", req->get_stream_url().c_str()); + } + } +#endif + // Bridge to RTC streaming. 
#if defined(SRS_RTC) && defined(SRS_FFMPEG_FIT) if (rtc && _srs_config->get_rtc_from_rtmp(req->vhost)) { diff --git a/trunk/src/app/srs_app_srt_conn.cpp b/trunk/src/app/srs_app_srt_conn.cpp index 5d5a43f210..d3bc88aa23 100644 --- a/trunk/src/app/srs_app_srt_conn.cpp +++ b/trunk/src/app/srs_app_srt_conn.cpp @@ -357,44 +357,44 @@ srs_error_t SrsMpegtsSrtConn::acquire_publish() return srs_error_new(ERROR_SRT_SOURCE_BUSY, "srt stream %s busy", req_->get_stream_url().c_str()); } - if (_srs_config->get_srt_to_rtmp(req_->vhost)) { - // Check rtmp stream is busy. - SrsLiveSource *live_source = _srs_sources->fetch(req_); - if (live_source && !live_source->can_publish(false)) { - return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "live_source stream %s busy", req_->get_stream_url().c_str()); - } + // Check rtmp stream is busy. + SrsLiveSource *live_source = _srs_sources->fetch(req_); + if (live_source && !live_source->can_publish(false)) { + return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "live_source stream %s busy", req_->get_stream_url().c_str()); + } - if ((err = _srs_sources->fetch_or_create(req_, _srs_hybrid->srs()->instance(), &live_source)) != srs_success) { - return srs_error_wrap(err, "create source"); - } + if ((err = _srs_sources->fetch_or_create(req_, _srs_hybrid->srs()->instance(), &live_source)) != srs_success) { + return srs_error_wrap(err, "create source"); + } - srs_assert(live_source != NULL); - - bool enabled_cache = _srs_config->get_gop_cache(req_->vhost); - int gcmf = _srs_config->get_gop_cache_max_frames(req_->vhost); - live_source->set_cache(enabled_cache); - live_source->set_gop_cache_max_frames(gcmf); + srs_assert(live_source != NULL); - // srt->rtmp->rtc - // TODO: FIXME: the code below is repeat in srs_app_rtmp_conn.cpp, refactor it later, use function instead. 
+ bool enabled_cache = _srs_config->get_gop_cache(req_->vhost); + int gcmf = _srs_config->get_gop_cache_max_frames(req_->vhost); + live_source->set_cache(enabled_cache); + live_source->set_gop_cache_max_frames(gcmf); - // Check whether RTC stream is busy. + // srt->rtmp->rtc + // TODO: FIXME: the code below is repeat in srs_app_rtmp_conn.cpp, refactor it later, use function instead. + + // Check whether RTC stream is busy. #ifdef SRS_RTC - SrsRtcSource* rtc = NULL; - bool rtc_server_enabled = _srs_config->get_rtc_server_enabled(); - bool rtc_enabled = _srs_config->get_rtc_enabled(req_->vhost); - bool edge = _srs_config->get_vhost_is_edge(req_->vhost); - if (rtc_server_enabled && rtc_enabled && ! edge) { - if ((err = _srs_rtc_sources->fetch_or_create(req_, &rtc)) != srs_success) { - return srs_error_wrap(err, "create source"); - } + SrsRtcSource* rtc = NULL; + bool rtc_server_enabled = _srs_config->get_rtc_server_enabled(); + bool rtc_enabled = _srs_config->get_rtc_enabled(req_->vhost); + bool edge = _srs_config->get_vhost_is_edge(req_->vhost); + if (rtc_server_enabled && rtc_enabled && ! edge) { + if ((err = _srs_rtc_sources->fetch_or_create(req_, &rtc)) != srs_success) { + return srs_error_wrap(err, "create source"); + } - if (!rtc->can_publish()) { - return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "rtc stream %s busy", req_->get_stream_url().c_str()); - } + if (!rtc->can_publish()) { + return srs_error_new(ERROR_SYSTEM_STREAM_BUSY, "rtc stream %s busy", req_->get_stream_url().c_str()); } + } #endif + if (_srs_config->get_srt_to_rtmp(req_->vhost)) { // Bridge to RTMP and RTC streaming. SrsCompositeBridge* bridge = new SrsCompositeBridge(); bridge->append(new SrsFrameToRtmpBridge(live_source)); From 2f8bc8747f19a5245e8528bdddc6bcd501d37853 Mon Sep 17 00:00:00 2001 From: winlin Date: Fri, 31 Mar 2023 12:05:54 +0800 Subject: [PATCH 16/18] Update Release note. 
--- .github/workflows/release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4786486a8d..5944c5f85b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -416,6 +416,8 @@ jobs: tag: ${{ github.ref }} name: Release ${{ env.SRS_TAG }} body: | + If you would like to support SRS, please consider contributing to our [OpenCollective](https://opencollective.com/srs-server). + [${{ github.sha }}](https://github.com/ossrs/srs/commit/${{ github.sha }}) ${{ github.event.head_commit.message }} From 1d84e6895d8fa67ff06582a22453e6e0c4ddf7b4 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 1 Apr 2023 20:59:06 +0800 Subject: [PATCH 17/18] Update --- trunk/conf/http.api.auth.conf | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 trunk/conf/http.api.auth.conf diff --git a/trunk/conf/http.api.auth.conf b/trunk/conf/http.api.auth.conf new file mode 100644 index 0000000000..6ba7dee688 --- /dev/null +++ b/trunk/conf/http.api.auth.conf @@ -0,0 +1,21 @@ + +listen 1935; +max_connections 1000; +daemon off; +srs_log_tank console; +http_api { + enabled on; + listen 1985; + auth { + enabled on; + username admin; + password admin; + } +} +http_server { + enabled on; + listen 8080; + dir ./objs/nginx/html; +} +vhost __defaultVhost__ { +} From 0b69b5ea31e66208acd4f69fa26a7584c7873b93 Mon Sep 17 00:00:00 2001 From: winlin Date: Sat, 1 Apr 2023 21:13:43 +0800 Subject: [PATCH 18/18] Update --- trunk/doc/CHANGELOG.md | 3 ++- trunk/src/core/srs_core_version6.hpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/trunk/doc/CHANGELOG.md b/trunk/doc/CHANGELOG.md index bebc38b065..5f21d1450e 100644 --- a/trunk/doc/CHANGELOG.md +++ b/trunk/doc/CHANGELOG.md @@ -8,9 +8,10 @@ The changelog for SRS. ## SRS 6.0 Changelog +* v6.0, 2023-04-01, Merge [#3392](https://github.com/ossrs/srs/pull/3392): Support composited bridges for 1:N protocols converting.
v6.0.41 (#3392) * v5.0, 2023-04-01, Merge [#3458](https://github.com/ossrs/srs/pull/3450): API: Support HTTP basic authentication for API. v6.0.40 (#3458) * v6.0, 2023-03-27, Merge [#3450](https://github.com/ossrs/srs/pull/3450): WebRTC: Error message carries the SDP when failed. v6.0.39 (#3450) -* v6.0, 2023-03-25, Merge [#3477](https://github.com/ossrs/srs/pull/3477): Remove unneccessary NULL check in srs_freep. v6.0.38 (#3477) +* v6.0, 2023-03-25, Merge [#3477](https://github.com/ossrs/srs/pull/3477): Remove unnecessary NULL check in srs_freep. v6.0.38 (#3477) * v6.0, 2023-03-25, Merge [#3455](https://github.com/ossrs/srs/pull/3455): RTC: Call on_play before create session, for it might be freed for timeout. v6.0.37 (#3455) * v6.0, 2023-03-22, Merge [#3427](https://github.com/ossrs/srs/pull/3427): WHIP: Support DELETE resource for Larix Broadcaster. v6.0.36 (#3427) * v6.0, 2023-03-20, Merge [#3460](https://github.com/ossrs/srs/pull/3460): WebRTC: Support WHIP/WHEP players. v6.0.35 (#3460) diff --git a/trunk/src/core/srs_core_version6.hpp b/trunk/src/core/srs_core_version6.hpp index 4082877c12..f9df4209e9 100644 --- a/trunk/src/core/srs_core_version6.hpp +++ b/trunk/src/core/srs_core_version6.hpp @@ -9,6 +9,6 @@ #define VERSION_MAJOR 6 #define VERSION_MINOR 0 -#define VERSION_REVISION 40 +#define VERSION_REVISION 41 #endif