Fix bugs for RTC2RTMP. #2768

Merged
1 commit merged on Dec 4, 2021
98 changes: 52 additions & 46 deletions trunk/src/app/srs_app_rtc_source.cpp
@@ -1278,7 +1278,7 @@ SrsRtmpFromRtcBridger::SrsRtmpFromRtcBridger(SrsLiveSource *src)
is_first_audio = true;
is_first_video = true;
format = NULL;
key_frame_ts_ = -1;
rtp_key_frame_ts_ = -1;
header_sn_ = 0;
memset(cache_video_pkts_, 0, sizeof(cache_video_pkts_));
}
@@ -1504,24 +1504,24 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_key_frame(SrsRtpPacket* pkt)
}
}

if (-1 == key_frame_ts_) {
key_frame_ts_ = pkt->get_avsync_time();
if (-1 == rtp_key_frame_ts_) {
rtp_key_frame_ts_ = pkt->header.get_timestamp();
header_sn_ = pkt->header.get_sequence();
lost_sn_ = header_sn_ + 1;
// Received key frame and clean cache of old p frame pkts
clear_cached_video();
srs_trace("set ts=%lld, header=%hu, lost=%hu", key_frame_ts_, header_sn_, lost_sn_);
} else if (key_frame_ts_ != pkt->get_avsync_time()) {
srs_trace("set ts=%u, header=%hu, lost=%hu", (uint32_t)rtp_key_frame_ts_, header_sn_, lost_sn_);
} else if (rtp_key_frame_ts_ != pkt->header.get_timestamp()) {
//new key frame, clean cache
int64_t old_ts = key_frame_ts_;
int64_t old_ts = rtp_key_frame_ts_;
uint16_t old_header_sn = header_sn_;
uint16_t old_lost_sn = lost_sn_;
key_frame_ts_ = pkt->get_avsync_time();
rtp_key_frame_ts_ = pkt->header.get_timestamp();
header_sn_ = pkt->header.get_sequence();
lost_sn_ = header_sn_ + 1;
clear_cached_video();
srs_trace("drop old ts=%lld, header=%hu, lost=%hu, set new ts=%lld, header=%hu, lost=%hu",
old_ts, old_header_sn, old_lost_sn, key_frame_ts_, header_sn_, lost_sn_);
srs_warn("drop old ts=%u, header=%hu, lost=%hu, set new ts=%u, header=%hu, lost=%hu",
(uint32_t)old_ts, old_header_sn, old_lost_sn, (uint32_t)rtp_key_frame_ts_, header_sn_, lost_sn_);
}

uint16_t index = cache_index(pkt->header.get_sequence());
@@ -1561,9 +1561,10 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_rtmp(const uint16_t start, const
srs_error_t err = srs_success;

int nb_payload = 0;
uint16_t cnt = end - start + 1;
int16_t cnt = srs_rtp_seq_distance(start, end) + 1;
srs_assert(cnt >= 1);

for (uint16_t i = 0; i < cnt; ++i) {
for (uint16_t i = 0; i < (uint16_t)cnt; ++i) {
uint16_t sn = start + i;
uint16_t index = cache_index(sn);
SrsRtpPacket* pkt = cache_video_pkts_[index].pkt;
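The change from `uint16_t cnt = end - start + 1` to `srs_rtp_seq_distance(start, end) + 1` (plus the new `srs_assert(cnt >= 1)`) treats the 16-bit RTP sequence number as a wrapping, signed distance. Below is a minimal sketch of such a distance function; `seq_distance` is an illustrative stand-in, not the actual `srs_rtp_seq_distance` from the SRS headers.

```cpp
#include <cstdint>
#include <cassert>

// Minimal sketch of a signed RTP sequence-number distance (illustrative,
// not the SRS implementation): casting the 16-bit difference to int16_t
// makes the result wrap-aware.
static int16_t seq_distance(uint16_t start, uint16_t end) {
    return static_cast<int16_t>(static_cast<uint16_t>(end - start));
}

int main() {
    // Frame spanning the 65535 -> 0 rollover: packets 65534, 65535, 0, 1.
    assert(seq_distance(65534, 1) + 1 == 4);

    // A range whose end is logically *before* its start now shows up as a
    // negative distance, so the srs_assert(cnt >= 1) added in this PR can
    // catch it; the old `uint16_t cnt = end - start + 1` would instead
    // produce a huge positive count and over-iterate the cache.
    assert(seq_distance(10, 5) < 0);
    return 0;
}
```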
@@ -1615,7 +1616,7 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_rtmp(const uint16_t start, const
SrsBuffer payload(rtmp.payload, rtmp.size);
if (pkt->is_keyframe()) {
payload.write_1bytes(0x17); // type(4 bits): key frame; code(4bits): avc
key_frame_ts_ = -1;
rtp_key_frame_ts_ = -1;
} else {
payload.write_1bytes(0x27); // type(4 bits): inter frame; code(4bits): avc
}
@@ -1625,7 +1626,7 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_rtmp(const uint16_t start, const
payload.write_1bytes(0x0);

int nalu_len = 0;
for (uint16_t i = 0; i < cnt; ++i) {
for (uint16_t i = 0; i < (uint16_t)cnt; ++i) {
uint16_t index = cache_index((start + i));
SrsRtpPacket* pkt = cache_video_pkts_[index].pkt;

@@ -1664,10 +1665,10 @@ srs_error_t SrsRtmpFromRtcBridger::packet_video_rtmp(const uint16_t start, const
if (stap_payload) {
for (int j = 0; j < (int)stap_payload->nalus.size(); ++j) {
SrsSample* sample = stap_payload->nalus.at(j);
if (sample->size > 0) {
payload.write_4bytes(sample->size);
if (sample->size > 0) {
payload.write_4bytes(sample->size);
payload.write_bytes(sample->bytes, sample->size);
}
}
}
srs_freep(pkt);
continue;
@@ -1726,7 +1727,7 @@ int32_t SrsRtmpFromRtcBridger::find_next_lost_sn(uint16_t current_sn, uint16_t&
}
}

srs_error("the cache is mess. the packet count of video frame is more than %u", s_cache_size);
srs_error("cache overflow. the packet count of video frame is more than %u", s_cache_size);
return -2;
}
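For context on `cache_index()` and `s_cache_size` used throughout this function: the bridger keeps a fixed-size array of packet slots indexed by sequence number, and the error above fires when a single video frame would need more slots than the array has. A hypothetical sketch of that kind of cache follows; the names, size, and layout are assumptions for illustration, not the SRS types.

```cpp
#include <cstdint>
#include <cstring>

struct PacketSlot {
    bool in_use;
    uint16_t sn;
    // packet payload omitted in this sketch
};

class SeqRingCache {
public:
    static const uint16_t capacity = 512;  // assumed, plays the role of s_cache_size

    SeqRingCache() { memset(slots_, 0, sizeof(slots_)); }

    // Map a 16-bit sequence number to a slot in O(1), even for out-of-order arrivals.
    uint16_t index(uint16_t sn) const { return sn % capacity; }

    void put(uint16_t sn) {
        PacketSlot& slot = slots_[index(sn)];
        slot.in_use = true;   // an older packet in this slot is overwritten
        slot.sn = sn;
    }

    bool has(uint16_t sn) const {
        const PacketSlot& slot = slots_[index(sn)];
        return slot.in_use && slot.sn == sn;  // the sn check rejects stale entries
    }

private:
    PacketSlot slots_[capacity];
};
```

A frame spanning more than `capacity` packets would overwrite its own slots, which is the overflow condition the error message above reports.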

@@ -1746,10 +1747,12 @@ void SrsRtmpFromRtcBridger::clear_cached_video()

bool SrsRtmpFromRtcBridger::check_frame_complete(const uint16_t start, const uint16_t end)
{
uint16_t cnt = (end - start + 1);
int16_t cnt = srs_rtp_seq_distance(start, end) + 1;
srs_assert(cnt >= 1);

uint16_t fu_s_c = 0;
uint16_t fu_e_c = 0;
for (uint16_t i = 0; i < cnt; ++i) {
for (uint16_t i = 0; i < (uint16_t)cnt; ++i) {
int index = cache_index((start + i));
SrsRtpPacket* pkt = cache_video_pkts_[index].pkt;

@@ -2264,6 +2267,8 @@ SrsRtcRecvTrack::SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescripti

last_sender_report_rtp_time_ = 0;
last_sender_report_rtp_time1_ = 0;
rate_ = 0.0;

last_sender_report_sys_time_ = 0;
}

@@ -2299,40 +2304,41 @@ void SrsRtcRecvTrack::update_send_report_time(const SrsNtp& ntp, uint32_t rtp_ti

// TODO: FIXME: Use system wall clock.
last_sender_report_sys_time_ = srs_update_system_time();
}

int64_t SrsRtcRecvTrack::cal_avsync_time(uint32_t rtp_time)
{
// Have no recv at least 2 sender reports, can't calculate sync time.
// TODO: FIXME: use the sample rate from sdp.
if (last_sender_report_rtp_time1_ <= 0) {
return -1;
}

// WebRTC using sender report to sync audio/video timestamp, because audio video have different timebase,
// typical audio opus is 48000Hz, video is 90000Hz.
// We using two sender report point to calculate avsync timestamp(clock time) with any given rtp timestamp.
// For example, there are two history sender report of audio as below.
// sender_report1: rtp_time1 = 10000, ntp_time1 = 40000
// sender_report : rtp_time = 10960, ntp_time = 40020
// (rtp_time - rtp_time1) / (ntp_time - ntp_time1) = 960 / 20 = 48,
// Now we can calcualte ntp time(ntp_x) of any given rtp timestamp(rtp_x),
// (rtp_x - rtp_time) / (ntp_x - ntp_time) = 48 => ntp_x = (rtp_x - rtp_time) / 48 + ntp_time;
double sys_time_elapsed = static_cast<double>(last_sender_report_ntp_.system_ms_) - static_cast<double>(last_sender_report_ntp1_.system_ms_);
if (last_sender_report_rtp_time1_ > 0) {
// WebRTC using sender report to sync audio/video timestamp, because audio video have different timebase,
// typical audio opus is 48000Hz, video is 90000Hz.
// We using two sender report point to calculate avsync timestamp(clock time) with any given rtp timestamp.
// For example, there are two history sender report of audio as below.
// sender_report1: rtp_time1 = 10000, ntp_time1 = 40000
// sender_report : rtp_time = 10960, ntp_time = 40020
// (rtp_time - rtp_time1) / (ntp_time - ntp_time1) = 960 / 20 = 48,
// Now we can calcualte ntp time(ntp_x) of any given rtp timestamp(rtp_x),
// (rtp_x - rtp_time) / (ntp_x - ntp_time) = 48 => ntp_x = (rtp_x - rtp_time) / 48 + ntp_time;
double sys_time_elapsed = static_cast<double>(last_sender_report_ntp_.system_ms_) - static_cast<double>(last_sender_report_ntp1_.system_ms_);

// Check sys_time_elapsed is equal to zero.
if (fpclassify(sys_time_elapsed) == FP_ZERO) {
return;
chen-guanghua (Contributor) commented on Dec 2, 2021:
fpclassify was introduced in C++11. Should we consider older versions of gcc?
https://en.cppreference.com/w/cpp/numeric/math/fpclassify

xiaozhihong (Collaborator, Author) commented on Dec 3, 2021:
This is a function in the standard C library, and there is also a corresponding version in the std:: library.
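A small standalone illustration of the check being discussed; as the reply notes, `fpclassify` is usable here both as the C99 macro from `<math.h>` and as `std::fpclassify` from `<cmath>` in C++11 and later.

```cpp
#include <math.h>   // C99 fpclassify macro, as discussed in the thread
#include <cstdio>

int main() {
    double elapsed = 40020.0 - 40000.0;   // normal case: two distinct SR arrival times
    double zero    = 40000.0 - 40000.0;   // duplicated SR: elapsed time is exactly 0

    // fpclassify(x) == FP_ZERO tests "exactly zero" without tripping
    // float-equality warnings; it matches both +0.0 and -0.0.
    std::printf("elapsed is zero: %d, zero is zero: %d\n",
                fpclassify(elapsed) == FP_ZERO,   // 0
                fpclassify(zero) == FP_ZERO);     // 1
    return 0;
}
```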

}

double rtp_time_elpased = static_cast<double>(last_sender_report_rtp_time_) - static_cast<double>(last_sender_report_rtp_time1_);
double rate = round(rtp_time_elpased / sys_time_elapsed);

// Check sys_time_elapsed is equal to zero.
if (fpclassify(sys_time_elapsed) == FP_ZERO) {
return -1;
// TODO: FIXME: use the sample rate from sdp.
if (rate > 0) {
rate_ = rate;
}
}

double rtp_time_elpased = static_cast<double>(last_sender_report_rtp_time_) - static_cast<double>(last_sender_report_rtp_time1_);
int rate = round(rtp_time_elpased / sys_time_elapsed);
}

if (rate <= 0) {
int64_t SrsRtcRecvTrack::cal_avsync_time(uint32_t rtp_time)
{
if (rate_ < 0.001) {
return -1;
}

double delta = round((rtp_time - last_sender_report_rtp_time_) / rate);
double delta = round((rtp_time - last_sender_report_rtp_time_) / rate_);

int64_t avsync_time = delta + last_sender_report_ntp_.system_ms_;

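To make the sender-report arithmetic in the comment above concrete, here is a minimal standalone sketch; the variable names are illustrative, not the SRS members.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    // Two received sender reports for an audio track (values from the comment):
    //   SR1: rtp_time1 = 10000, ntp_time1 = 40000 ms
    //   SR : rtp_time  = 10960, ntp_time  = 40020 ms
    double rtp_time1 = 10000, ntp_time1 = 40000;
    double rtp_time  = 10960, ntp_time  = 40020;

    // Rate in RTP ticks per millisecond of wall clock:
    //   (10960 - 10000) / (40020 - 40000) = 48, i.e. a 48000 Hz Opus clock.
    double rate = std::round((rtp_time - rtp_time1) / (ntp_time - ntp_time1));

    // Map a later RTP timestamp rtp_x back to wall-clock time:
    //   ntp_x = (rtp_x - rtp_time) / rate + ntp_time
    uint32_t rtp_x = 12400;
    int64_t avsync_ms = (int64_t)std::round((rtp_x - rtp_time) / rate) + (int64_t)ntp_time;

    // Prints: rate=48 ticks/ms, avsync=40050 ms (1440 ticks after the SR is 30 ms).
    std::printf("rate=%.0f ticks/ms, avsync=%lld ms\n", rate, (long long)avsync_ms);
    return 0;
}
```

After this change the rate is computed once per sender-report pair in update_send_report_time() and cached in rate_, so cal_avsync_time() only performs the division; a duplicated sender report (zero elapsed system time) is skipped by the fpclassify check instead of producing a division by zero.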
3 changes: 2 additions & 1 deletion trunk/src/app/srs_app_rtc_source.hpp
@@ -307,7 +307,7 @@ class SrsRtmpFromRtcBridger : public ISrsRtcSourceBridger
RtcPacketCache cache_video_pkts_[s_cache_size];
uint16_t header_sn_;
uint16_t lost_sn_;
int64_t key_frame_ts_;
int64_t rtp_key_frame_ts_;
public:
SrsRtmpFromRtcBridger(SrsLiveSource *src);
virtual ~SrsRtmpFromRtcBridger();
Expand Down Expand Up @@ -527,6 +527,7 @@ class SrsRtcRecvTrack
SrsNtp last_sender_report_ntp1_;
int64_t last_sender_report_rtp_time1_;

double rate_;
uint64_t last_sender_report_sys_time_;
public:
SrsRtcRecvTrack(SrsRtcConnection* session, SrsRtcTrackDescription* stream_descs, bool is_audio);
75 changes: 75 additions & 0 deletions trunk/src/utest/srs_utest_rtc.cpp
@@ -1142,3 +1142,78 @@ VOID TEST(KernelRTCTest, SyncTimestampBySenderReportConsecutive)
}
}
}

VOID TEST(KernelRTCTest, SyncTimestampBySenderReportDuplicated)
{
SrsRtcConnection s(NULL, SrsContextId());
SrsRtcPublishStream publish(&s, SrsContextId());

SrsRtcTrackDescription video_ds;
video_ds.type_ = "video";
video_ds.id_ = "VMo22nfLDn122nfnDNL2";
video_ds.ssrc_ = 200;

SrsRtcVideoRecvTrack* video = new SrsRtcVideoRecvTrack(&s, &video_ds);
publish.video_tracks_.push_back(video);

publish.set_all_tracks_status(true);

SrsRtcSource* rtc_source = new SrsRtcSource();
SrsAutoFree(SrsRtcSource, rtc_source);

srand(time(NULL));

if (true)
{
SrsRtpPacket* video_rtp_pkt = new SrsRtpPacket();
SrsAutoFree(SrsRtpPacket, video_rtp_pkt);

uint32_t video_absolute_ts = srs_get_system_time();
uint32_t video_rtp_ts = random();

video_rtp_pkt->header.set_timestamp(video_rtp_ts);
video->on_rtp(rtc_source, video_rtp_pkt);
// No received any sender report, can not calculate absolute time, expect equal to -1.
EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);

SrsNtp ntp = SrsNtp::from_time_ms(video_absolute_ts);

SrsRtcpSR* video_sr = new SrsRtcpSR();
SrsAutoFree(SrsRtcpSR, video_sr);
video_sr->set_ssrc(200);

video_sr->set_ntp(ntp.ntp_);
video_sr->set_rtp_ts(video_rtp_ts);
publish.on_rtcp_sr(video_sr);

// Video timebase 90000, fps=25
video_rtp_ts += 3600;
video_absolute_ts += 40;
video_rtp_pkt->header.set_timestamp(video_rtp_ts);
video->on_rtp(rtc_source, video_rtp_pkt);

// Received one sender report, can not calculate absolute time, expect equal to -1.
EXPECT_EQ(video_rtp_pkt->get_avsync_time(), -1);

ntp = SrsNtp::from_time_ms(video_absolute_ts);
video_sr->set_ntp(ntp.ntp_);
video_sr->set_rtp_ts(video_rtp_ts);
publish.on_rtcp_sr(video_sr);

for (int i = 0; i <= 1000; ++i) {
// Video timebase 90000, fps=25
video_rtp_ts += 3600;
video_absolute_ts += 40;
video_rtp_pkt->header.set_timestamp(video_rtp_ts);
video->on_rtp(rtc_source, video_rtp_pkt);
EXPECT_NEAR(video_rtp_pkt->get_avsync_time(), video_absolute_ts, 1);
// Duplicate 3 sender report packets.
if (i % 3 == 0) {
ntp = SrsNtp::from_time_ms(video_absolute_ts);
video_sr->set_ntp(ntp.ntp_);
video_sr->set_rtp_ts(video_rtp_ts);
}
publish.on_rtcp_sr(video_sr);
}
}
}
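The per-frame increments in the test above follow from a 90 kHz RTP video clock at 25 fps. As an aside (not part of the PR), a quick sanity check of the constants:

```cpp
#include <cassert>

int main() {
    const int timebase_hz = 90000;  // RTP video clock
    const int fps = 25;

    assert(timebase_hz / fps == 3600);  // RTP ticks per frame (video_rtp_ts += 3600)
    assert(1000 / fps == 40);           // wall-clock ms per frame (video_absolute_ts += 40)

    // So the rate derived from sender reports is 3600 / 40 = 90 ticks per ms,
    // and EXPECT_NEAR(..., 1) tolerates up to 1 ms of rounding in the avsync math.
    assert(3600 / 40 == 90);
    return 0;
}
```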