From 0bea5b1886b4d23f7bdd44157beb6867e7d1b0af Mon Sep 17 00:00:00 2001
From: Joel Dice
Date: Wed, 30 Jul 2014 08:39:45 -0600
Subject: [PATCH 1/2] several tweaks to better support high-resolution screen
 capture and testing

* don't resize input frames to a "standard" WebRTC resolution

* make SrtpSession::Init public so we can call it before any peers are
  created and thereby avoid a data race

* make PacedSender::ShouldSendNextPacket notify the listener that the
  queue has been flushed.  This allows us to throttle frame creation so
  we don't get ahead of the network and introduce unnecessary latency.
  It's a total hack, though, since it only allows one listener per
  process, whereas we would preferably support multiple peers, each
  listening to its own PacedSender.

* use the default vp8 config instead of overriding the defaults.  CBR
  does not seem to work as well for screen content, and none of the
  other overrides seem to add any value -- this may be worth
  revisiting, though.

* don't delay rendering video frames on the decode side.  This speeds
  up the integration test by avoiding added latency, but would have no
  effect on a stand-alone screen capture program, since it wouldn't
  include a decoder.
---
 talk/media/webrtc/webrtcvideoengine.cc           |  7 ++++++-
 talk/session/media/srtpfilter.h                  |  6 +++++-
 webrtc/modules/pacing/paced_sender.cc            | 15 +++++++++++++++
 .../modules/video_coding/codecs/vp8/vp8_impl.cc  |  2 ++
 .../main/source/spatial_resampler.cc             |  4 ++++
 .../modules/video_render/video_render_frames.cc  |  7 ++++++-
 6 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index 7afe5de6b..a99f4e0b8 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -1178,8 +1178,13 @@ bool WebRtcVideoEngine::CanSendCodec(const VideoCodec& requested,
   out->preference = requested.preference;
   out->params = requested.params;
   out->framerate = rtc::_min(requested.framerate, local_max->framerate);
+#ifdef ECOVATE_NO_SCALING
+  out->width = requested.width;
+  out->height = requested.height;
+#else
   out->width = 0;
-  out->height = 0;
+  out->height = 0;
+#endif
   out->params = requested.params;
   out->feedback_params = requested.feedback_params;
diff --git a/talk/session/media/srtpfilter.h b/talk/session/media/srtpfilter.h
index 51600236a..dbdc08e8b 100644
--- a/talk/session/media/srtpfilter.h
+++ b/talk/session/media/srtpfilter.h
@@ -227,12 +227,16 @@ class SrtpSession {
   sigslot::repeater3 SignalSrtpError;

+  // This was previously private, but because it's not thread-safe, we
+  // need to make it public and call it from application code prior to
+  // opening several WebRTC peer connections in parallel:
+  static bool Init();
+
  private:
   bool SetKey(int type, const std::string& cs, const uint8* key, int len);
   // Returns send stream current packet index from srtp db.
   bool GetSendStreamPacketIndex(void* data, int in_len, int64* index);

-  static bool Init();
   void HandleEvent(const srtp_event_data_t* ev);
   static void HandleEventThunk(srtp_event_data_t* ev);
diff --git a/webrtc/modules/pacing/paced_sender.cc b/webrtc/modules/pacing/paced_sender.cc
index 6204a9a06..b4b300037 100644
--- a/webrtc/modules/pacing/paced_sender.cc
+++ b/webrtc/modules/pacing/paced_sender.cc
@@ -20,6 +20,18 @@
 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/interface/trace_event.h"

+namespace ecovate {
+
+struct Listener {
+  virtual ~Listener() { }
+
+  virtual void onSendQueueEmpty() = 0;
+};
+
+Listener* listener = 0;
+
+}  // namespace ecovate
+
 namespace {
 // Time limit in milliseconds between packet bursts.
 const int kMinPacketLimitMs = 5;
@@ -372,6 +384,9 @@ bool PacedSender::ShouldSendNextPacket(paced_sender::PacketList** packet_list) {
     *packet_list = low_priority_packets_.get();
     return true;
   }
+  if (ecovate::listener) {
+    ecovate::listener->onSendQueueEmpty();
+  }
   return false;
 }
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 4901edff3..f7d5b09f3 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -185,6 +185,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
   config_->rc_target_bitrate = inst->startBitrate;  // in kbit/s
   temporal_layers_->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
                                       inst->maxFramerate, config_);
+#ifndef ECOVATE_USE_VP8_DEFAULTS
   // setting the time base of the codec
   config_->g_timebase.num = 1;
   config_->g_timebase.den = 90000;
@@ -252,6 +253,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
   } else {
     config_->kf_mode = VPX_KF_DISABLED;
   }
+#endif  // not ECOVATE_USE_VP8_DEFAULTS
   switch (inst->codecSpecific.VP8.complexity) {
     case kComplexityHigh:
       cpu_speed_ = -5;
diff --git a/webrtc/modules/video_processing/main/source/spatial_resampler.cc b/webrtc/modules/video_processing/main/source/spatial_resampler.cc
index fd90c8f76..d12a2aa70 100644
--- a/webrtc/modules/video_processing/main/source/spatial_resampler.cc
+++ b/webrtc/modules/video_processing/main/source/spatial_resampler.cc
@@ -14,7 +14,11 @@ namespace webrtc {

 VPMSimpleSpatialResampler::VPMSimpleSpatialResampler()
+#ifdef ECOVATE_NO_SCALING
+    : resampling_mode_(kNoRescaling),
+#else
     : resampling_mode_(kFastRescaling),
+#endif
       target_width_(0),
       target_height_(0),
       scaler_() {}
diff --git a/webrtc/modules/video_render/video_render_frames.cc b/webrtc/modules/video_render/video_render_frames.cc
index d790877e3..60b949f25 100644
--- a/webrtc/modules/video_render/video_render_frames.cc
+++ b/webrtc/modules/video_render/video_render_frames.cc
@@ -105,7 +105,12 @@ I420VideoFrame* VideoRenderFrames::FrameToRender() {
   FrameList::iterator iter = incoming_frames_.begin();
   while(iter != incoming_frames_.end()) {
     I420VideoFrame* oldest_frame_in_list = *iter;
-    if (oldest_frame_in_list->render_time_ms() <=
+#ifdef ECOVATE_NO_VIDEO_DELAY
+    bool delay = false;
+#else
+    bool delay = true;
+#endif
+    if ((! delay) || oldest_frame_in_list->render_time_ms() <=
         TickTime::MillisecondTimestamp() + render_delay_ms_) {
       // This is the oldest one so far and it's OK to render.
       if (render_frame) {

From c9e2dae1eef0da58ef17f1c620c88de9693c6dde Mon Sep 17 00:00:00 2001
From: Joel Dice
Date: Tue, 9 Sep 2014 11:37:42 -0600
Subject: [PATCH 2/2] add support for registering multiple ecovate::Listener
 instances

ecovate::Listener is a hack to allow us to listen for certain events
that the WebRTC stack doesn't ordinarily report to the application,
such as when the network layer is ready to send and when it's ready
for more data.  Ideally, we'd pass this listener all the way down the
stack, but that would require changing a lot of code, so we use a
hack: acquire a global lock and update a global reference to the
desired listener before calling PeerConnection::SetLocalDescription,
then clear the global reference and release the global lock.  That
way, each PeerConnection instance gets a listener appropriate to that
connection, whereas the previous implementation only worked if there
was only one PeerConnection per process.

This commit also includes a couple of minor changes to ensure every
frame the application gives to the WebRTC stack makes it all the way
to the network (e.g. no frame dropping and no multithreading-induced
nondeterminism).  This makes writing a reliable integration test much
easier and aids debugging.
---
 talk/media/webrtc/webrtcvideoengine.cc       | 13 +++++++++-
 talk/media/webrtc/webrtcvideoengine.h        | 20 ++++++++++++++++
 webrtc/modules/pacing/include/paced_sender.h | 20 ++++++++++++++++
 webrtc/modules/pacing/paced_sender.cc        | 24 +++++++++++--------
 .../video_coding/main/source/receiver.cc     |  1 +
 .../video_coding/main/source/video_sender.cc |  3 +++
 webrtc/video_engine/vie_capturer.cc          |  4 ++++
 7 files changed, 74 insertions(+), 11 deletions(-)

diff --git a/talk/media/webrtc/webrtcvideoengine.cc b/talk/media/webrtc/webrtcvideoengine.cc
index a99f4e0b8..ef8f703eb 100644
--- a/talk/media/webrtc/webrtcvideoengine.cc
+++ b/talk/media/webrtc/webrtcvideoengine.cc
@@ -1575,7 +1575,11 @@ WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
       send_fec_type_(-1),
       sending_(false),
       ratio_w_(0),
-      ratio_h_(0) {
+      ratio_h_(0),
+      listener_(ecovate::listener) {
+  if (listener_) {
+    listener_->incrementReferenceCount();
+  }
   engine->RegisterChannel(this);
 }
@@ -1609,6 +1613,10 @@ WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
   if (worker_thread()) {
     worker_thread()->Clear(this);
   }
+
+  if (listener_) {
+    listener_->decrementReferenceCount();
+  }
 }

 bool WebRtcVideoMediaChannel::SetRecvCodecs(
@@ -2153,6 +2161,9 @@ bool WebRtcVideoMediaChannel::StartSend() {
       success = false;
     }
   }
+  if (success && listener_) {
+    listener_->onSendMedia();
+  }
   return success;
 }
diff --git a/talk/media/webrtc/webrtcvideoengine.h b/talk/media/webrtc/webrtcvideoengine.h
index f467b9757..f8b4432dd 100644
--- a/talk/media/webrtc/webrtcvideoengine.h
+++ b/talk/media/webrtc/webrtcvideoengine.h
@@ -45,6 +45,24 @@
 #error "Bogus include."
 #endif

+namespace ecovate {
+
+struct Listener {
+  virtual ~Listener() { }
+
+  virtual void incrementReferenceCount() = 0;
+
+  virtual void decrementReferenceCount() = 0;
+
+  virtual void onSendQueueEmpty() = 0;
+
+  virtual void onSendMedia() = 0;
+};
+
+extern Listener* listener;
+
+}  // namespace ecovate
+
 namespace webrtc {
 class VideoCaptureModule;
 class VideoDecoder;
@@ -460,6 +478,8 @@ class WebRtcVideoMediaChannel : public rtc::MessageHandler,
   // aspect ratio
   int ratio_w_;
   int ratio_h_;
+
+  ecovate::Listener* listener_;
 };

 }  // namespace cricket
diff --git a/webrtc/modules/pacing/include/paced_sender.h b/webrtc/modules/pacing/include/paced_sender.h
index ddd8e53b7..86b3ba26c 100644
--- a/webrtc/modules/pacing/include/paced_sender.h
+++ b/webrtc/modules/pacing/include/paced_sender.h
@@ -19,6 +19,24 @@
 #include "webrtc/system_wrappers/interface/thread_annotations.h"
 #include "webrtc/typedefs.h"

+namespace ecovate {
+
+struct Listener {
+  virtual ~Listener() { }
+
+  virtual void incrementReferenceCount() = 0;
+
+  virtual void decrementReferenceCount() = 0;
+
+  virtual void onSendQueueEmpty() = 0;
+
+  virtual void onSendMedia() = 0;
+};
+
+extern Listener* listener;
+
+}  // namespace ecovate
+
 namespace webrtc {
 class Clock;
 class CriticalSectionWrapper;
@@ -157,6 +175,8 @@ class PacedSender : public Module {
       GUARDED_BY(critsect_);
   scoped_ptr<paced_sender::PacketList> low_priority_packets_
       GUARDED_BY(critsect_);
+
+  ecovate::Listener* listener_;
 };
 }  // namespace webrtc
 #endif  // WEBRTC_MODULES_PACED_SENDER_H_
diff --git a/webrtc/modules/pacing/paced_sender.cc b/webrtc/modules/pacing/paced_sender.cc
index b4b300037..e6df46bda 100644
--- a/webrtc/modules/pacing/paced_sender.cc
+++ b/webrtc/modules/pacing/paced_sender.cc
@@ -22,12 +22,6 @@

 namespace ecovate {

-struct Listener {
-  virtual ~Listener() { }
-
-  virtual void onSendQueueEmpty() = 0;
-};
-
 Listener* listener = 0;

 }  // namespace ecovate
@@ -158,11 +152,21 @@ PacedSender::PacedSender(Clock* clock,
       capture_time_ms_last_sent_(0),
       high_priority_packets_(new paced_sender::PacketList),
       normal_priority_packets_(new paced_sender::PacketList),
-      low_priority_packets_(new paced_sender::PacketList) {
+      low_priority_packets_(new paced_sender::PacketList),
+      listener_(ecovate::listener)
+{
+  if (listener_) {
+    listener_->incrementReferenceCount();
+  }
+
   UpdateBytesPerInterval(kMinPacketLimitMs);
 }

-PacedSender::~PacedSender() {}
+PacedSender::~PacedSender() {
+  if (listener_) {
+    listener_->decrementReferenceCount();
+  }
+}

 void PacedSender::Pause() {
   CriticalSectionScoped cs(critsect_.get());
@@ -384,8 +388,8 @@ bool PacedSender::ShouldSendNextPacket(paced_sender::PacketList** packet_list) {
     *packet_list = low_priority_packets_.get();
     return true;
   }
-  if (ecovate::listener) {
-    ecovate::listener->onSendQueueEmpty();
+  if (listener_) {
+    listener_->onSendQueueEmpty();
   }
   return false;
 }
diff --git a/webrtc/modules/video_coding/main/source/receiver.cc b/webrtc/modules/video_coding/main/source/receiver.cc
index e179423a7..1dacbcdc3 100644
--- a/webrtc/modules/video_coding/main/source/receiver.cc
+++ b/webrtc/modules/video_coding/main/source/receiver.cc
@@ -197,6 +197,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
       timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
     }
   }
+
   return frame;
 }
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/main/source/video_sender.cc
index 38ecc5479..f8d1d5d7c 100644
--- a/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender.cc
@@ -359,9 +359,12 @@ int32_t VideoSender::AddVideoFrame(const I420VideoFrame& videoFrame,
   if (_nextFrameTypes[0] == kFrameEmpty) {
     return VCM_OK;
   }
+#ifndef ECOVATE_NO_FRAME_DROP
   if (_mediaOpt.DropFrame()) {
     return VCM_OK;
   }
+#endif
+
   _mediaOpt.UpdateContentData(contentMetrics);
   int32_t ret =
       _encoder->Encode(videoFrame, codecSpecificInfo, _nextFrameTypes);
diff --git a/webrtc/video_engine/vie_capturer.cc b/webrtc/video_engine/vie_capturer.cc
index 231dcfb3b..6bc703676 100644
--- a/webrtc/video_engine/vie_capturer.cc
+++ b/webrtc/video_engine/vie_capturer.cc
@@ -346,6 +346,9 @@ void ViECapturer::OnIncomingCapturedFrame(const int32_t capture_id,
   TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", video_frame.render_time_ms(),
                            "render_time", video_frame.render_time_ms());

+#ifdef ECOVATE_NO_FRAME_DROP
+  DeliverI420Frame(&video_frame);
+#else
   if (video_frame.native_handle() != NULL) {
     captured_frame_.reset(video_frame.CloneFrame());
   } else {
@@ -356,6 +359,7 @@ void ViECapturer::OnIncomingCapturedFrame(const int32_t capture_id,
   capture_event_.Set();
   overuse_detector_->FrameCaptured(captured_frame_->width(),
                                    captured_frame_->height());
+#endif
 }

 void ViECapturer::OnCaptureDelayChanged(const int32_t id,
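Illustrative usage sketch (not part of either patch).  The ecovate::Listener
interface, the global ecovate::listener pointer, and the now-public
cricket::SrtpSession::Init() come from the patches above; everything else
here -- the FrameThrottler class, the g_listener_lock mutex, and the
SetUpConnectionWithListener helper -- is hypothetical application code
showing one way the hooks might be used: throttle frame production on
onSendQueueEmpty, and hold a global lock while pointing ecovate::listener
at the listener for the PeerConnection currently being set up.

#include <atomic>
#include <condition_variable>
#include <mutex>

#include "talk/media/webrtc/webrtcvideoengine.h"  // ecovate::Listener
#include "talk/session/media/srtpfilter.h"        // cricket::SrtpSession

// Hypothetical listener: blocks the capture loop until the pacer reports an
// empty send queue, so frames are produced no faster than the network can
// accept them.  The application owns this object, so the reference counts
// are informational only in this sketch.
class FrameThrottler : public ecovate::Listener {
 public:
  virtual void incrementReferenceCount() { ++refs_; }
  virtual void decrementReferenceCount() { --refs_; }

  // Sending has started; nothing to do for this sketch.
  virtual void onSendMedia() { }

  // Called by PacedSender when its queues have been drained.
  virtual void onSendQueueEmpty() {
    std::lock_guard<std::mutex> lock(mutex_);
    ready_ = true;
    cv_.notify_one();
  }

  // Called from the application's capture loop before producing a frame.
  void WaitUntilReadyForFrame() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return ready_; });
    ready_ = false;
  }

 private:
  std::atomic<int> refs_{0};
  std::mutex mutex_;
  std::condition_variable cv_;
  bool ready_ = true;  // start ready so the first frame is not blocked
};

// Hypothetical global lock guarding the global ecovate::listener pointer
// while a connection is being set up (the "hack" described in the second
// commit message).
std::mutex g_listener_lock;

// set_local_description is a caller-supplied callable that invokes
// PeerConnection::SetLocalDescription on the connection being created.
template <typename SetLocalDescription>
void SetUpConnectionWithListener(FrameThrottler* throttler,
                                 SetLocalDescription set_local_description) {
  // One-time, process-wide SRTP initialization, done before the first peer
  // is created to avoid the data race mentioned in the first commit message.
  static const bool srtp_initialized = cricket::SrtpSession::Init();
  (void) srtp_initialized;

  std::lock_guard<std::mutex> lock(g_listener_lock);
  ecovate::listener = throttler;   // seen by the channel/pacer created below
  set_local_description();         // e.g. PeerConnection::SetLocalDescription
  ecovate::listener = NULL;        // the next connection installs its own
}

Clearing ecovate::listener before releasing the lock matches the intent of
the second commit message: only objects constructed while the lock is held
pick up this listener, so each PeerConnection ends up with its own.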