diff --git a/app/streaming/video/ffmpeg-renderers/d3d11va.cpp b/app/streaming/video/ffmpeg-renderers/d3d11va.cpp index a79af193..8df61477 100644 --- a/app/streaming/video/ffmpeg-renderers/d3d11va.cpp +++ b/app/streaming/video/ffmpeg-renderers/d3d11va.cpp @@ -584,11 +584,6 @@ void D3D11VARenderer::setHdrMode(bool enabled) void D3D11VARenderer::renderFrame(AVFrame* frame) { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - // Acquire the context lock for rendering to prevent concurrent // access from inside FFmpeg's decoding code lockContext(this); diff --git a/app/streaming/video/ffmpeg-renderers/drm.cpp b/app/streaming/video/ffmpeg-renderers/drm.cpp index 417f007f..5100d0e8 100644 --- a/app/streaming/video/ffmpeg-renderers/drm.cpp +++ b/app/streaming/video/ffmpeg-renderers/drm.cpp @@ -475,11 +475,6 @@ void DrmRenderer::renderFrame(AVFrame* frame) AVDRMFrameDescriptor mappedFrame; AVDRMFrameDescriptor* drmFrame; - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - // If we are acting as the frontend renderer, we'll need to have the backend // map this frame into a DRM PRIME descriptor that we can render. 
if (m_BackendRenderer != nullptr) { diff --git a/app/streaming/video/ffmpeg-renderers/dxva2.cpp b/app/streaming/video/ffmpeg-renderers/dxva2.cpp index 671d018a..d061c06b 100644 --- a/app/streaming/video/ffmpeg-renderers/dxva2.cpp +++ b/app/streaming/video/ffmpeg-renderers/dxva2.cpp @@ -978,11 +978,6 @@ int DXVA2Renderer::getDecoderColorspace() void DXVA2Renderer::renderFrame(AVFrame *frame) { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - IDirect3DSurface9* surface = reinterpret_cast<IDirect3DSurface9*>(frame->data[3]); HRESULT hr; diff --git a/app/streaming/video/ffmpeg-renderers/eglvid.cpp b/app/streaming/video/ffmpeg-renderers/eglvid.cpp index 407bacfa..705ad7b5 100644 --- a/app/streaming/video/ffmpeg-renderers/eglvid.cpp +++ b/app/streaming/video/ffmpeg-renderers/eglvid.cpp @@ -814,16 +814,16 @@ bool EGLRenderer::specialize() { return err == GL_NO_ERROR; } +void EGLRenderer::cleanupRenderContext() +{ + // Detach the context from the render thread so the destructor can attach it + SDL_GL_MakeCurrent(m_Window, nullptr); +} + void EGLRenderer::renderFrame(AVFrame* frame) { EGLImage imgs[EGL_MAX_PLANES]; - if (frame == nullptr) { - // End of stream - unbind the GL context - SDL_GL_MakeCurrent(m_Window, nullptr); - return; - } - // Attach our GL context to the render thread // NB: It should already be current, unless the SDL render event watcher // performs a rendering operation (like a viewport update on resize) on diff --git a/app/streaming/video/ffmpeg-renderers/eglvid.h b/app/streaming/video/ffmpeg-renderers/eglvid.h index 317e790e..c3572626 100644 --- a/app/streaming/video/ffmpeg-renderers/eglvid.h +++ b/app/streaming/video/ffmpeg-renderers/eglvid.h @@ -11,6 +11,7 @@ public: virtual ~EGLRenderer() override; virtual bool initialize(PDECODER_PARAMETERS params) override; virtual bool prepareDecoderContext(AVCodecContext* context, AVDictionary** options) override; + virtual void cleanupRenderContext() override; virtual void renderFrame(AVFrame* 
frame) override; virtual bool testRenderFrame(AVFrame* frame) override; virtual void notifyOverlayUpdated(Overlay::OverlayType) override; diff --git a/app/streaming/video/ffmpeg-renderers/mmal.cpp b/app/streaming/video/ffmpeg-renderers/mmal.cpp index eb0c9d51..3ebe82b6 100644 --- a/app/streaming/video/ffmpeg-renderers/mmal.cpp +++ b/app/streaming/video/ffmpeg-renderers/mmal.cpp @@ -345,11 +345,6 @@ bool MmalRenderer::needsTestFrame() void MmalRenderer::renderFrame(AVFrame* frame) { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - MMAL_BUFFER_HEADER_T* buffer = (MMAL_BUFFER_HEADER_T*)frame->data[3]; MMAL_STATUS_T status; diff --git a/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp b/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp index dedcaff2..d216bb6f 100644 --- a/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp +++ b/app/streaming/video/ffmpeg-renderers/pacer/pacer.cpp @@ -46,8 +46,9 @@ Pacer::~Pacer() SDL_WaitThread(m_RenderThread, nullptr); } else { - // Send a null AVFrame to indicate end of stream on the main thread - m_VsyncRenderer->renderFrame(nullptr); + // Notify the renderer that it is being destroyed soon + // NB: This must happen on the same thread that calls renderFrame(). + m_VsyncRenderer->cleanupRenderContext(); } // Delete any remaining unconsumed frames @@ -110,8 +111,9 @@ int Pacer::renderThread(void* context) me->renderLastFrameAndUnlock(); } - // Send a null AVFrame to indicate end of stream on the render thread - me->m_VsyncRenderer->renderFrame(nullptr); + // Notify the renderer that it is being destroyed soon + // NB: This must happen on the same thread that calls renderFrame(). 
+ me->m_VsyncRenderer->cleanupRenderContext(); return 0; } diff --git a/app/streaming/video/ffmpeg-renderers/renderer.h b/app/streaming/video/ffmpeg-renderers/renderer.h index 570c34b3..3ec93b7e 100644 --- a/app/streaming/video/ffmpeg-renderers/renderer.h +++ b/app/streaming/video/ffmpeg-renderers/renderer.h @@ -102,6 +102,11 @@ public: virtual bool prepareDecoderContext(AVCodecContext* context, AVDictionary** options) = 0; virtual void renderFrame(AVFrame* frame) = 0; + // Called on the same thread as renderFrame() during destruction of the renderer + virtual void cleanupRenderContext() { + // Nothing + } + virtual bool testRenderFrame(AVFrame*) { // If the renderer doesn't provide an explicit test routine, // we will always assume that any returned AVFrame can be diff --git a/app/streaming/video/ffmpeg-renderers/sdlvid.cpp b/app/streaming/video/ffmpeg-renderers/sdlvid.cpp index e87eecc6..132d78e1 100644 --- a/app/streaming/video/ffmpeg-renderers/sdlvid.cpp +++ b/app/streaming/video/ffmpeg-renderers/sdlvid.cpp @@ -344,11 +344,6 @@ void SdlRenderer::renderFrame(AVFrame* frame) int err; AVFrame* swFrame = nullptr; - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - if (frame->hw_frames_ctx != nullptr && frame->format != AV_PIX_FMT_CUDA) { #ifdef HAVE_CUDA ReadbackRetry: diff --git a/app/streaming/video/ffmpeg-renderers/vaapi.cpp b/app/streaming/video/ffmpeg-renderers/vaapi.cpp index 51015c5e..214771d2 100644 --- a/app/streaming/video/ffmpeg-renderers/vaapi.cpp +++ b/app/streaming/video/ffmpeg-renderers/vaapi.cpp @@ -351,11 +351,6 @@ int VAAPIRenderer::getDecoderColorspace() void VAAPIRenderer::renderFrame(AVFrame* frame) { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - VASurfaceID surface = (VASurfaceID)(uintptr_t)frame->data[3]; AVHWDeviceContext* deviceContext = (AVHWDeviceContext*)m_HwContext->data; AVVAAPIDeviceContext* vaDeviceContext = (AVVAAPIDeviceContext*)deviceContext->hwctx; diff --git 
a/app/streaming/video/ffmpeg-renderers/vdpau.cpp b/app/streaming/video/ffmpeg-renderers/vdpau.cpp index 4b53adb0..91d11537 100644 --- a/app/streaming/video/ffmpeg-renderers/vdpau.cpp +++ b/app/streaming/video/ffmpeg-renderers/vdpau.cpp @@ -476,11 +476,6 @@ void VDPAURenderer::renderOverlay(VdpOutputSurface destination, Overlay::Overlay void VDPAURenderer::renderFrame(AVFrame* frame) { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - VdpStatus status; VdpVideoSurface videoSurface = (VdpVideoSurface)(uintptr_t)frame->data[3]; diff --git a/app/streaming/video/ffmpeg-renderers/vt.mm b/app/streaming/video/ffmpeg-renderers/vt.mm index ca168c54..3e532642 100644 --- a/app/streaming/video/ffmpeg-renderers/vt.mm +++ b/app/streaming/video/ffmpeg-renderers/vt.mm @@ -161,11 +161,6 @@ public: // Caller frees frame after we return virtual void renderFrame(AVFrame* frame) override { - if (frame == nullptr) { - // End of stream - nothing to do for us - return; - } - OSStatus status; CVPixelBufferRef pixBuf = reinterpret_cast<CVPixelBufferRef>(frame->data[3]);