M116 stage 2
This commit is contained in:
parent
f4c9b948cb
commit
4b3fde8a39
11 changed files with 74 additions and 2679 deletions
|
@ -48,7 +48,7 @@
|
|||
#endif // BUILDFLAG(ENABLE_WIDEVINE)
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
#include "media/base/android/media_drm_bridge.h"
|
||||
#include "components/cdm/common/android_cdm_registration.h"
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
namespace {
|
||||
|
@ -348,34 +348,6 @@ void AddMediaFoundationClearKey(std::vector<content::CdmInfo>* cdms) {
|
|||
}
|
||||
#endif // BUILDFLAG(IS_WIN)
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
void AddOtherAndroidKeySystems(std::vector<content::CdmInfo>* cdms) {
|
||||
// CdmInfo needs a CdmType, but on Android it is not used as the key system
|
||||
// is supported by MediaDrm. Using a random value as something needs to be
|
||||
// specified, but must be different than other CdmTypes specified.
|
||||
// (On Android the key system is identified by UUID, and that mapping is
|
||||
// maintained by MediaDrmBridge.)
|
||||
const media::CdmType kAndroidCdmType{0x2e9dabb9c171c28cull,
|
||||
0xf455252ec70b52adull};
|
||||
|
||||
// MediaDrmBridge returns a list of key systems available on the device
|
||||
// that are not Widevine. Register them with no capabilities specified so
|
||||
// that lazy evaluation can figure out what is supported when requested.
|
||||
// We don't know if either software secure or hardware secure support is
|
||||
// available, so register them both. Lazy evaluation will remove them
|
||||
// if they aren't supported.
|
||||
const auto key_system_names =
|
||||
media::MediaDrmBridge::GetPlatformKeySystemNames();
|
||||
for (const auto& key_system : key_system_names) {
|
||||
DVLOG(3) << __func__ << " key_system:" << key_system;
|
||||
cdms->push_back(content::CdmInfo(key_system, Robustness::kSoftwareSecure,
|
||||
absl::nullopt, kAndroidCdmType));
|
||||
cdms->push_back(content::CdmInfo(key_system, Robustness::kHardwareSecure,
|
||||
absl::nullopt, kAndroidCdmType));
|
||||
}
|
||||
}
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
} // namespace
|
||||
|
||||
void RegisterCdmInfo(std::vector<content::CdmInfo>* cdms) {
|
||||
|
@ -396,7 +368,7 @@ void RegisterCdmInfo(std::vector<content::CdmInfo>* cdms) {
|
|||
#endif
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
AddOtherAndroidKeySystems(cdms);
|
||||
cdm::AddOtherAndroidCdms(cdms);
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
DVLOG(3) << __func__ << " done with " << cdms->size() << " cdms";
|
||||
|
|
|
@ -371,6 +371,11 @@
|
|||
<structure type="chrome_scaled_image" name="IDR_TAILORED_SECURITY_UNCONSENTED" file="common/tailored_security_unconsented.png" />
|
||||
<structure type="chrome_scaled_image" name="IDR_TAILORED_SECURITY_UNCONSENTED_UPDATED" file="common/safer_with_google_shield.png" />
|
||||
</if>
|
||||
<if expr="_google_chrome">
|
||||
<if expr="not is_android">
|
||||
<structure type="chrome_scaled_image" name="IDR_SUCCESS_GREEN_CHECKMARK" file="google_chrome/success_green_checkmark.png" />
|
||||
</if>
|
||||
</if>
|
||||
</structures>
|
||||
</release>
|
||||
</grit>
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
#endif // BUILDFLAG(ENABLE_WIDEVINE)
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
#include "media/base/android/media_drm_bridge.h"
|
||||
#include "components/cdm/common/android_cdm_registration.h"
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
namespace {
|
||||
|
@ -348,34 +348,6 @@ void AddMediaFoundationClearKey(std::vector<content::CdmInfo>* cdms) {
|
|||
}
|
||||
#endif // BUILDFLAG(IS_WIN)
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
void AddOtherAndroidKeySystems(std::vector<content::CdmInfo>* cdms) {
|
||||
// CdmInfo needs a CdmType, but on Android it is not used as the key system
|
||||
// is supported by MediaDrm. Using a random value as something needs to be
|
||||
// specified, but must be different than other CdmTypes specified.
|
||||
// (On Android the key system is identified by UUID, and that mapping is
|
||||
// maintained by MediaDrmBridge.)
|
||||
const media::CdmType kAndroidCdmType{0x2e9dabb9c171c28cull,
|
||||
0xf455252ec70b52adull};
|
||||
|
||||
// MediaDrmBridge returns a list of key systems available on the device
|
||||
// that are not Widevine. Register them with no capabilities specified so
|
||||
// that lazy evaluation can figure out what is supported when requested.
|
||||
// We don't know if either software secure or hardware secure support is
|
||||
// available, so register them both. Lazy evaluation will remove them
|
||||
// if they aren't supported.
|
||||
const auto key_system_names =
|
||||
media::MediaDrmBridge::GetPlatformKeySystemNames();
|
||||
for (const auto& key_system : key_system_names) {
|
||||
DVLOG(3) << __func__ << " key_system:" << key_system;
|
||||
cdms->push_back(content::CdmInfo(key_system, Robustness::kSoftwareSecure,
|
||||
absl::nullopt, kAndroidCdmType));
|
||||
cdms->push_back(content::CdmInfo(key_system, Robustness::kHardwareSecure,
|
||||
absl::nullopt, kAndroidCdmType));
|
||||
}
|
||||
}
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
} // namespace
|
||||
|
||||
void RegisterCdmInfo(std::vector<content::CdmInfo>* cdms) {
|
||||
|
@ -396,7 +368,7 @@ void RegisterCdmInfo(std::vector<content::CdmInfo>* cdms) {
|
|||
#endif
|
||||
|
||||
#if BUILDFLAG(IS_ANDROID)
|
||||
AddOtherAndroidKeySystems(cdms);
|
||||
cdm::AddOtherAndroidCdms(cdms);
|
||||
#endif // BUILDFLAG(IS_ANDROID)
|
||||
|
||||
DVLOG(3) << __func__ << " done with " << cdms->size() << " cdms";
|
||||
|
|
|
@ -336,12 +336,12 @@ BASE_FEATURE(kPlatformHEVCDecoderSupport,
|
|||
"PlatformHEVCDecoderSupport",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
|
||||
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE)
|
||||
// Enables HEVC hardware accelerated encoding for Windows and Mac.
|
||||
BASE_FEATURE(kPlatformHEVCEncoderSupport,
|
||||
"PlatformHEVCEncoderSupport",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
|
||||
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE)
|
||||
#endif // BUILDFLAG(ENABLE_PLATFORM_HEVC)
|
||||
|
||||
// Only decode preload=metadata elements upon visibility.
|
||||
|
@ -428,6 +428,11 @@ BASE_FEATURE(kCdmProcessSiteIsolation,
|
|||
"CdmProcessSiteIsolation",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
// Enables the "Copy Video Frame" context menu item.
|
||||
BASE_FEATURE(kContextMenuCopyVideoFrame,
|
||||
"ContextMenuCopyVideoFrame",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
#if BUILDFLAG(CHROME_WIDE_ECHO_CANCELLATION)
|
||||
// If echo cancellation for a mic signal is requested, mix and cancel all audio
|
||||
// playback going to a specific output device in the audio service.
|
||||
|
@ -526,6 +531,13 @@ BASE_FEATURE(kCrOSDspBasedNsAllowed,
|
|||
BASE_FEATURE(kCrOSDspBasedAgcAllowed,
|
||||
"CrOSDspBasedAgcAllowed",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
BASE_FEATURE(kIgnoreUiGains,
|
||||
"IgnoreUiGains",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
BASE_FEATURE(kShowForceRespectUiGainsToggle,
|
||||
"ShowForceRespectUiGainsToggle",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
#endif
|
||||
|
||||
// Make MSE garbage collection algorithm more aggressive when we are under
|
||||
|
@ -812,6 +824,19 @@ BASE_FEATURE(kVideoBlitColorAccuracy,
|
|||
"video-blit-color-accuracy",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
#if BUILDFLAG(IS_APPLE)
|
||||
// Use the new VideoToolboxVideoDecoder for hardware decoding.
|
||||
BASE_FEATURE(kVideoToolboxVideoDecoder,
|
||||
"VideoToolboxVideoDecoder",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
#endif // BUILDFLAG(IS_APPLE)
|
||||
|
||||
// Inform webrtc with correct video color space information whenever
|
||||
// possible.
|
||||
BASE_FEATURE(kWebRTCColorAccuracy,
|
||||
"WebRTCColorAccuracy",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
// Enable VP9 k-SVC decoding with HW decoder for webrtc use case.
|
||||
BASE_FEATURE(kVp9kSVCHWDecoding,
|
||||
"Vp9kSVCHWDecoding",
|
||||
|
@ -1163,6 +1188,11 @@ BASE_FEATURE(kMediaFoundationD3D11VideoCapture,
|
|||
"MediaFoundationD3D11VideoCapture",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
// Enable zero-copy based on MediaFoundation video capture with D3D11.
|
||||
BASE_FEATURE(kMediaFoundationD3D11VideoCaptureZeroCopy,
|
||||
"MediaFoundationD3D11VideoCaptureZeroCopy",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
// Enables VP8 decode acceleration for Windows.
|
||||
const base::Feature MEDIA_EXPORT kMediaFoundationVP8Decoding{
|
||||
"MediaFoundationVP8Decoding", base::FEATURE_DISABLED_BY_DEFAULT};
|
||||
|
@ -1182,22 +1212,11 @@ BASE_FEATURE(kMediaFoundationClearPlayback,
|
|||
const base::Feature MEDIA_EXPORT kWasapiRawAudioCapture{
|
||||
"WASAPIRawAudioCapture", base::FEATURE_ENABLED_BY_DEFAULT};
|
||||
|
||||
// Emulates audio capture timestamps instead of using timestamps from the actual
|
||||
// audio device.
|
||||
// See crbug.com/1315231 for more details.
|
||||
const base::Feature MEDIA_EXPORT kUseFakeAudioCaptureTimestamps{
|
||||
"UseFakeAudioCaptureTimestamps", base::FEATURE_DISABLED_BY_DEFAULT};
|
||||
|
||||
// Enable VP9 kSVC decoding with HW decoder for webrtc use case on Windows.
|
||||
BASE_FEATURE(kD3D11Vp9kSVCHWDecoding,
|
||||
"D3D11Vp9kSVCHWDecoding",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
// Controls whether the DXVA video decoder is enabled on Windows.
|
||||
BASE_FEATURE(kDXVAVideoDecoding,
|
||||
"DXVAVideoDecoding",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
// The Media Foundation Rendering Strategy determines which presentation mode
|
||||
// Media Foundation Renderer should use for presenting clear content. This
|
||||
// strategy has no impact for protected content, which must always use Direct
|
||||
|
@ -1273,7 +1292,13 @@ const base::Feature MEDIA_EXPORT kExposeOutOfProcessVideoDecodingToLacros{
|
|||
// Spawn utility processes to perform hardware decode acceleration instead of
|
||||
// using the GPU process.
|
||||
const base::Feature MEDIA_EXPORT kUseOutOfProcessVideoDecoding{
|
||||
"UseOutOfProcessVideoDecoding", base::FEATURE_DISABLED_BY_DEFAULT};
|
||||
"UseOutOfProcessVideoDecoding",
|
||||
#if BUILDFLAG(IS_CHROMEOS_LACROS)
|
||||
base::FEATURE_ENABLED_BY_DEFAULT
|
||||
#else
|
||||
base::FEATURE_DISABLED_BY_DEFAULT
|
||||
#endif
|
||||
};
|
||||
#endif // BUILDFLAG(ALLOW_OOP_VIDEO_DECODER)
|
||||
|
||||
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
|
||||
|
@ -1424,6 +1449,14 @@ BASE_FEATURE(kAudioFocusLossSuspendMediaSession,
|
|||
"AudioFocusMediaSession",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
#if !BUILDFLAG(IS_ANDROID)
|
||||
// Hides the media metadata from the OS' media player if running in an Incognito
|
||||
// session.
|
||||
BASE_FEATURE(kHideIncognitoMediaMetadata,
|
||||
"HideIncognitoMediaMetadata",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
#endif
|
||||
|
||||
// Enables the internal Media Session logic without enabling the Media Session
|
||||
// service.
|
||||
BASE_FEATURE(kInternalMediaSession,
|
||||
|
@ -1461,6 +1494,10 @@ BASE_FEATURE(kCastStreamingAv1,
|
|||
"CastStreamingAv1",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
|
||||
BASE_FEATURE(kCastStreamingPerformanceOverlay,
|
||||
"CastStreamingPerformanceOverlay",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
// Controls whether mirroring negotiations will include the VP9 codec for video
|
||||
// encoding.
|
||||
//
|
||||
|
@ -1474,9 +1511,15 @@ BASE_FEATURE(kCastStreamingVp9,
|
|||
// Enables use of Fuchsia's Mediacodec service for encoding.
|
||||
BASE_FEATURE(kFuchsiaMediacodecVideoEncoder,
|
||||
"FuchsiaMediacodecVideoEncoder",
|
||||
base::FEATURE_ENABLED_BY_DEFAULT);
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
#endif // BUILDFLAG(IS_FUCHSIA)
|
||||
|
||||
// Controls whether to pre-dispatch more decode tasks when pending decodes is
|
||||
// smaller than maximum supported decodes as advertiszed by decoder.
|
||||
BASE_FEATURE(kVideoDecodeBatching,
|
||||
"VideoDecodeBatching",
|
||||
base::FEATURE_DISABLED_BY_DEFAULT);
|
||||
|
||||
bool IsChromeWideEchoCancellationEnabled() {
|
||||
#if BUILDFLAG(CHROME_WIDE_ECHO_CANCELLATION)
|
||||
#if BUILDFLAG(IS_CHROMEOS_DEVICE)
|
||||
|
|
|
@ -1,251 +0,0 @@
|
|||
// Copyright 2023 The Chromium Authors and Alex313031
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "build/build_config.h"
|
||||
#include "gpu/config/gpu_preferences.h"
|
||||
#include "media/base/media_switches.h"
|
||||
#include "media/gpu/buildflags.h"
|
||||
#include "media/gpu/gpu_video_accelerator_util.h"
|
||||
#include "media/gpu/macros.h"
|
||||
#include "media/gpu/media_gpu_export.h"
|
||||
#include "media/media_buildflags.h"
|
||||
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
#include "base/win/windows_version.h"
|
||||
#include "media/gpu/windows/dxva_video_decode_accelerator_win.h"
|
||||
#endif
|
||||
#if BUILDFLAG(IS_MAC)
|
||||
#include "media/gpu/mac/vt_video_decode_accelerator_mac.h"
|
||||
#endif
|
||||
#if BUILDFLAG(USE_VAAPI)
|
||||
#include "media/gpu/vaapi/vaapi_video_decode_accelerator.h"
|
||||
#include "ui/gl/gl_implementation.h"
|
||||
#elif BUILDFLAG(USE_V4L2_CODEC) && \
|
||||
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
|
||||
#include "media/gpu/v4l2/v4l2_device.h"
|
||||
#include "media/gpu/v4l2/legacy/v4l2_slice_video_decode_accelerator.h"
|
||||
#include "media/gpu/v4l2/legacy/v4l2_video_decode_accelerator.h"
|
||||
#include "ui/gl/gl_surface_egl.h"
|
||||
#endif
|
||||
|
||||
namespace media {
|
||||
|
||||
namespace {
|
||||
|
||||
gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilitiesInternal(
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds) {
|
||||
if (gpu_preferences.disable_accelerated_video_decode)
|
||||
return gpu::VideoDecodeAcceleratorCapabilities();
|
||||
|
||||
// Query VDAs for their capabilities and construct a set of supported
|
||||
// profiles for current platform. This must be done in the same order as in
|
||||
// CreateVDA(), as we currently preserve additional capabilities (such as
|
||||
// resolutions supported) only for the first VDA supporting the given codec
|
||||
// profile (instead of calculating a superset).
|
||||
// TODO(posciak,henryhsu): improve this so that we choose a superset of
|
||||
// resolutions and other supported profile parameters.
|
||||
VideoDecodeAccelerator::Capabilities capabilities;
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
capabilities.supported_profiles =
|
||||
DXVAVideoDecodeAccelerator::GetSupportedProfiles(gpu_preferences,
|
||||
workarounds);
|
||||
#elif BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
|
||||
#if BUILDFLAG(USE_VAAPI)
|
||||
capabilities.supported_profiles =
|
||||
VaapiVideoDecodeAccelerator::GetSupportedProfiles();
|
||||
#elif BUILDFLAG(USE_V4L2_CODEC) && \
|
||||
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
|
||||
GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
|
||||
V4L2VideoDecodeAccelerator::GetSupportedProfiles(),
|
||||
&capabilities.supported_profiles);
|
||||
GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
|
||||
V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles(),
|
||||
&capabilities.supported_profiles);
|
||||
#endif
|
||||
#elif BUILDFLAG(IS_MAC)
|
||||
capabilities.supported_profiles =
|
||||
VTVideoDecodeAccelerator::GetSupportedProfiles(workarounds);
|
||||
#endif
|
||||
|
||||
return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
|
||||
capabilities);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// static
|
||||
MEDIA_GPU_EXPORT std::unique_ptr<GpuVideoDecodeAcceleratorFactory>
|
||||
GpuVideoDecodeAcceleratorFactory::Create(
|
||||
const GpuVideoDecodeGLClient& gl_client) {
|
||||
return base::WrapUnique(new GpuVideoDecodeAcceleratorFactory(gl_client));
|
||||
}
|
||||
|
||||
// static
|
||||
MEDIA_GPU_EXPORT gpu::VideoDecodeAcceleratorCapabilities
|
||||
GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds) {
|
||||
// Cache the capabilities so that they will not be computed more than once per
|
||||
// GPU process. It is assumed that |gpu_preferences| and |workarounds| do not
|
||||
// change between calls.
|
||||
// TODO(sandersd): Move cache to GpuMojoMediaClient once
|
||||
// |video_decode_accelerator_capabilities| is removed from GPUInfo.
|
||||
static gpu::VideoDecodeAcceleratorCapabilities capabilities =
|
||||
GetDecoderCapabilitiesInternal(gpu_preferences, workarounds);
|
||||
|
||||
#if BUILDFLAG(USE_V4L2_CODEC) && \
|
||||
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
|
||||
// V4L2-only: the decoder devices may not be visible at the time the GPU
|
||||
// process is starting. If the capabilities vector is empty, try to query the
|
||||
// devices again in the hope that they will have appeared in the meantime.
|
||||
// TODO(crbug.com/948147): trigger query when an device add/remove event
|
||||
// (e.g. via udev) has happened instead.
|
||||
if (capabilities.supported_profiles.empty()) {
|
||||
VLOGF(1) << "Capabilities empty, querying again...";
|
||||
capabilities = GetDecoderCapabilitiesInternal(gpu_preferences, workarounds);
|
||||
}
|
||||
#endif
|
||||
|
||||
return capabilities;
|
||||
}
|
||||
|
||||
MEDIA_GPU_EXPORT std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateVDA(
|
||||
VideoDecodeAccelerator::Client* client,
|
||||
const VideoDecodeAccelerator::Config& config,
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds,
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
MediaLog* media_log) {
|
||||
DCHECK(thread_checker_.CalledOnValidThread());
|
||||
|
||||
if (gpu_preferences.disable_accelerated_video_decode)
|
||||
return nullptr;
|
||||
|
||||
// Array of Create..VDA() function pointers, potentially usable on current
|
||||
// platform. This list is ordered by priority, from most to least preferred,
|
||||
// if applicable. This list must be in the same order as the querying order
|
||||
// in GetDecoderCapabilities() above.
|
||||
using CreateVDAFp = std::unique_ptr<VideoDecodeAccelerator> (
|
||||
GpuVideoDecodeAcceleratorFactory::*)(const gpu::GpuDriverBugWorkarounds&,
|
||||
const gpu::GpuPreferences&,
|
||||
MediaLog* media_log) const;
|
||||
const CreateVDAFp create_vda_fps[] = {
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
&GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA,
|
||||
#endif
|
||||
|
||||
// Usually only one of USE_VAAPI or USE_V4L2_CODEC is defined on ChromeOS,
|
||||
// except for Chromeboxes with companion video acceleration chips, which have
|
||||
// both. In those cases prefer the VA creation function.
|
||||
#if BUILDFLAG(USE_VAAPI)
|
||||
&GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA,
|
||||
#elif BUILDFLAG(USE_V4L2_CODEC) && \
|
||||
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
|
||||
&GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA,
|
||||
&GpuVideoDecodeAcceleratorFactory::CreateV4L2SliceVDA,
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(IS_MAC)
|
||||
&GpuVideoDecodeAcceleratorFactory::CreateVTVDA,
|
||||
#endif
|
||||
};
|
||||
|
||||
std::unique_ptr<VideoDecodeAccelerator> vda;
|
||||
|
||||
for (const auto& create_vda_function : create_vda_fps) {
|
||||
vda = (this->*create_vda_function)(workarounds, gpu_preferences, media_log);
|
||||
if (vda && vda->Initialize(config, client))
|
||||
return vda;
|
||||
else
|
||||
LOG(ERROR) << "Initialization of one or more VDAs failed.";
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA(
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds,
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
MediaLog* media_log) const {
|
||||
std::unique_ptr<VideoDecodeAccelerator> decoder;
|
||||
DVLOG(0) << "Initializing DXVA HW decoder for windows.";
|
||||
decoder.reset(new DXVAVideoDecodeAccelerator(
|
||||
gl_client_.get_context, gl_client_.make_context_current,
|
||||
gl_client_.bind_image, workarounds, gpu_preferences, media_log));
|
||||
return decoder;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(USE_VAAPI)
|
||||
std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA(
|
||||
const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
|
||||
const gpu::GpuPreferences& /*gpu_preferences*/,
|
||||
MediaLog* /*media_log*/) const {
|
||||
std::unique_ptr<VideoDecodeAccelerator> decoder;
|
||||
decoder.reset(new VaapiVideoDecodeAccelerator(gl_client_.make_context_current,
|
||||
gl_client_.bind_image));
|
||||
return decoder;
|
||||
}
|
||||
#elif BUILDFLAG(USE_V4L2_CODEC) && \
|
||||
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
|
||||
std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
|
||||
const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
|
||||
const gpu::GpuPreferences& /*gpu_preferences*/,
|
||||
MediaLog* /*media_log*/) const {
|
||||
std::unique_ptr<VideoDecodeAccelerator> decoder;
|
||||
scoped_refptr<V4L2Device> device = V4L2Device::Create();
|
||||
if (device.get()) {
|
||||
decoder.reset(new V4L2VideoDecodeAccelerator(
|
||||
gl::GLSurfaceEGL::GetGLDisplayEGL()->GetDisplay(),
|
||||
gl_client_.get_context, gl_client_.make_context_current, device));
|
||||
}
|
||||
return decoder;
|
||||
}
|
||||
|
||||
std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateV4L2SliceVDA(
|
||||
const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
|
||||
const gpu::GpuPreferences& /*gpu_preferences*/,
|
||||
MediaLog* /*media_log*/) const {
|
||||
std::unique_ptr<VideoDecodeAccelerator> decoder;
|
||||
scoped_refptr<V4L2Device> device = V4L2Device::Create();
|
||||
if (device.get()) {
|
||||
decoder.reset(new V4L2SliceVideoDecodeAccelerator(
|
||||
device, gl::GLSurfaceEGL::GetGLDisplayEGL()->GetDisplay(),
|
||||
gl_client_.bind_image, gl_client_.make_context_current));
|
||||
}
|
||||
return decoder;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(IS_MAC)
|
||||
std::unique_ptr<VideoDecodeAccelerator>
|
||||
GpuVideoDecodeAcceleratorFactory::CreateVTVDA(
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds,
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
MediaLog* media_log) const {
|
||||
LOG(WARNING) << "Initializing VAAPI VDA.";
|
||||
std::unique_ptr<VideoDecodeAccelerator> decoder;
|
||||
decoder.reset(
|
||||
new VTVideoDecodeAccelerator(gl_client_, workarounds, media_log));
|
||||
return decoder;
|
||||
}
|
||||
#endif
|
||||
|
||||
GpuVideoDecodeAcceleratorFactory::GpuVideoDecodeAcceleratorFactory(
|
||||
const GpuVideoDecodeGLClient& gl_client)
|
||||
: gl_client_(gl_client) {}
|
||||
GpuVideoDecodeAcceleratorFactory::~GpuVideoDecodeAcceleratorFactory() = default;
|
||||
|
||||
} // namespace media
|
|
@ -1,652 +0,0 @@
|
|||
// Copyright 2023 The Chromium Authors and Alex313031
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/location.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/synchronization/waitable_event.h"
|
||||
#include "base/task/bind_post_task.h"
|
||||
#include "base/task/sequenced_task_runner.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "build/build_config.h"
|
||||
#include "gpu/command_buffer/common/command_buffer.h"
|
||||
#include "gpu/config/gpu_preferences.h"
|
||||
#include "gpu/ipc/service/gpu_channel.h"
|
||||
#include "gpu/ipc/service/gpu_channel_manager.h"
|
||||
#include "ipc/ipc_message_macros.h"
|
||||
#include "ipc/ipc_message_utils.h"
|
||||
#include "ipc/message_filter.h"
|
||||
#include "media/base/limits.h"
|
||||
#include "media/gpu/gpu_video_accelerator_util.h"
|
||||
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
|
||||
#include "mojo/public/cpp/bindings/associated_receiver.h"
|
||||
#include "ui/gfx/geometry/size.h"
|
||||
#include "ui/gl/gl_context.h"
|
||||
#include "ui/gl/gl_image.h"
|
||||
|
||||
namespace media {
|
||||
|
||||
namespace {
|
||||
static gl::GLContext* GetGLContext(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; no GLContext.";
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return stub->decoder_context()->GetGLContext();
|
||||
}
|
||||
|
||||
static bool MakeDecoderContextCurrent(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!stub->decoder_context()->MakeCurrent()) {
|
||||
DLOG(ERROR) << "Failed to MakeCurrent()";
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE)
|
||||
static bool BindDecoderManagedImage(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub,
|
||||
uint32_t client_texture_id,
|
||||
uint32_t texture_target,
|
||||
const scoped_refptr<gl::GLImage>& image) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; won't BindImage().";
|
||||
return false;
|
||||
}
|
||||
|
||||
gpu::DecoderContext* command_decoder = stub->decoder_context();
|
||||
command_decoder->AttachImageToTextureWithDecoderBinding(
|
||||
client_texture_id, texture_target, image.get());
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
static bool BindClientManagedImage(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub,
|
||||
uint32_t client_texture_id,
|
||||
uint32_t texture_target,
|
||||
const scoped_refptr<gl::GLImage>& image) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; won't BindImage().";
|
||||
return false;
|
||||
}
|
||||
|
||||
gpu::DecoderContext* command_decoder = stub->decoder_context();
|
||||
command_decoder->AttachImageToTextureWithClientBinding(
|
||||
client_texture_id, texture_target, image.get());
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
static gpu::gles2::ContextGroup* GetContextGroup(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; no DecoderContext.";
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return stub->decoder_context()->GetContextGroup();
|
||||
}
|
||||
|
||||
static std::unique_ptr<gpu::gles2::AbstractTexture> CreateAbstractTexture(
|
||||
const base::WeakPtr<gpu::CommandBufferStub>& stub,
|
||||
GLenum target,
|
||||
GLenum internal_format,
|
||||
GLsizei width,
|
||||
GLsizei height,
|
||||
GLsizei depth,
|
||||
GLint border,
|
||||
GLenum format,
|
||||
GLenum type) {
|
||||
if (!stub) {
|
||||
DLOG(ERROR) << "Stub is gone; no DecoderContext.";
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return stub->decoder_context()->CreateAbstractTexture(
|
||||
target, internal_format, width, height, depth, border, format, type);
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
// DebugAutoLock works like AutoLock but only acquires the lock when
|
||||
// DCHECK is on.
|
||||
#if DCHECK_IS_ON()
|
||||
typedef base::AutoLock DebugAutoLock;
|
||||
#else
|
||||
class DebugAutoLock {
|
||||
public:
|
||||
explicit DebugAutoLock(base::Lock&) {}
|
||||
};
|
||||
#endif
|
||||
|
||||
// Receives incoming messages for the decoder. Operates exclusively on the IO
|
||||
// thread, since sometimes we want to do decodes directly from there.
|
||||
class GpuVideoDecodeAccelerator::MessageFilter
|
||||
: public mojom::GpuAcceleratedVideoDecoder {
|
||||
public:
|
||||
MessageFilter(GpuVideoDecodeAccelerator* owner,
|
||||
scoped_refptr<base::SequencedTaskRunner> owner_task_runner,
|
||||
bool decode_on_io)
|
||||
: owner_(owner),
|
||||
owner_task_runner_(std::move(owner_task_runner)),
|
||||
decode_on_io_(decode_on_io) {}
|
||||
~MessageFilter() override = default;
|
||||
|
||||
// Called from the main thread. Posts to `io_task_runner` to do the binding
|
||||
// and waits for completion before returning. This ensures the decoder's
|
||||
// endpoint is established before the synchronous request to establish it is
|
||||
// acknowledged to the client.
|
||||
bool Bind(mojo::PendingAssociatedReceiver<mojom::GpuAcceleratedVideoDecoder>
|
||||
receiver,
|
||||
const scoped_refptr<base::SequencedTaskRunner>& io_task_runner) {
|
||||
base::WaitableEvent bound_event;
|
||||
if (!io_task_runner->PostTask(
|
||||
FROM_HERE, base::BindOnce(&MessageFilter::BindOnIoThread,
|
||||
base::Unretained(this),
|
||||
std::move(receiver), &bound_event))) {
|
||||
return false;
|
||||
}
|
||||
bound_event.Wait();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Must be called on the IO thread. Posts back to the owner's task runner to
|
||||
// destroy it.
|
||||
void RequestShutdown() {
|
||||
if (!owner_)
|
||||
return;
|
||||
|
||||
// Must be reset here on the IO thread before `this` is destroyed.
|
||||
receiver_.reset();
|
||||
|
||||
GpuVideoDecodeAccelerator* owner = owner_;
|
||||
owner_ = nullptr;
|
||||
|
||||
// Invalidate any IO thread WeakPtrs which may be held by the
|
||||
// VideoDecodeAccelerator, and post to delete our owner which will in turn
|
||||
// delete us. Note that it is unsafe to access any members of `this` once
|
||||
// the task below is posted.
|
||||
owner->weak_factory_for_io_.InvalidateWeakPtrs();
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::DeleteSelfNow,
|
||||
base::Unretained(owner)));
|
||||
}
|
||||
|
||||
// mojom::GpuAcceleratedVideoDecoder:
|
||||
void Decode(BitstreamBuffer buffer) override;
|
||||
void AssignPictureBuffers(
|
||||
std::vector<mojom::PictureBufferAssignmentPtr> assignments) override;
|
||||
void ReusePictureBuffer(int32_t picture_buffer_id) override;
|
||||
void Flush(FlushCallback callback) override;
|
||||
void Reset(ResetCallback callback) override;
|
||||
void SetOverlayInfo(const OverlayInfo& overlay_info) override;
|
||||
|
||||
private:
|
||||
void BindOnIoThread(mojo::PendingAssociatedReceiver<
|
||||
mojom::GpuAcceleratedVideoDecoder> receiver,
|
||||
base::WaitableEvent* bound_event) {
|
||||
receiver_.Bind(std::move(receiver));
|
||||
receiver_.set_disconnect_handler(
|
||||
base::BindOnce(&MessageFilter::OnDisconnect, base::Unretained(this)));
|
||||
bound_event->Signal();
|
||||
}
|
||||
|
||||
void OnDisconnect() {
|
||||
if (!owner_)
|
||||
return;
|
||||
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnDestroy,
|
||||
base::Unretained(owner_)));
|
||||
}
|
||||
|
||||
raw_ptr<GpuVideoDecodeAccelerator> owner_;
|
||||
const scoped_refptr<base::SequencedTaskRunner> owner_task_runner_;
|
||||
const bool decode_on_io_;
|
||||
mojo::AssociatedReceiver<mojom::GpuAcceleratedVideoDecoder> receiver_{this};
|
||||
};
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::Decode(BitstreamBuffer buffer) {
|
||||
if (!owner_)
|
||||
return;
|
||||
|
||||
if (decode_on_io_) {
|
||||
owner_->OnDecode(std::move(buffer));
|
||||
} else {
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnDecode,
|
||||
base::Unretained(owner_), std::move(buffer)));
|
||||
}
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::AssignPictureBuffers(
|
||||
std::vector<mojom::PictureBufferAssignmentPtr> assignments) {
|
||||
if (!owner_)
|
||||
return;
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(&GpuVideoDecodeAccelerator::OnAssignPictureBuffers,
|
||||
base::Unretained(owner_), std::move(assignments)));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::ReusePictureBuffer(
|
||||
int32_t picture_buffer_id) {
|
||||
if (!owner_)
|
||||
return;
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(&GpuVideoDecodeAccelerator::OnReusePictureBuffer,
|
||||
base::Unretained(owner_), picture_buffer_id));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::Flush(FlushCallback callback) {
|
||||
if (!owner_)
|
||||
return;
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnFlush,
|
||||
base::Unretained(owner_), std::move(callback)));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::Reset(ResetCallback callback) {
|
||||
if (!owner_)
|
||||
return;
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnReset,
|
||||
base::Unretained(owner_), std::move(callback)));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::MessageFilter::SetOverlayInfo(
|
||||
const OverlayInfo& overlay_info) {
|
||||
if (!owner_)
|
||||
return;
|
||||
owner_task_runner_->PostTask(
|
||||
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnSetOverlayInfo,
|
||||
base::Unretained(owner_), overlay_info));
|
||||
}
|
||||
|
||||
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
|
||||
gpu::CommandBufferStub* stub,
|
||||
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
|
||||
const AndroidOverlayMojoFactoryCB& overlay_factory_cb)
|
||||
: stub_(stub),
|
||||
texture_target_(0),
|
||||
pixel_format_(PIXEL_FORMAT_UNKNOWN),
|
||||
textures_per_buffer_(0),
|
||||
child_task_runner_(base::SingleThreadTaskRunner::GetCurrentDefault()),
|
||||
io_task_runner_(io_task_runner),
|
||||
overlay_factory_cb_(overlay_factory_cb) {
|
||||
DCHECK(stub_);
|
||||
stub_->AddDestructionObserver(this);
|
||||
gl_client_.get_context =
|
||||
base::BindRepeating(&GetGLContext, stub_->AsWeakPtr());
|
||||
gl_client_.make_context_current =
|
||||
base::BindRepeating(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
|
||||
// The semantics of |bind_image| vary per-platform: On Windows and Apple it
|
||||
// must mark the image as needing binding by the decoder, while on other
|
||||
// platforms it must mark the image as *not* needing binding by the decoder.
|
||||
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE)
|
||||
gl_client_.bind_image =
|
||||
base::BindRepeating(&BindDecoderManagedImage, stub_->AsWeakPtr());
|
||||
#else
|
||||
gl_client_.bind_image =
|
||||
base::BindRepeating(&BindClientManagedImage, stub_->AsWeakPtr());
|
||||
#endif
|
||||
gl_client_.get_context_group =
|
||||
base::BindRepeating(&GetContextGroup, stub_->AsWeakPtr());
|
||||
gl_client_.create_abstract_texture =
|
||||
base::BindRepeating(&CreateAbstractTexture, stub_->AsWeakPtr());
|
||||
gl_client_.is_passthrough =
|
||||
stub_->decoder_context()->GetFeatureInfo()->is_passthrough_cmd_decoder();
|
||||
gl_client_.supports_arb_texture_rectangle = stub_->decoder_context()
|
||||
->GetFeatureInfo()
|
||||
->feature_flags()
|
||||
.arb_texture_rectangle;
|
||||
}
|
||||
|
||||
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
|
||||
// This class can only be self-deleted from OnWillDestroyStub(), which means
|
||||
// the VDA has already been destroyed in there.
|
||||
DCHECK(!video_decode_accelerator_);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::DeleteSelfNow() {
|
||||
delete this;
|
||||
}
|
||||
|
||||
// static
|
||||
gpu::VideoDecodeAcceleratorCapabilities
|
||||
GpuVideoDecodeAccelerator::GetCapabilities(
|
||||
const gpu::GpuPreferences& gpu_preferences,
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds) {
|
||||
return GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
|
||||
gpu_preferences, workarounds);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::NotifyInitializationComplete(
|
||||
DecoderStatus status) {
|
||||
decoder_client_->OnInitializationComplete(status.is_ok());
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
|
||||
uint32_t requested_num_of_buffers,
|
||||
VideoPixelFormat format,
|
||||
uint32_t textures_per_buffer,
|
||||
const gfx::Size& dimensions,
|
||||
uint32_t texture_target) {
|
||||
if (dimensions.width() > limits::kMaxDimension ||
|
||||
dimensions.height() > limits::kMaxDimension ||
|
||||
dimensions.GetArea() > limits::kMaxCanvas) {
|
||||
NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
|
||||
return;
|
||||
}
|
||||
|
||||
texture_dimensions_ = dimensions;
|
||||
textures_per_buffer_ = textures_per_buffer;
|
||||
texture_target_ = texture_target;
|
||||
pixel_format_ = format;
|
||||
|
||||
decoder_client_->OnProvidePictureBuffers(requested_num_of_buffers, format,
|
||||
textures_per_buffer, dimensions,
|
||||
texture_target);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::DismissPictureBuffer(
|
||||
int32_t picture_buffer_id) {
|
||||
// Notify client that picture buffer is now unused.
|
||||
decoder_client_->OnDismissPictureBuffer(picture_buffer_id);
|
||||
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
|
||||
uncleared_textures_.erase(picture_buffer_id);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::PictureReady(const Picture& picture) {
|
||||
// VDA may call PictureReady on IO thread. SetTextureCleared should run on
|
||||
// the child thread. VDA is responsible to call PictureReady on the child
|
||||
// thread when a picture buffer is delivered the first time.
|
||||
if (child_task_runner_->BelongsToCurrentThread()) {
|
||||
SetTextureCleared(picture);
|
||||
} else {
|
||||
DCHECK(io_task_runner_->BelongsToCurrentThread());
|
||||
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
|
||||
DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
|
||||
}
|
||||
|
||||
auto params = mojom::PictureReadyParams::New();
|
||||
params->picture_buffer_id = picture.picture_buffer_id();
|
||||
params->bitstream_buffer_id = picture.bitstream_buffer_id();
|
||||
params->visible_rect = picture.visible_rect();
|
||||
params->color_space = picture.color_space();
|
||||
params->allow_overlay = picture.allow_overlay();
|
||||
params->read_lock_fences_enabled = picture.read_lock_fences_enabled();
|
||||
params->size_changed = picture.size_changed();
|
||||
params->surface_texture = picture.texture_owner();
|
||||
params->wants_promotion_hint = picture.wants_promotion_hint();
|
||||
decoder_client_->OnPictureReady(std::move(params));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
|
||||
int32_t bitstream_buffer_id) {
|
||||
decoder_client_->OnBitstreamBufferProcessed(bitstream_buffer_id);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::NotifyFlushDone() {
|
||||
DCHECK(!pending_flushes_.empty());
|
||||
std::move(pending_flushes_.front()).Run();
|
||||
pending_flushes_.pop_front();
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::NotifyResetDone() {
|
||||
DCHECK(!pending_resets_.empty());
|
||||
std::move(pending_resets_.front()).Run();
|
||||
pending_resets_.pop_front();
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::NotifyError(
|
||||
VideoDecodeAccelerator::Error error) {
|
||||
decoder_client_->OnError(error);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnWillDestroyStub(bool have_context) {
|
||||
// The stub is going away, so we have to stop and destroy VDA here, before
|
||||
// returning, because the VDA may need the GL context to run and/or do its
|
||||
// cleanup. We cannot destroy the VDA before the IO thread message filter is
|
||||
// removed however, since we cannot service incoming messages with VDA gone.
|
||||
// We cannot simply check for existence of VDA on IO thread though, because
|
||||
// we don't want to synchronize the IO thread with the ChildThread.
|
||||
// So we have to wait for the RemoveFilter callback here instead and remove
|
||||
// the VDA after it arrives and before returning.
|
||||
stub_->RemoveDestructionObserver(this);
|
||||
if (filter_) {
|
||||
io_task_runner_->PostTask(FROM_HERE,
|
||||
base::BindOnce(&MessageFilter::RequestShutdown,
|
||||
base::Unretained(filter_.get())));
|
||||
}
|
||||
|
||||
video_decode_accelerator_.reset();
|
||||
}
|
||||
|
||||
bool GpuVideoDecodeAccelerator::Initialize(
|
||||
const VideoDecodeAccelerator::Config& config,
|
||||
mojo::PendingAssociatedReceiver<mojom::GpuAcceleratedVideoDecoder> receiver,
|
||||
mojo::PendingAssociatedRemote<mojom::GpuAcceleratedVideoDecoderClient>
|
||||
client) {
|
||||
DCHECK(!video_decode_accelerator_);
|
||||
|
||||
#if !BUILDFLAG(IS_WIN)
|
||||
// Ensure we will be able to get a GL context at all before initializing
|
||||
// non-Windows VDAs.
|
||||
if (!gl_client_.make_context_current.Run())
|
||||
return false;
|
||||
#endif
|
||||
|
||||
std::unique_ptr<GpuVideoDecodeAcceleratorFactory> vda_factory =
|
||||
GpuVideoDecodeAcceleratorFactory::Create(gl_client_);
|
||||
if (!vda_factory) {
|
||||
LOG(ERROR) << "Failed creating the VDA factory";
|
||||
return false;
|
||||
}
|
||||
LOG(WARNING) << "Created the VDA factory";
|
||||
|
||||
const gpu::GpuDriverBugWorkarounds& gpu_workarounds =
|
||||
stub_->channel()->gpu_channel_manager()->gpu_driver_bug_workarounds();
|
||||
const gpu::GpuPreferences& gpu_preferences =
|
||||
stub_->channel()->gpu_channel_manager()->gpu_preferences();
|
||||
|
||||
if (config.output_mode !=
|
||||
VideoDecodeAccelerator::Config::OutputMode::ALLOCATE) {
|
||||
DLOG(ERROR) << "Only ALLOCATE mode is supported";
|
||||
return false;
|
||||
}
|
||||
|
||||
video_decode_accelerator_ =
|
||||
vda_factory->CreateVDA(this, config, gpu_workarounds, gpu_preferences);
|
||||
if (!video_decode_accelerator_) {
|
||||
LOG(ERROR) << "HW video decode not available for profile "
|
||||
<< GetProfileName(config.profile)
|
||||
<< (config.is_encrypted() ? " with encryption" : "");
|
||||
return false;
|
||||
}
|
||||
LOG(WARNING) << "Created VDA";
|
||||
|
||||
decoder_client_.Bind(std::move(client), io_task_runner_);
|
||||
|
||||
// Attempt to set up performing decoding tasks on IO thread, if supported by
|
||||
// the VDA.
|
||||
bool decode_on_io =
|
||||
video_decode_accelerator_->TryToSetupDecodeOnSeparateSequence(
|
||||
weak_factory_for_io_.GetWeakPtr(), io_task_runner_);
|
||||
|
||||
// Bind the receiver on the IO thread. We wait here for it to be bound
|
||||
// before returning and signaling that the decoder has been created.
|
||||
filter_ =
|
||||
std::make_unique<MessageFilter>(this, stub_->task_runner(), decode_on_io);
|
||||
return filter_->Bind(std::move(receiver), io_task_runner_);
|
||||
}
|
||||
|
||||
// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateSequence() succeeded,
|
||||
// otherwise on the main thread.
|
||||
void GpuVideoDecodeAccelerator::OnDecode(BitstreamBuffer bitstream_buffer) {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
video_decode_accelerator_->Decode(std::move(bitstream_buffer));
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
|
||||
std::vector<mojom::PictureBufferAssignmentPtr> assignments) {
|
||||
gpu::DecoderContext* decoder_context = stub_->decoder_context();
|
||||
gpu::gles2::TextureManager* texture_manager =
|
||||
stub_->decoder_context()->GetContextGroup()->texture_manager();
|
||||
|
||||
std::vector<PictureBuffer> buffers;
|
||||
std::vector<std::vector<scoped_refptr<gpu::gles2::TextureRef>>> textures;
|
||||
for (const auto& assignment : assignments) {
|
||||
if (assignment->buffer_id < 0) {
|
||||
DLOG(ERROR) << "Buffer id " << assignment->buffer_id << " out of range";
|
||||
NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
|
||||
return;
|
||||
}
|
||||
std::vector<scoped_refptr<gpu::gles2::TextureRef>> current_textures;
|
||||
PictureBuffer::TextureIds buffer_texture_ids = assignment->texture_ids;
|
||||
PictureBuffer::TextureIds service_ids;
|
||||
if (buffer_texture_ids.size() != textures_per_buffer_) {
|
||||
DLOG(ERROR) << "Requested " << textures_per_buffer_
|
||||
<< " textures per picture buffer, got "
|
||||
<< buffer_texture_ids.size();
|
||||
NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
|
||||
return;
|
||||
}
|
||||
for (size_t j = 0; j < textures_per_buffer_; j++) {
|
||||
gpu::TextureBase* texture_base =
|
||||
decoder_context->GetTextureBase(buffer_texture_ids[j]);
|
||||
if (!texture_base) {
|
||||
DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
|
||||
NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
|
||||
return;
|
||||
}
|
||||
|
||||
if (texture_base->target() != texture_target_) {
|
||||
DLOG(ERROR) << "Texture target mismatch for texture id "
|
||||
<< buffer_texture_ids[j];
|
||||
NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
|
||||
return;
|
||||
}
|
||||
|
||||
if (texture_manager) {
|
||||
gpu::gles2::TextureRef* texture_ref =
|
||||
texture_manager->GetTexture(buffer_texture_ids[j]);
|
||||
if (texture_ref) {
|
||||
gpu::gles2::Texture* info = texture_ref->texture();
|
||||
if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
|
||||
texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
|
||||
// These textures have their dimensions defined by the underlying
|
||||
// storage.
|
||||
// Use |texture_dimensions_| for this size.
|
||||
texture_manager->SetLevelInfo(
|
||||
texture_ref, texture_target_, 0, GL_RGBA,
|
||||
texture_dimensions_.width(), texture_dimensions_.height(), 1, 0,
|
||||
GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect());
|
||||
} else {
|
||||
// For other targets, texture dimensions should already be defined.
|
||||
GLsizei width = 0, height = 0;
|
||||
info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
|
||||
if (width != texture_dimensions_.width() ||
|
||||
height != texture_dimensions_.height()) {
|
||||
DLOG(ERROR) << "Size mismatch for texture id "
|
||||
<< buffer_texture_ids[j];
|
||||
NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO(dshwang): after moving to D3D11, remove this.
|
||||
// https://crbug.com/438691
|
||||
GLenum format =
|
||||
video_decode_accelerator_->GetSurfaceInternalFormat();
|
||||
if (format != GL_RGBA) {
|
||||
DCHECK(format == GL_BGRA_EXT);
|
||||
texture_manager->SetLevelInfo(texture_ref, texture_target_, 0,
|
||||
format, width, height, 1, 0, format,
|
||||
GL_UNSIGNED_BYTE, gfx::Rect());
|
||||
}
|
||||
}
|
||||
current_textures.push_back(texture_ref);
|
||||
}
|
||||
}
|
||||
service_ids.push_back(texture_base->service_id());
|
||||
}
|
||||
textures.push_back(current_textures);
|
||||
buffers.emplace_back(assignment->buffer_id, texture_dimensions_,
|
||||
buffer_texture_ids, service_ids, texture_target_,
|
||||
pixel_format_);
|
||||
}
|
||||
{
|
||||
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
|
||||
for (uint32_t i = 0; i < assignments.size(); ++i)
|
||||
uncleared_textures_[assignments[i]->buffer_id] = textures[i];
|
||||
}
|
||||
video_decode_accelerator_->AssignPictureBuffers(buffers);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
|
||||
int32_t picture_buffer_id) {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnFlush(base::OnceClosure callback) {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
pending_flushes_.push_back(
|
||||
base::BindPostTask(io_task_runner_, std::move(callback)));
|
||||
video_decode_accelerator_->Flush();
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnReset(base::OnceClosure callback) {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
pending_resets_.push_back(
|
||||
base::BindPostTask(io_task_runner_, std::move(callback)));
|
||||
video_decode_accelerator_->Reset();
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnSetOverlayInfo(
|
||||
const OverlayInfo& overlay_info) {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
video_decode_accelerator_->SetOverlayInfo(overlay_info);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::OnDestroy() {
|
||||
DCHECK(video_decode_accelerator_);
|
||||
OnWillDestroyStub(false);
|
||||
}
|
||||
|
||||
void GpuVideoDecodeAccelerator::SetTextureCleared(const Picture& picture) {
|
||||
DCHECK(child_task_runner_->BelongsToCurrentThread());
|
||||
DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
|
||||
auto it = uncleared_textures_.find(picture.picture_buffer_id());
|
||||
if (it == uncleared_textures_.end())
|
||||
return; // the texture has been cleared
|
||||
|
||||
for (auto texture_ref : it->second) {
|
||||
GLenum target = texture_ref->texture()->target();
|
||||
gpu::gles2::TextureManager* texture_manager =
|
||||
stub_->decoder_context()->GetContextGroup()->texture_manager();
|
||||
texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
|
||||
}
|
||||
uncleared_textures_.erase(it);
|
||||
}
|
||||
|
||||
} // namespace media
|
File diff suppressed because it is too large
Load diff
|
@ -1,371 +0,0 @@
|
|||
// Copyright 2023 The Chromium Authors and Alex313031
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
//
|
||||
// This file contains an implementation of VideoDecoderAccelerator
|
||||
// that utilizes hardware video decoder present on Intel CPUs.
|
||||
|
||||
#ifndef MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
|
||||
#define MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <list>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "base/containers/queue.h"
|
||||
#include "base/containers/small_map.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/weak_ptr.h"
|
||||
#include "base/synchronization/condition_variable.h"
|
||||
#include "base/synchronization/lock.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/thread_annotations.h"
|
||||
#include "base/threading/thread.h"
|
||||
#include "base/trace_event/memory_dump_provider.h"
|
||||
#include "build/build_config.h"
|
||||
#include "media/base/bitstream_buffer.h"
|
||||
#include "media/gpu/decode_surface_handler.h"
|
||||
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
|
||||
#include "media/gpu/media_gpu_export.h"
|
||||
#include "media/gpu/vaapi/vaapi_picture_factory.h"
|
||||
#include "media/gpu/vaapi/vaapi_wrapper.h"
|
||||
#include "media/video/picture.h"
|
||||
#include "media/video/video_decode_accelerator.h"
|
||||
|
||||
namespace gl {
|
||||
class GLImage;
|
||||
}
|
||||
|
||||
namespace media {
|
||||
|
||||
class AcceleratedVideoDecoder;
|
||||
template <typename T>
|
||||
class ScopedID;
|
||||
class VaapiVideoDecoderDelegate;
|
||||
class VaapiPicture;
|
||||
|
||||
// Class to provide video decode acceleration for Intel systems with hardware
|
||||
// support for it, and on which libva is available.
|
||||
// Decoding tasks are performed in a separate decoding thread.
|
||||
//
|
||||
// Threading/life-cycle: this object is created & destroyed on the GPU
|
||||
// ChildThread. A few methods on it are called on the decoder thread which is
|
||||
// stopped during |this->Destroy()|, so any tasks posted to the decoder thread
|
||||
// can assume |*this| is still alive. See |weak_this_| below for more details.
|
||||
class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
|
||||
: public VideoDecodeAccelerator,
|
||||
public DecodeSurfaceHandler<VASurface>,
|
||||
public base::trace_event::MemoryDumpProvider {
|
||||
public:
|
||||
VaapiVideoDecodeAccelerator(
|
||||
const MakeGLContextCurrentCallback& make_context_current_cb,
|
||||
const BindGLImageCallback& bind_image_cb);
|
||||
|
||||
VaapiVideoDecodeAccelerator(const VaapiVideoDecodeAccelerator&) = delete;
|
||||
VaapiVideoDecodeAccelerator& operator=(const VaapiVideoDecodeAccelerator&) =
|
||||
delete;
|
||||
|
||||
~VaapiVideoDecodeAccelerator() override;
|
||||
|
||||
// VideoDecodeAccelerator implementation.
|
||||
bool Initialize(const Config& config, Client* client) override;
|
||||
void Decode(BitstreamBuffer bitstream_buffer) override;
|
||||
void Decode(scoped_refptr<DecoderBuffer> buffer,
|
||||
int32_t bitstream_id) override;
|
||||
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
|
||||
#if BUILDFLAG(IS_OZONE)
|
||||
void ImportBufferForPicture(
|
||||
int32_t picture_buffer_id,
|
||||
VideoPixelFormat pixel_format,
|
||||
gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) override;
|
||||
#endif
|
||||
void ReusePictureBuffer(int32_t picture_buffer_id) override;
|
||||
void Flush() override;
|
||||
void Reset() override;
|
||||
void Destroy() override;
|
||||
bool TryToSetupDecodeOnSeparateSequence(
|
||||
const base::WeakPtr<Client>& decode_client,
|
||||
const scoped_refptr<base::SequencedTaskRunner>& decode_task_runner)
|
||||
override;
|
||||
|
||||
static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
|
||||
|
||||
static bool IsVppProfileSupported();
|
||||
|
||||
// DecodeSurfaceHandler implementation.
|
||||
scoped_refptr<VASurface> CreateSurface() override;
|
||||
void SurfaceReady(scoped_refptr<VASurface> va_surface,
|
||||
int32_t bitstream_id,
|
||||
const gfx::Rect& visible_rect,
|
||||
const VideoColorSpace& color_space) override;
|
||||
|
||||
// base::trace_event::MemoryDumpProvider implementation.
|
||||
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
|
||||
base::trace_event::ProcessMemoryDump* pmd) override;
|
||||
|
||||
private:
|
||||
friend class VaapiVideoDecodeAcceleratorTest;
|
||||
|
||||
// An input buffer with id provided by the client and awaiting consumption.
|
||||
class InputBuffer;
|
||||
// A self-cleaning VASurfaceID.
|
||||
using ScopedVASurfaceID = ScopedID<VASurfaceID>;
|
||||
|
||||
// Notify the client that an error has occurred and decoding cannot continue.
|
||||
void NotifyError(Error error);
|
||||
void NotifyStatus(VaapiStatus status);
|
||||
|
||||
// Queue a input buffer for decode.
|
||||
void QueueInputBuffer(scoped_refptr<DecoderBuffer> buffer,
|
||||
int32_t bitstream_id);
|
||||
|
||||
// Gets a new |current_input_buffer_| from |input_buffers_| and sets it up in
|
||||
// |decoder_|. This method will sleep if no |input_buffers_| are available.
|
||||
// Returns true if a new buffer has been set up, false if an early exit has
|
||||
// been requested (due to initiated reset/flush/destroy).
|
||||
bool GetCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
|
||||
// Signals the client that |curr_input_buffer_| has been read and can be
|
||||
// returned. Will also release the mapping.
|
||||
void ReturnCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
|
||||
// Waits for more surfaces to become available. Returns true once they do or
|
||||
// false if an early exit has been requested (due to an initiated
|
||||
// reset/flush/destroy).
|
||||
bool WaitForSurfaces_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Continue decoding given input buffers and sleep waiting for input/output
  // as needed. Will exit if a new set of surfaces or reset/flush/destroy
  // is requested.
  void DecodeTask();

  // Scheduled after receiving a flush request and executed after the current
  // decoding task finishes decoding pending inputs. Makes the decoder return
  // all remaining output pictures and puts it in an idle state, ready
  // to resume if needed and schedules a FinishFlush.
  void FlushTask();

  // Scheduled by the FlushTask after decoder is flushed to put VAVDA into idle
  // state and notify the client that flushing has been finished.
  void FinishFlush();

  // Scheduled after receiving a reset request and executed after the current
  // decoding task finishes decoding the current frame. Puts the decoder into
  // an idle state, ready to resume if needed, discarding decoded but not yet
  // outputted pictures (decoder keeps ownership of their associated picture
  // buffers). Schedules a FinishReset afterwards.
  void ResetTask();

  // Scheduled by ResetTask after it's done putting VAVDA into an idle state.
  // Drops remaining input buffers and notifies the client that reset has been
  // finished.
  void FinishReset();

  // Helper for Destroy(), doing all the actual work except for deleting self.
  void Cleanup();

  // Get a usable framebuffer configuration for use in binding textures
  // or return false on failure.
  bool InitializeFBConfig();

  // Callback to be executed once we have a |va_surface| to be output and an
  // available VaapiPicture in |available_picture_buffers_| for output. Puts
  // contents of |va_surface| into the latter, releases the surface and passes
  // the resulting picture to |client_| along with |visible_rect|.
  void OutputPicture(scoped_refptr<VASurface> va_surface,
                     int32_t input_id,
                     gfx::Rect visible_rect,
                     const VideoColorSpace& picture_color_space);

  // Try to OutputPicture() if we have both a ready surface and picture.
  void TryOutputPicture();

  // Called when a VASurface is no longer in use by |decoder_| nor |client_|.
  // Returns it to |available_va_surfaces_|. |va_surface_id| is not used but it
  // must be here to bind this method as VASurface::ReleaseCB.
  void RecycleVASurface(std::unique_ptr<ScopedVASurfaceID> va_surface,
                        VASurfaceID va_surface_id);

  // Request a new set of |num_pics| PictureBuffers to be allocated by
  // |client_|. Up to |num_reference_frames| out of |num_pics| might be needed
  // by |decoder_|.
  void InitiateSurfaceSetChange(size_t num_pics,
                                gfx::Size size,
                                size_t num_reference_frames,
                                const gfx::Rect& visible_rect);

  // Check if the surfaces have been released or post ourselves for later.
  void TryFinishSurfaceSetChange();

  // Different modes of internal buffer allocations.
  enum class BufferAllocationMode {
    // Only using |client_|s provided PictureBuffers, none internal.
    kNone,

    // Using a reduced amount of |client_|s provided PictureBuffers and
    // |decoder_|s GetNumReferenceFrames() internally.
    kSuperReduced,

    // Similar to kSuperReduced, but we have to slightly increase the number of
    // PictureBuffers allocated for the |client_|.
    kReduced,

    // VaapiVideoDecodeAccelerator can work with this mode on all platforms.
    // Using |client_|s provided PictureBuffers and as many internally
    // allocated.
    kNormal,

    // Wrap the VA-API driver for the VDPAU backend on NVIDIA.
    kWrapVdpau,
  };
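How these modes map onto concrete buffer counts is decided in DecideBufferAllocationMode() below. As a purely hypothetical illustration (not the actual Chromium logic; the numbers are invented), the modes can be read as "who allocates what":

#include <cstddef>

// Hypothetical sketch only. The enumerators mirror BufferAllocationMode above.
enum class ModeSketch { kNone, kSuperReduced, kReduced, kNormal, kWrapVdpau };

struct BufferPlanSketch {
  std::size_t client_picture_buffers;  // PictureBuffers requested from the client
  std::size_t internal_va_surfaces;    // surfaces allocated inside VAVDA
};

BufferPlanSketch PlanFor(ModeSketch mode,
                         std::size_t pipeline_depth,
                         std::size_t num_reference_frames) {
  switch (mode) {
    case ModeSketch::kNone:          // client provides everything
      return {pipeline_depth + num_reference_frames, 0};
    case ModeSketch::kSuperReduced:  // reference frames kept internally
      return {pipeline_depth, num_reference_frames};
    case ModeSketch::kReduced:       // as above, plus a small client-side margin
      return {pipeline_depth + 1, num_reference_frames};
    case ModeSketch::kNormal:        // client buffers plus a full internal set
    case ModeSketch::kWrapVdpau:
      return {pipeline_depth, pipeline_depth + num_reference_frames};
  }
  return {pipeline_depth, num_reference_frames};
}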
  // Decides the concrete buffer allocation mode, depending on the hardware
  // platform and other parameters.
  BufferAllocationMode DecideBufferAllocationMode();
  bool IsBufferAllocationModeReducedOrSuperReduced() const;

  // VAVDA state.
  enum State {
    // Initialize() not called yet or failed.
    kUninitialized,
    // DecodeTask running.
    kDecoding,
    // Resetting, waiting for decoder to finish current task and cleanup.
    kResetting,
    // Idle, decoder in state ready to start/resume decoding.
    kIdle,
    // Destroying, waiting for the decoder to finish current task.
    kDestroying,
  };

  base::Lock lock_;
  State state_ GUARDED_BY(lock_);
  // Only used on |task_runner_|.
  Config::OutputMode output_mode_;

  // Queue of available InputBuffers.
  base::queue<std::unique_ptr<InputBuffer>> input_buffers_ GUARDED_BY(lock_);
  // Signalled when input buffers are queued onto the |input_buffers_| queue.
  base::ConditionVariable input_ready_;

  // Current input buffer at decoder. Only used on
  // |decoder_thread_task_runner_|.
  std::unique_ptr<InputBuffer> curr_input_buffer_;

  // Only used on |task_runner_|.
  std::unique_ptr<VaapiPictureFactory> vaapi_picture_factory_;

  // The following variables are constructed/initialized in Initialize() when
  // the codec information is received. |vaapi_wrapper_| is thread safe.
  scoped_refptr<VaapiWrapper> vaapi_wrapper_;
  // Only used on |decoder_thread_task_runner_|.
  std::unique_ptr<AcceleratedVideoDecoder> decoder_;
  // TODO(crbug.com/1022246): Instead of having the raw pointer here, get the
  // pointer from AcceleratedVideoDecoder.
  raw_ptr<VaapiVideoDecoderDelegate> decoder_delegate_ = nullptr;

  // Filled in during Initialize().
  BufferAllocationMode buffer_allocation_mode_;

  // VaapiWrapper for VPP (Video Post Processing). This is used for copying
  // from a decoded surface to a surface bound to client's PictureBuffer.
  scoped_refptr<VaapiWrapper> vpp_vaapi_wrapper_;

  // All allocated VaapiPictures, regardless of their current state. Pictures
  // are allocated at AssignPictureBuffers() and are kept until dtor or
  // TryFinishSurfaceSetChange(). Comes after |vaapi_wrapper_| to ensure all
  // pictures are destroyed before this is destroyed.
  base::small_map<std::map<int32_t, std::unique_ptr<VaapiPicture>>> pictures_
      GUARDED_BY(lock_);
  // List of PictureBuffer ids available to be sent to |client_| via
  // OutputPicture() (|client_| returns them via ReusePictureBuffer()).
  std::list<int32_t> available_picture_buffers_ GUARDED_BY(lock_);

  // VASurfaces available and that can be passed to |decoder_| for its use upon
  // CreateSurface() request (and then returned via RecycleVASurface()).
  std::list<std::unique_ptr<ScopedVASurfaceID>> available_va_surfaces_
      GUARDED_BY(lock_);
  // Signalled when output surfaces are queued into |available_va_surfaces_|.
  base::ConditionVariable surfaces_available_;
  // Format of the VASurfaceIDs, filled in when they are created.
  unsigned int va_surface_format_;

  // Pending output requests from the decoder. When it indicates that we should
  // output a surface and we have an available Picture (i.e. texture) ready
  // to use, we'll execute the callback passing the Picture. The callback
  // will put the contents of the surface into the picture and return it to
  // the client, releasing the surface as well.
  // If we don't have any available |pictures_| at the time when the decoder
  // requests output, we'll store the request in this queue for later and run
  // it once the client gives us more textures via ReusePictureBuffer().
  // Only used on |task_runner_|.
  base::queue<base::OnceClosure> pending_output_cbs_;
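A self-contained sketch of the deferred-output behaviour described in the comment above, with std::function standing in for base::OnceClosure (the real logic lives in the implementation file and differs in detail):

#include <cstdint>
#include <functional>
#include <list>
#include <queue>
#include <utility>

// Sketch: defer output requests until the client returns a picture buffer.
struct DeferredOutputSketch {
  std::list<int32_t> available_picture_buffers;
  std::queue<std::function<void()>> pending_output_cbs;

  // Decoder has a surface ready: output now, or remember the request.
  void OnSurfaceReady(std::function<void()> output_cb) {
    if (available_picture_buffers.empty()) {
      pending_output_cbs.push(std::move(output_cb));
      return;
    }
    output_cb();
  }

  // Client returned a picture buffer (ReusePictureBuffer() equivalent).
  void OnPictureBufferReused(int32_t id) {
    available_picture_buffers.push_back(id);
    if (!pending_output_cbs.empty()) {
      auto cb = std::move(pending_output_cbs.front());
      pending_output_cbs.pop();
      cb();  // run the oldest deferred output request
    }
  }
};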
  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder
  // thread back to the ChildThread. Because the decoder thread is a member of
  // this class, any task running on the decoder thread can rely on this
  // object still being alive. As a result, tasks posted from the ChildThread
  // to the decoder thread should use base::Unretained(this), and tasks posted
  // from the decoder thread to the ChildThread should use |weak_this_|.
  base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
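A minimal illustration of that convention, using the task runners and method names declared in this header (the call sites are invented; only the Unretained-vs-WeakPtr pattern is the point):

// ChildThread -> decoder thread: |this| outlives |decoder_thread_|, so
// base::Unretained() is safe here.
decoder_thread_task_runner_->PostTask(
    FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::FlushTask,
                              base::Unretained(this)));

// Decoder thread -> ChildThread: the VAVDA may already be gone by the time
// the task runs, so the weak pointer is used instead.
task_runner_->PostTask(
    FROM_HERE, base::BindOnce(&VaapiVideoDecodeAccelerator::FinishFlush,
                              weak_this_));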
  // Callback used to recycle VASurfaces. Only used on |task_runner_|.
  base::RepeatingCallback<void(std::unique_ptr<ScopedVASurfaceID>, VASurfaceID)>
      va_surface_recycle_cb_;

  // To expose client callbacks from VideoDecodeAccelerator. Used only on
  // |task_runner_|.
  std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
  base::WeakPtr<Client> client_;

  // ChildThread's task runner.
  const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;

  base::Thread decoder_thread_;
  // Use this to post tasks to |decoder_thread_| instead of
  // |decoder_thread_.task_runner()| because the latter will be NULL once
  // |decoder_thread_.Stop()| returns.
  scoped_refptr<base::SingleThreadTaskRunner> decoder_thread_task_runner_;

  // Whether we are waiting for any |pending_output_cbs_| to be run before
  // notifying the client that the flush is done. Only used on |task_runner_|.
  bool finish_flush_pending_;

  // Decoder requested a new surface set and we are waiting for all the
  // surfaces to be returned before we can free them. Only used on
  // |task_runner_|.
  bool awaiting_va_surfaces_recycle_;

  // Last requested number/resolution/visible rectangle of output
  // PictureBuffers.
  size_t requested_num_pics_;
  gfx::Size requested_pic_size_;
  gfx::Rect requested_visible_rect_;
  // Potential extra PictureBuffers to request, used only on
  // BufferAllocationMode::kNone, see DecideBufferAllocationMode().
  size_t num_extra_pics_ = 0;

  // Max number of reference frames needed by |decoder_|. Only used on
  // |task_runner_| and when in BufferAllocationMode::kNone.
  size_t requested_num_reference_frames_;
  size_t previously_requested_num_reference_frames_;

  // The video stream's profile.
  VideoCodecProfile profile_;

  // Callback to make GL context current.
  MakeGLContextCurrentCallback make_context_current_cb_;

  // Callback to bind a GLImage to a given texture.
  BindGLImageCallback bind_image_cb_;

  // The WeakPtrFactory for |weak_this_|.
  base::WeakPtrFactory<VaapiVideoDecodeAccelerator> weak_this_factory_;
};

}  // namespace media

#endif  // MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
@@ -465,7 +465,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
  struct VABufferDescriptor {
    VABufferType type;
    size_t size;
    raw_ptr<const void> data;
    raw_ptr<const void, DanglingUntriaged> data;
  };
  [[nodiscard]] bool SubmitBuffers(
      const std::vector<VABufferDescriptor>& va_buffers);
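For context, a hedged usage sketch of the SubmitBuffers() interface shown in this hunk; the call site is invented, only the VABufferDescriptor field order and the [[nodiscard]] return come from the declaration above:

// Sketch: submitting one parameter buffer through SubmitBuffers().
VAPictureParameterBufferH264 pic_param = {};  // example VA-API payload
std::vector<VaapiWrapper::VABufferDescriptor> buffers = {
    {VAPictureParameterBufferType, sizeof(pic_param), &pic_param},
};
if (!vaapi_wrapper_->SubmitBuffers(buffers)) {
  return false;  // [[nodiscard]] keeps callers from silently ignoring failure
}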
@@ -58,6 +58,7 @@
#include "net/filter/filter_source_stream.h"
#include "net/filter/gzip_source_stream.h"
#include "net/filter/source_stream.h"
#include "net/filter/zstd_source_stream.h"
#include "net/first_party_sets/first_party_set_metadata.h"
#include "net/first_party_sets/same_party_context.h"
#include "net/http/http_content_disposition.h"
@@ -1259,6 +1260,7 @@ std::unique_ptr<SourceStream> URLRequestHttpJob::SetUpSourceStream() {
      case SourceStream::TYPE_BROTLI:
      case SourceStream::TYPE_DEFLATE:
      case SourceStream::TYPE_GZIP:
      case SourceStream::TYPE_ZSTD:
        if (request_->accepted_stream_types() &&
            !request_->accepted_stream_types()->contains(source_type)) {
          // If the source type is disabled, we treat it
@@ -1288,6 +1290,9 @@ std::unique_ptr<SourceStream> URLRequestHttpJob::SetUpSourceStream() {
      case SourceStream::TYPE_DEFLATE:
        downstream = GzipSourceStream::Create(std::move(upstream), type);
        break;
      case SourceStream::TYPE_ZSTD:
        downstream = CreateZstdSourceStream(std::move(upstream));
        break;
      case SourceStream::TYPE_NONE:
      case SourceStream::TYPE_UNKNOWN:
        NOTREACHED();
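The new TYPE_ZSTD branch hands the body to CreateZstdSourceStream(), which presumably wraps zstd's streaming decoder. Independent of Chromium's SourceStream plumbing, that underlying library API looks like the following self-contained sketch (error handling trimmed):

#include <zstd.h>

#include <cstddef>
#include <vector>

// Sketch: streaming zstd decompression of an in-memory buffer.
std::vector<char> DecompressZstd(const std::vector<char>& input) {
  ZSTD_DStream* stream = ZSTD_createDStream();
  ZSTD_initDStream(stream);

  std::vector<char> output;
  std::vector<char> chunk(ZSTD_DStreamOutSize());
  ZSTD_inBuffer in = {input.data(), input.size(), 0};
  while (in.pos < in.size) {
    ZSTD_outBuffer out = {chunk.data(), chunk.size(), 0};
    const size_t ret = ZSTD_decompressStream(stream, &out, &in);
    if (ZSTD_isError(ret))
      break;  // real code would surface the error to the caller
    output.insert(output.end(), chunk.data(), chunk.data() + out.pos);
  }
  ZSTD_freeDStream(stream);
  return output;
}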
@@ -1647,17 +1652,11 @@ void URLRequestHttpJob::RecordCompletionHistograms(CompletionCause reason) {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.HttpJob.PrefilterBytesRead.Cache",
                                  prefilter_bytes_read(), 1, 50000000, 50);

      if (response_info_->unused_since_prefetch)
        UMA_HISTOGRAM_COUNTS_1M("Net.Prefetch.HitBytes",
                                prefilter_bytes_read());
    } else {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.HttpJob.PrefilterBytesRead.Net",
                                  prefilter_bytes_read(), 1, 50000000, 50);

      if (request_info_.load_flags & LOAD_PREFETCH) {
        UMA_HISTOGRAM_COUNTS_1M("Net.Prefetch.PrefilterBytesReadFromNetwork",
                                prefilter_bytes_read());
      }
      if (is_https_google && used_quic) {
        UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpJob.TotalTimeNotCached.Secure.Quic",
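As an aside on the UMA_HISTOGRAM_CUSTOM_COUNTS calls in the hunk above: the three trailing arguments are the minimum sample, the maximum, and the bucket count. Restating one of the existing calls with those parameters annotated (same behaviour, comments are mine):

// Records prefilter_bytes_read() into 50 exponentially spaced buckets
// between 1 and 50000000.
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.HttpJob.PrefilterBytesRead.Net",
                            prefilter_bytes_read(),
                            /*min=*/1,
                            /*max=*/50000000,
                            /*bucket_count=*/50);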
2 src/third_party/zlib/BUILD.gn vendored
@@ -359,7 +359,7 @@ component("zlib") {
  if (is_android) {
    import("//build/config/android/config.gni")
    if (defined(android_ndk_root) && android_ndk_root != "") {
      deps += [ "//third_party/android_ndk:cpu_features" ]
      deps += [ "//third_party/cpu_features:ndk_compat" ]
    } else {
      assert(false, "CPU detection requires the Android NDK")
    }