Delete media directory

This commit is contained in:
Alexander David Frick 2022-07-09 18:37:22 -05:00 committed by GitHub
parent e17d0b25b1
commit b45b1bb78f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 0 additions and 8171 deletions

File diff suppressed because it is too large Load diff

View file

@@ -1,247 +0,0 @@
// Copyright 2022 The Chromium Authors and Alex313031. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
#include <memory>
#include "base/memory/ptr_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/config/gpu_preferences.h"
#include "media/base/media_switches.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/gpu_video_accelerator_util.h"
#include "media/gpu/macros.h"
#include "media/gpu/media_gpu_export.h"
#include "media/media_buildflags.h"
#if BUILDFLAG(IS_WIN)
#include "base/win/windows_version.h"
#include "media/gpu/windows/dxva_video_decode_accelerator_win.h"
#endif
#if BUILDFLAG(IS_MAC)
#include "media/gpu/mac/vt_video_decode_accelerator_mac.h"
#endif
#if BUILDFLAG(USE_VAAPI)
#include "media/gpu/vaapi/vaapi_video_decode_accelerator.h"
#include "ui/gl/gl_implementation.h"
#elif BUILDFLAG(USE_V4L2_CODEC)
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h"
#include "media/gpu/v4l2/v4l2_video_decode_accelerator.h"
#include "ui/gl/gl_surface_egl.h"
#endif
namespace media {
namespace {
// Queries the platform-specific VDA implementation(s) compiled into this
// build and returns their combined decode capabilities. Returns an empty
// capability set when accelerated video decode is disabled via
// |gpu_preferences|.
gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilitiesInternal(
    const gpu::GpuPreferences& gpu_preferences,
    const gpu::GpuDriverBugWorkarounds& workarounds) {
  if (gpu_preferences.disable_accelerated_video_decode)
    return gpu::VideoDecodeAcceleratorCapabilities();

  // Query VDAs for their capabilities and construct a set of supported
  // profiles for current platform. This must be done in the same order as in
  // CreateVDA(), as we currently preserve additional capabilities (such as
  // resolutions supported) only for the first VDA supporting the given codec
  // profile (instead of calculating a superset).
  // TODO(posciak,henryhsu): improve this so that we choose a superset of
  // resolutions and other supported profile parameters.
  VideoDecodeAccelerator::Capabilities capabilities;
#if BUILDFLAG(IS_WIN)
  capabilities.supported_profiles =
      DXVAVideoDecodeAccelerator::GetSupportedProfiles(gpu_preferences,
                                                       workarounds);
#elif BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#if BUILDFLAG(USE_VAAPI)
  capabilities.supported_profiles =
      VaapiVideoDecodeAccelerator::GetSupportedProfiles();
#elif BUILDFLAG(USE_V4L2_CODEC)
  // Merge profiles from both V4L2 VDA flavors, de-duplicating entries.
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
      V4L2VideoDecodeAccelerator::GetSupportedProfiles(),
      &capabilities.supported_profiles);
  GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
      V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles(),
      &capabilities.supported_profiles);
#endif
#elif BUILDFLAG(IS_MAC)
  capabilities.supported_profiles =
      VTVideoDecodeAccelerator::GetSupportedProfiles(workarounds);
#endif
  return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
      capabilities);
}
} // namespace
// static
// Factory entry point; the constructor is private, so callers obtain
// instances exclusively through here.
MEDIA_GPU_EXPORT std::unique_ptr<GpuVideoDecodeAcceleratorFactory>
GpuVideoDecodeAcceleratorFactory::Create(
    const GpuVideoDecodeGLClient& gl_client) {
  // Static member functions may reach the private constructor directly.
  std::unique_ptr<GpuVideoDecodeAcceleratorFactory> factory(
      new GpuVideoDecodeAcceleratorFactory(gl_client));
  return factory;
}
// static
// Returns the (cached) decode capabilities for this GPU process.
MEDIA_GPU_EXPORT gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
    const gpu::GpuPreferences& gpu_preferences,
    const gpu::GpuDriverBugWorkarounds& workarounds) {
  // Cache the capabilities so that they will not be computed more than once per
  // GPU process. It is assumed that |gpu_preferences| and |workarounds| do not
  // change between calls.
  // TODO(sandersd): Move cache to GpuMojoMediaClient once
  // |video_decode_accelerator_capabilities| is removed from GPUInfo.
  static gpu::VideoDecodeAcceleratorCapabilities capabilities =
      GetDecoderCapabilitiesInternal(gpu_preferences, workarounds);
#if BUILDFLAG(USE_V4L2_CODEC)
  // V4L2-only: the decoder devices may not be visible at the time the GPU
  // process is starting. If the capabilities vector is empty, try to query the
  // devices again in the hope that they will have appeared in the meantime.
  // TODO(crbug.com/948147): trigger query when an device add/remove event
  // (e.g. via udev) has happened instead.
  // NOTE(review): this re-query writes to the function-local static without
  // synchronization; presumably all callers share one thread — confirm.
  if (capabilities.supported_profiles.empty()) {
    VLOGF(1) << "Capabilities empty, querying again...";
    capabilities = GetDecoderCapabilitiesInternal(gpu_preferences, workarounds);
  }
#endif
  return capabilities;
}
// Instantiates and initializes the first platform VDA that accepts |config|.
// Returns null when accelerated decode is disabled, or when no candidate VDA
// both constructs and initializes successfully.
MEDIA_GPU_EXPORT std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVDA(
    VideoDecodeAccelerator::Client* client,
    const VideoDecodeAccelerator::Config& config,
    const gpu::GpuDriverBugWorkarounds& workarounds,
    const gpu::GpuPreferences& gpu_preferences,
    MediaLog* media_log) {
  DCHECK(thread_checker_.CalledOnValidThread());
  if (gpu_preferences.disable_accelerated_video_decode)
    return nullptr;

  // Array of Create..VDA() function pointers, potentially usable on current
  // platform. This list is ordered by priority, from most to least preferred,
  // if applicable. This list must be in the same order as the querying order
  // in GetDecoderCapabilities() above.
  using CreateVDAFp = std::unique_ptr<VideoDecodeAccelerator> (
      GpuVideoDecodeAcceleratorFactory::*)(const gpu::GpuDriverBugWorkarounds&,
                                           const gpu::GpuPreferences&,
                                           MediaLog* media_log) const;
  const CreateVDAFp create_vda_fps[] = {
#if BUILDFLAG(IS_WIN)
      &GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA,
#endif
  // Usually only one of USE_VAAPI or USE_V4L2_CODEC is defined on ChromeOS,
  // except for Chromeboxes with companion video acceleration chips, which have
  // both. In those cases prefer the VA creation function.
#if BUILDFLAG(USE_VAAPI)
      &GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA,
#elif BUILDFLAG(USE_V4L2_CODEC)
      &GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA,
      &GpuVideoDecodeAcceleratorFactory::CreateV4L2SliceVDA,
#endif
#if BUILDFLAG(IS_MAC)
      &GpuVideoDecodeAcceleratorFactory::CreateVTVDA,
#endif
  };

  std::unique_ptr<VideoDecodeAccelerator> vda;
  for (const auto& create_vda_function : create_vda_fps) {
    vda = (this->*create_vda_function)(workarounds, gpu_preferences, media_log);
    // A null |vda| means the creator was not applicable (e.g. no device); a
    // non-null one must additionally accept |config| in Initialize().
    if (vda && vda->Initialize(config, client))
      return vda;
    else
      LOG(ERROR) << "Initialization of one or more VDAs failed.";
  }
  return nullptr;
}
#if BUILDFLAG(IS_WIN)
// Creates the DXVA-backed decoder used on Windows.
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateDXVAVDA(
    const gpu::GpuDriverBugWorkarounds& workarounds,
    const gpu::GpuPreferences& gpu_preferences,
    MediaLog* media_log) const {
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  return std::make_unique<DXVAVideoDecodeAccelerator>(
      gl_client_.get_context, gl_client_.make_context_current,
      gl_client_.bind_image, workarounds, gpu_preferences, media_log);
}
#endif
#if BUILDFLAG(USE_VAAPI)
// Creates the VA-API-backed decoder. Workarounds/preferences/log are unused
// by this implementation.
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVaapiVDA(
    const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
    const gpu::GpuPreferences& /*gpu_preferences*/,
    MediaLog* /*media_log*/) const {
  return std::make_unique<VaapiVideoDecodeAccelerator>(
      gl_client_.make_context_current, gl_client_.bind_image);
}
#elif BUILDFLAG(USE_V4L2_CODEC)
// Creates the stateful V4L2 decoder, or null when no V4L2 device exists.
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
    const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
    const gpu::GpuPreferences& /*gpu_preferences*/,
    MediaLog* /*media_log*/) const {
  scoped_refptr<V4L2Device> device = V4L2Device::Create();
  if (!device)
    return nullptr;  // No V4L2 decode device available on this system.
  return std::make_unique<V4L2VideoDecodeAccelerator>(
      gl::GLSurfaceEGL::GetGLDisplayEGL()->GetDisplay(),
      gl_client_.get_context, gl_client_.make_context_current, device);
}
// Creates the stateless (slice-based) V4L2 decoder, or null when no V4L2
// device exists.
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateV4L2SliceVDA(
    const gpu::GpuDriverBugWorkarounds& /*workarounds*/,
    const gpu::GpuPreferences& /*gpu_preferences*/,
    MediaLog* /*media_log*/) const {
  scoped_refptr<V4L2Device> device = V4L2Device::Create();
  if (!device)
    return nullptr;  // No V4L2 decode device available on this system.
  return std::make_unique<V4L2SliceVideoDecodeAccelerator>(
      device, gl::GLSurfaceEGL::GetGLDisplayEGL()->GetDisplay(),
      gl_client_.bind_image, gl_client_.make_context_current);
}
#endif
#if BUILDFLAG(IS_MAC)
// Creates the VideoToolbox-backed decoder used on macOS.
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVTVDA(
    const gpu::GpuDriverBugWorkarounds& workarounds,
    const gpu::GpuPreferences& gpu_preferences,
    MediaLog* media_log) const {
  // Fixed: this previously logged "Initializing VAAPI VDA." — a copy-paste
  // error; this path constructs the macOS VideoToolbox decoder, not VA-API.
  LOG(WARNING) << "Initializing VideoToolbox VDA.";
  std::unique_ptr<VideoDecodeAccelerator> decoder;
  decoder.reset(
      new VTVideoDecodeAccelerator(gl_client_, workarounds, media_log));
  return decoder;
}
#endif
// Private constructor; use Create(). Stores the GL callbacks used by the
// platform-specific Create*VDA() helpers above.
GpuVideoDecodeAcceleratorFactory::GpuVideoDecodeAcceleratorFactory(
    const GpuVideoDecodeGLClient& gl_client)
    : gl_client_(gl_client) {}

GpuVideoDecodeAcceleratorFactory::~GpuVideoDecodeAcceleratorFactory() = default;
} // namespace media

View file

@@ -1,624 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/ipc/service/gpu_video_decode_accelerator.h"
#include <memory>
#include <vector>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/bind_post_task.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "media/gpu/gpu_video_accelerator_util.h"
#include "media/gpu/gpu_video_decode_accelerator_factory.h"
#include "mojo/public/cpp/bindings/associated_receiver.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
namespace media {
namespace {
// Returns the GLContext of |stub|'s decoder, or null once the stub has been
// destroyed.
static gl::GLContext* GetGLContext(
    const base::WeakPtr<gpu::CommandBufferStub>& stub) {
  if (stub)
    return stub->decoder_context()->GetGLContext();
  DLOG(ERROR) << "Stub is gone; no GLContext.";
  return nullptr;
}
// Makes |stub|'s decoder context current; returns false if the stub is gone
// or MakeCurrent() fails.
static bool MakeDecoderContextCurrent(
    const base::WeakPtr<gpu::CommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }
  const bool made_current = stub->decoder_context()->MakeCurrent();
  if (!made_current)
    DLOG(ERROR) << "Failed to MakeCurrent()";
  return made_current;
}
// Binds |image| to the client texture through |stub|'s decoder context.
// Returns false only when the stub has been destroyed.
static bool BindImage(const base::WeakPtr<gpu::CommandBufferStub>& stub,
                      uint32_t client_texture_id,
                      uint32_t texture_target,
                      const scoped_refptr<gl::GLImage>& image,
                      bool can_bind_to_sampler) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't BindImage().";
    return false;
  }
  stub->decoder_context()->BindImage(client_texture_id, texture_target,
                                     image.get(), can_bind_to_sampler);
  return true;
}
// Returns the ContextGroup of |stub|'s decoder, or null once the stub has
// been destroyed.
static gpu::gles2::ContextGroup* GetContextGroup(
    const base::WeakPtr<gpu::CommandBufferStub>& stub) {
  if (stub)
    return stub->decoder_context()->GetContextGroup();
  DLOG(ERROR) << "Stub is gone; no DecoderContext.";
  return nullptr;
}
// Forwards texture creation to |stub|'s decoder context; null once the stub
// has been destroyed.
static std::unique_ptr<gpu::gles2::AbstractTexture> CreateAbstractTexture(
    const base::WeakPtr<gpu::CommandBufferStub>& stub,
    GLenum target,
    GLenum internal_format,
    GLsizei width,
    GLsizei height,
    GLsizei depth,
    GLint border,
    GLenum format,
    GLenum type) {
  if (stub) {
    return stub->decoder_context()->CreateAbstractTexture(
        target, internal_format, width, height, depth, border, format, type);
  }
  DLOG(ERROR) << "Stub is gone; no DecoderContext.";
  return nullptr;
}
} // anonymous namespace
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
typedef base::AutoLock DebugAutoLock;
#else
// Release builds: a no-op stand-in so call sites compile unchanged without
// paying for the lock.
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif
// Receives incoming messages for the decoder. Operates exclusively on the IO
// thread, since sometimes we want to do decodes directly from there.
class GpuVideoDecodeAccelerator::MessageFilter
    : public mojom::GpuAcceleratedVideoDecoder {
 public:
  // |owner| stays valid until RequestShutdown() detaches it. |decode_on_io|
  // lets Decode() run synchronously on the IO thread instead of posting to
  // |owner_task_runner|.
  MessageFilter(GpuVideoDecodeAccelerator* owner,
                scoped_refptr<base::SequencedTaskRunner> owner_task_runner,
                bool decode_on_io)
      : owner_(owner),
        owner_task_runner_(std::move(owner_task_runner)),
        decode_on_io_(decode_on_io) {}
  ~MessageFilter() override = default;

  // Called from the main thread. Posts to `io_task_runner` to do the binding
  // and waits for completion before returning. This ensures the decoder's
  // endpoint is established before the synchronous request to establish it is
  // acknowledged to the client.
  bool Bind(mojo::PendingAssociatedReceiver<mojom::GpuAcceleratedVideoDecoder>
                receiver,
            const scoped_refptr<base::SequencedTaskRunner>& io_task_runner) {
    base::WaitableEvent bound_event;
    if (!io_task_runner->PostTask(
            FROM_HERE, base::BindOnce(&MessageFilter::BindOnIoThread,
                                      base::Unretained(this),
                                      std::move(receiver), &bound_event))) {
      return false;
    }
    // Block until BindOnIoThread() signals that the receiver is bound.
    bound_event.Wait();
    return true;
  }

  // Must be called on the IO thread. Posts back to the owner's task runner to
  // destroy it.
  void RequestShutdown() {
    if (!owner_)
      return;
    // Must be reset here on the IO thread before `this` is destroyed.
    receiver_.reset();
    GpuVideoDecodeAccelerator* owner = owner_;
    owner_ = nullptr;
    // Invalidate any IO thread WeakPtrs which may be held by the
    // VideoDecodeAccelerator, and post to delete our owner which will in turn
    // delete us. Note that it is unsafe to access any members of `this` once
    // the task below is posted.
    owner->weak_factory_for_io_.InvalidateWeakPtrs();
    owner_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::DeleteSelfNow,
                                  base::Unretained(owner)));
  }

  // mojom::GpuAcceleratedVideoDecoder:
  void Decode(BitstreamBuffer buffer) override;
  void AssignPictureBuffers(
      std::vector<mojom::PictureBufferAssignmentPtr> assignments) override;
  void ReusePictureBuffer(int32_t picture_buffer_id) override;
  void Flush(FlushCallback callback) override;
  void Reset(ResetCallback callback) override;
  void SetOverlayInfo(const OverlayInfo& overlay_info) override;

 private:
  // Runs on the IO thread: binds |receiver_|, installs the disconnect
  // handler, then unblocks Bind().
  void BindOnIoThread(mojo::PendingAssociatedReceiver<
                          mojom::GpuAcceleratedVideoDecoder> receiver,
                      base::WaitableEvent* bound_event) {
    receiver_.Bind(std::move(receiver));
    receiver_.set_disconnect_handler(
        base::BindOnce(&MessageFilter::OnDisconnect, base::Unretained(this)));
    bound_event->Signal();
  }

  // Mojo disconnect handler: asks the owner, on its own task runner, to tear
  // down.
  void OnDisconnect() {
    if (!owner_)
      return;
    owner_task_runner_->PostTask(
        FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnDestroy,
                                  base::Unretained(owner_)));
  }

  // Null after RequestShutdown(); every entry point checks this first.
  raw_ptr<GpuVideoDecodeAccelerator> owner_;
  const scoped_refptr<base::SequencedTaskRunner> owner_task_runner_;
  // Whether Decode() may run directly on the IO thread.
  const bool decode_on_io_;
  mojo::AssociatedReceiver<mojom::GpuAcceleratedVideoDecoder> receiver_{this};
};
// mojom: decode one bitstream buffer. Runs directly when IO-thread decoding
// was negotiated; otherwise hops to the owner's task runner.
void GpuVideoDecodeAccelerator::MessageFilter::Decode(BitstreamBuffer buffer) {
  if (!owner_)
    return;
  if (!decode_on_io_) {
    owner_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&GpuVideoDecodeAccelerator::OnDecode,
                       base::Unretained(owner_), std::move(buffer)));
    return;
  }
  owner_->OnDecode(std::move(buffer));
}
// mojom: forward picture-buffer assignments to the owner's task runner.
void GpuVideoDecodeAccelerator::MessageFilter::AssignPictureBuffers(
    std::vector<mojom::PictureBufferAssignmentPtr> assignments) {
  if (!owner_)
    return;
  auto task =
      base::BindOnce(&GpuVideoDecodeAccelerator::OnAssignPictureBuffers,
                     base::Unretained(owner_), std::move(assignments));
  owner_task_runner_->PostTask(FROM_HERE, std::move(task));
}
void GpuVideoDecodeAccelerator::MessageFilter::ReusePictureBuffer(
int32_t picture_buffer_id) {
if (!owner_)
return;
owner_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&GpuVideoDecodeAccelerator::OnReusePictureBuffer,
base::Unretained(owner_), picture_buffer_id));
}
// mojom: request a flush; |callback| fires when the VDA reports completion.
void GpuVideoDecodeAccelerator::MessageFilter::Flush(FlushCallback callback) {
  if (!owner_)
    return;
  auto task = base::BindOnce(&GpuVideoDecodeAccelerator::OnFlush,
                             base::Unretained(owner_), std::move(callback));
  owner_task_runner_->PostTask(FROM_HERE, std::move(task));
}
// mojom: request a reset; |callback| fires when the VDA reports completion.
void GpuVideoDecodeAccelerator::MessageFilter::Reset(ResetCallback callback) {
  if (!owner_)
    return;
  auto task = base::BindOnce(&GpuVideoDecodeAccelerator::OnReset,
                             base::Unretained(owner_), std::move(callback));
  owner_task_runner_->PostTask(FROM_HERE, std::move(task));
}
void GpuVideoDecodeAccelerator::MessageFilter::SetOverlayInfo(
const OverlayInfo& overlay_info) {
if (!owner_)
return;
owner_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&GpuVideoDecodeAccelerator::OnSetOverlayInfo,
base::Unretained(owner_), overlay_info));
}
// Constructed on the child (main GPU) thread. Registers as a destruction
// observer of |stub| and captures GL callbacks bound to the stub's WeakPtr so
// they no-op once the stub dies.
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    gpu::CommandBufferStub* stub,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
    const AndroidOverlayMojoFactoryCB& overlay_factory_cb)
    : stub_(stub),
      texture_target_(0),
      pixel_format_(PIXEL_FORMAT_UNKNOWN),
      textures_per_buffer_(0),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      overlay_factory_cb_(overlay_factory_cb) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  // Each callback targets one of the file-local static helpers above.
  gl_client_.get_context =
      base::BindRepeating(&GetGLContext, stub_->AsWeakPtr());
  gl_client_.make_context_current =
      base::BindRepeating(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
  gl_client_.bind_image = base::BindRepeating(&BindImage, stub_->AsWeakPtr());
  gl_client_.get_context_group =
      base::BindRepeating(&GetContextGroup, stub_->AsWeakPtr());
  gl_client_.create_abstract_texture =
      base::BindRepeating(&CreateAbstractTexture, stub_->AsWeakPtr());
  // These two are snapshots taken now, not lazy callbacks.
  gl_client_.is_passthrough =
      stub_->decoder_context()->GetFeatureInfo()->is_passthrough_cmd_decoder();
  gl_client_.supports_arb_texture_rectangle = stub_->decoder_context()
                                                  ->GetFeatureInfo()
                                                  ->feature_flags()
                                                  .arb_texture_rectangle;
}
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}
// Posted by MessageFilter::RequestShutdown() onto the owner task runner;
// deleting |this| also destroys |filter_|.
void GpuVideoDecodeAccelerator::DeleteSelfNow() {
  delete this;
}
// static
// Thin forwarder to the factory's (cached) capability query.
gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAccelerator::GetCapabilities(
    const gpu::GpuPreferences& gpu_preferences,
    const gpu::GpuDriverBugWorkarounds& workarounds) {
  return GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
      gpu_preferences, workarounds);
}
// VDA callback: relays (as a bool) whether async initialization succeeded.
void GpuVideoDecodeAccelerator::NotifyInitializationComplete(
    DecoderStatus status) {
  decoder_client_->OnInitializationComplete(status.is_ok());
}
// VDA callback requesting picture buffers. Rejects dimensions exceeding the
// platform limits, records the texture parameters for later validation in
// OnAssignPictureBuffers(), then forwards the request to the client.
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32_t requested_num_of_buffers,
    VideoPixelFormat format,
    uint32_t textures_per_buffer,
    const gfx::Size& dimensions,
    uint32_t texture_target) {
  const bool dimensions_valid =
      dimensions.width() <= limits::kMaxDimension &&
      dimensions.height() <= limits::kMaxDimension &&
      dimensions.GetArea() <= limits::kMaxCanvas;
  if (!dimensions_valid) {
    NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  texture_dimensions_ = dimensions;
  textures_per_buffer_ = textures_per_buffer;
  texture_target_ = texture_target;
  pixel_format_ = format;
  decoder_client_->OnProvidePictureBuffers(requested_num_of_buffers, format,
                                           textures_per_buffer, dimensions,
                                           texture_target);
}
// VDA callback: the given picture buffer is no longer in use.
void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32_t picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  decoder_client_->OnDismissPictureBuffer(picture_buffer_id);
  // Drop our clear-tracking entry for this buffer's textures.
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}
// VDA callback: a decoded picture is ready for the client.
void GpuVideoDecodeAccelerator::PictureReady(const Picture& picture) {
  // VDA may call PictureReady on IO thread. SetTextureCleared should run on
  // the child thread. VDA is responsible to call PictureReady on the child
  // thread when a picture buffer is delivered the first time.
  if (child_task_runner_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_task_runner_->BelongsToCurrentThread());
    // On the IO thread the buffer's textures must already have been cleared
    // by an earlier child-thread delivery.
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }
  // Marshal the picture metadata into the mojo struct sent to the client.
  auto params = mojom::PictureReadyParams::New();
  params->picture_buffer_id = picture.picture_buffer_id();
  params->bitstream_buffer_id = picture.bitstream_buffer_id();
  params->visible_rect = picture.visible_rect();
  params->color_space = picture.color_space();
  params->allow_overlay = picture.allow_overlay();
  params->read_lock_fences_enabled = picture.read_lock_fences_enabled();
  params->size_changed = picture.size_changed();
  params->surface_texture = picture.texture_owner();
  params->wants_promotion_hint = picture.wants_promotion_hint();
  decoder_client_->OnPictureReady(std::move(params));
}
// VDA callback: the input buffer has been fully consumed and may be recycled.
void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32_t bitstream_buffer_id) {
  decoder_client_->OnBitstreamBufferProcessed(bitstream_buffer_id);
}
// VDA callback: one pending flush completed. Completions are consumed FIFO,
// matching the order flushes were issued in OnFlush().
void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  DCHECK(!pending_flushes_.empty());
  // Run before popping so the closure outlives its own invocation.
  std::move(pending_flushes_.front()).Run();
  pending_flushes_.pop_front();
}
// VDA callback: one pending reset completed; consumed FIFO like flushes.
void GpuVideoDecodeAccelerator::NotifyResetDone() {
  DCHECK(!pending_resets_.empty());
  // Run before popping so the closure outlives its own invocation.
  std::move(pending_resets_.front()).Run();
  pending_resets_.pop_front();
}
// VDA callback: propagate a decoder error to the client.
void GpuVideoDecodeAccelerator::NotifyError(
    VideoDecodeAccelerator::Error error) {
  decoder_client_->OnError(error);
}
// Destruction-observer hook, also invoked directly from OnDestroy().
void GpuVideoDecodeAccelerator::OnWillDestroyStub(bool have_context) {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  stub_->RemoveDestructionObserver(this);
  if (filter_) {
    // The filter will post DeleteSelfNow() back to us once it has unbound on
    // the IO thread.
    io_task_runner_->PostTask(FROM_HERE,
                              base::BindOnce(&MessageFilter::RequestShutdown,
                                             base::Unretained(filter_.get())));
  }
  video_decode_accelerator_.reset();
}
// Creates the underlying VDA for |config| and binds the mojo endpoints.
// Returns false on any failure, leaving the object unusable.
bool GpuVideoDecodeAccelerator::Initialize(
    const VideoDecodeAccelerator::Config& config,
    mojo::PendingAssociatedReceiver<mojom::GpuAcceleratedVideoDecoder> receiver,
    mojo::PendingAssociatedRemote<mojom::GpuAcceleratedVideoDecoderClient>
        client) {
  DCHECK(!video_decode_accelerator_);

#if !BUILDFLAG(IS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!gl_client_.make_context_current.Run())
    return false;
#endif

  std::unique_ptr<GpuVideoDecodeAcceleratorFactory> vda_factory =
      GpuVideoDecodeAcceleratorFactory::Create(gl_client_);
  if (!vda_factory) {
    LOG(ERROR) << "Failed creating the VDA factory";
    return false;
  }
  LOG(WARNING) << "Created the VDA factory";

  const gpu::GpuDriverBugWorkarounds& gpu_workarounds =
      stub_->channel()->gpu_channel_manager()->gpu_driver_bug_workarounds();
  const gpu::GpuPreferences& gpu_preferences =
      stub_->channel()->gpu_channel_manager()->gpu_preferences();

  // Only texture-allocating output is supported through this path.
  if (config.output_mode !=
      VideoDecodeAccelerator::Config::OutputMode::ALLOCATE) {
    DLOG(ERROR) << "Only ALLOCATE mode is supported";
    return false;
  }

  // NOTE(review): CreateVDA() is defined with a trailing MediaLog* parameter;
  // this call omits it, so presumably the declaration defaults it — confirm.
  video_decode_accelerator_ =
      vda_factory->CreateVDA(this, config, gpu_workarounds, gpu_preferences);
  if (!video_decode_accelerator_) {
    LOG(ERROR) << "HW video decode not available for profile "
               << GetProfileName(config.profile)
               << (config.is_encrypted() ? " with encryption" : "");
    return false;
  }
  LOG(WARNING) << "Created VDA";

  decoder_client_.Bind(std::move(client), io_task_runner_);

  // Attempt to set up performing decoding tasks on IO thread, if supported by
  // the VDA.
  bool decode_on_io =
      video_decode_accelerator_->TryToSetupDecodeOnSeparateThread(
          weak_factory_for_io_.GetWeakPtr(), io_task_runner_);

  // Bind the receiver on the IO thread. We wait here for it to be bound
  // before returning and signaling that the decoder has been created.
  filter_ =
      std::make_unique<MessageFilter>(this, stub_->task_runner(), decode_on_io);
  return filter_->Bind(std::move(receiver), io_task_runner_);
}
// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
// otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(BitstreamBuffer bitstream_buffer) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Decode(std::move(bitstream_buffer));
}
// Validates the client-supplied picture buffer assignments (ids, texture
// counts, targets and sizes), fixes up level info for external/rectangle
// targets, records the texture refs for clear-tracking, and hands the
// resulting PictureBuffers to the VDA. Any validation failure reports
// INVALID_ARGUMENT and abandons the whole batch.
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    std::vector<mojom::PictureBufferAssignmentPtr> assignments) {
  gpu::DecoderContext* decoder_context = stub_->decoder_context();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder_context()->GetContextGroup()->texture_manager();

  std::vector<PictureBuffer> buffers;
  // Per-buffer texture refs, kept to mark levels cleared later.
  std::vector<std::vector<scoped_refptr<gpu::gles2::TextureRef>>> textures;
  for (const auto& assignment : assignments) {
    if (assignment->buffer_id < 0) {
      DLOG(ERROR) << "Buffer id " << assignment->buffer_id << " out of range";
      NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    std::vector<scoped_refptr<gpu::gles2::TextureRef>> current_textures;
    PictureBuffer::TextureIds buffer_texture_ids = assignment->texture_ids;
    PictureBuffer::TextureIds service_ids;
    if (buffer_texture_ids.size() != textures_per_buffer_) {
      DLOG(ERROR) << "Requested " << textures_per_buffer_
                  << " textures per picture buffer, got "
                  << buffer_texture_ids.size();
      NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    for (size_t j = 0; j < textures_per_buffer_; j++) {
      gpu::TextureBase* texture_base =
          decoder_context->GetTextureBase(buffer_texture_ids[j]);
      if (!texture_base) {
        DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
        NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      // The texture's target must match what ProvidePictureBuffers() asked
      // for.
      if (texture_base->target() != texture_target_) {
        DLOG(ERROR) << "Texture target mismatch for texture id "
                    << buffer_texture_ids[j];
        NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      // |texture_manager| is null for the passthrough decoder; the level
      // bookkeeping below only applies to the validating decoder.
      if (texture_manager) {
        gpu::gles2::TextureRef* texture_ref =
            texture_manager->GetTexture(buffer_texture_ids[j]);
        if (texture_ref) {
          gpu::gles2::Texture* info = texture_ref->texture();
          if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
              texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
            // These textures have their dimensions defined by the underlying
            // storage.
            // Use |texture_dimensions_| for this size.
            texture_manager->SetLevelInfo(
                texture_ref, texture_target_, 0, GL_RGBA,
                texture_dimensions_.width(), texture_dimensions_.height(), 1, 0,
                GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect());
          } else {
            // For other targets, texture dimensions should already be defined.
            GLsizei width = 0, height = 0;
            info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
            if (width != texture_dimensions_.width() ||
                height != texture_dimensions_.height()) {
              DLOG(ERROR) << "Size mismatch for texture id "
                          << buffer_texture_ids[j];
              NotifyError(VideoDecodeAccelerator::INVALID_ARGUMENT);
              return;
            }
            // TODO(dshwang): after moving to D3D11, remove this.
            // https://crbug.com/438691
            GLenum format =
                video_decode_accelerator_->GetSurfaceInternalFormat();
            if (format != GL_RGBA) {
              DCHECK(format == GL_BGRA_EXT);
              texture_manager->SetLevelInfo(texture_ref, texture_target_, 0,
                                            format, width, height, 1, 0, format,
                                            GL_UNSIGNED_BYTE, gfx::Rect());
            }
          }
          current_textures.push_back(texture_ref);
        }
      }
      service_ids.push_back(texture_base->service_id());
    }
    textures.push_back(current_textures);
    buffers.emplace_back(assignment->buffer_id, texture_dimensions_,
                         buffer_texture_ids, service_ids, texture_target_,
                         pixel_format_);
  }
  {
    // Remember which textures still need clearing; see SetTextureCleared().
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    for (uint32_t i = 0; i < assignments.size(); ++i)
      uncleared_textures_[assignments[i]->buffer_id] = textures[i];
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
}
// Forwards a buffer-reuse notification from the client to the VDA.
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}
// Queues |callback| (rebound to run on the IO thread) to fire when
// NotifyFlushDone() arrives, then asks the VDA to flush.
void GpuVideoDecodeAccelerator::OnFlush(base::OnceClosure callback) {
  DCHECK(video_decode_accelerator_);
  pending_flushes_.push_back(
      base::BindPostTask(io_task_runner_, std::move(callback)));
  video_decode_accelerator_->Flush();
}
// Queues |callback| (rebound to run on the IO thread) to fire when
// NotifyResetDone() arrives, then asks the VDA to reset.
void GpuVideoDecodeAccelerator::OnReset(base::OnceClosure callback) {
  DCHECK(video_decode_accelerator_);
  pending_resets_.push_back(
      base::BindPostTask(io_task_runner_, std::move(callback)));
  video_decode_accelerator_->Reset();
}
// Forwards updated overlay info from the client to the VDA.
void GpuVideoDecodeAccelerator::OnSetOverlayInfo(
    const OverlayInfo& overlay_info) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->SetOverlayInfo(overlay_info);
}
// Triggered by mojo disconnect (see MessageFilter::OnDisconnect); reuses the
// stub-destruction path to tear everything down.
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_);
  OnWillDestroyStub(false);
}
// Marks level 0 of each texture backing |picture|'s buffer as cleared, then
// drops the buffer from the uncleared-textures map. Must run on the child
// thread (see PictureReady()). No-op if the buffer was already cleared.
void GpuVideoDecodeAccelerator::SetTextureCleared(const Picture& picture) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  auto it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has already been cleared.
  // Hoisted out of the loop: the texture manager is loop-invariant, and the
  // previous by-value scoped_refptr loop variable churned the refcount on
  // every iteration.
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder_context()->GetContextGroup()->texture_manager();
  for (const auto& texture_ref : it->second) {
    GLenum target = texture_ref->texture()->target();
    texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  }
  uncleared_textures_.erase(it);
}
} // namespace media

View file

@@ -1,178 +0,0 @@
// Copyright 2022 The Chromium Authors and Alex313031. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/vaapi/vaapi_picture_factory.h"
#include "base/containers/contains.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/video/picture.h"
#include "ui/gl/gl_bindings.h"
#if defined(USE_OZONE)
#include "media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.h"
#endif // defined(USE_OZONE)
#if BUILDFLAG(USE_VAAPI_X11)
#include "media/gpu/vaapi/vaapi_picture_native_pixmap_angle.h"
#include "media/gpu/vaapi/vaapi_picture_tfp.h"
#endif // BUILDFLAG(USE_VAAPI_X11)
#if defined(USE_EGL)
#include "media/gpu/vaapi/vaapi_picture_native_pixmap_egl.h"
#endif
namespace media {
namespace {
// Constructs a concrete VaapiPicture subclass of type PictureType from a
// generic PictureBuffer; bound into |create_picture_cb_| by
// DeterminePictureCreationAndDownloadingMechanism().
template <typename PictureType>
std::unique_ptr<VaapiPicture> CreateVaapiPictureNativeImpl(
    scoped_refptr<VaapiWrapper> vaapi_wrapper,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb,
    const PictureBuffer& picture_buffer,
    const gfx::Size& visible_size,
    uint32_t client_texture_id,
    uint32_t service_texture_id) {
  return std::make_unique<PictureType>(
      std::move(vaapi_wrapper), make_context_current_cb, bind_image_cb,
      picture_buffer.id(), picture_buffer.size(), visible_size,
      service_texture_id, client_texture_id, picture_buffer.texture_target());
}
} // namespace
// Populates the GL-implementation -> VA-API implementation mapping used by
// GetVaapiImplementation(), then selects the picture-creation mechanism.
VaapiPictureFactory::VaapiPictureFactory() {
  vaapi_impl_pairs_.insert(
      std::make_pair(gl::kGLImplementationEGLGLES2,
                     VaapiPictureFactory::kVaapiImplementationDrm));
#if BUILDFLAG(USE_VAAPI_X11)
  vaapi_impl_pairs_.insert(
      std::make_pair(gl::kGLImplementationEGLANGLE,
                     VaapiPictureFactory::kVaapiImplementationAngle));
  vaapi_impl_pairs_.insert(
      std::make_pair(gl::kGLImplementationDesktopGL,
                     VaapiPictureFactory::kVaapiImplementationX11));
#elif defined(USE_OZONE)
  vaapi_impl_pairs_.insert(
      std::make_pair(gl::kGLImplementationEGLANGLE,
                     VaapiPictureFactory::kVaapiImplementationDrm));
#endif
  DeterminePictureCreationAndDownloadingMechanism();
}

VaapiPictureFactory::~VaapiPictureFactory() = default;
// Creates a VaapiPicture for |picture_buffer| using the platform-specific
// mechanism chosen at construction time.
std::unique_ptr<VaapiPicture> VaapiPictureFactory::Create(
    scoped_refptr<VaapiWrapper> vaapi_wrapper,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb,
    const PictureBuffer& picture_buffer,
    const gfx::Size& visible_size) {
  // ARC++ sends |picture_buffer| with no texture_target().
  DCHECK(picture_buffer.texture_target() == GetGLTextureTarget() ||
         picture_buffer.texture_target() == 0u);

  // |client_texture_ids| and |service_texture_ids| are empty from ARC++.
  const uint32_t client_texture_id =
      !picture_buffer.client_texture_ids().empty()
          ? picture_buffer.client_texture_ids()[0]
          : 0;
  const uint32_t service_texture_id =
      !picture_buffer.service_texture_ids().empty()
          ? picture_buffer.service_texture_ids()[0]
          : 0;

  // Select DRM(egl) / TFP(glx) at runtime with --use-gl=egl / --use-gl=desktop
  return CreateVaapiPictureNative(vaapi_wrapper, make_context_current_cb,
                                  bind_image_cb, picture_buffer, visible_size,
                                  client_texture_id, service_texture_id);
}
// Returns the VaapiImplementation registered for |gl_impl|, or
// kVaapiImplementationNone if there is no entry for it.
VaapiPictureFactory::VaapiImplementation
VaapiPictureFactory::GetVaapiImplementation(gl::GLImplementation gl_impl) {
  // A single find() replaces the previous base::Contains() + operator[]
  // combination: it avoids a second lookup and avoids map::operator[], which
  // would default-insert an entry for a missing key.
  const auto it = vaapi_impl_pairs_.find(gl_impl);
  return it != vaapi_impl_pairs_.end() ? it->second : kVaapiImplementationNone;
}
// Returns the GL texture target pictures are bound to: plain GL_TEXTURE_2D on
// the X11 (TFP/ANGLE) build, GL_TEXTURE_EXTERNAL_OES everywhere else (native
// pixmap import paths).
uint32_t VaapiPictureFactory::GetGLTextureTarget() {
#if BUILDFLAG(USE_VAAPI_X11)
  return GL_TEXTURE_2D;
#else
  return GL_TEXTURE_EXTERNAL_OES;
#endif
}
// Returns the gfx::BufferFormat used for the pictures' backing buffers:
// opaque RGBX on desktop Linux builds, biplanar YUV (NV12-style) otherwise
// (presumably the ChromeOS configuration — TODO(review): confirm).
gfx::BufferFormat VaapiPictureFactory::GetBufferFormat() {
#if BUILDFLAG(IS_LINUX)
  return gfx::BufferFormat::RGBX_8888;
#else
  return gfx::BufferFormat::YUV_420_BIPLANAR;
#endif
}
// Based on the GL implementation detected at runtime, selects which
// VaapiPicture subclass |create_picture_cb_| will instantiate and whether
// downloading a decoded frame into it requires the VPP blit path
// (|needs_vpp_for_downloading_|).
void VaapiPictureFactory::DeterminePictureCreationAndDownloadingMechanism() {
  switch (GetVaapiImplementation(gl::GetGLImplementation())) {
#if defined(USE_OZONE)
    // We can be called without GL initialized, which is valid if we use Ozone.
    case kVaapiImplementationNone:
      create_picture_cb_ = base::BindRepeating(
          &CreateVaapiPictureNativeImpl<VaapiPictureNativePixmapOzone>);
      needs_vpp_for_downloading_ = true;
      break;
#endif  // defined(USE_OZONE)
#if BUILDFLAG(USE_VAAPI_X11)
    case kVaapiImplementationX11:
      create_picture_cb_ =
          base::BindRepeating(&CreateVaapiPictureNativeImpl<VaapiTFPPicture>);
      // Neither VaapiTFPPicture or VaapiPictureNativePixmapAngle needs the VPP.
      needs_vpp_for_downloading_ = false;
      break;
    case kVaapiImplementationAngle:
      create_picture_cb_ = base::BindRepeating(
          &CreateVaapiPictureNativeImpl<VaapiPictureNativePixmapAngle>);
      // Neither VaapiTFPPicture or VaapiPictureNativePixmapAngle needs the VPP.
      needs_vpp_for_downloading_ = false;
      break;
#endif  // BUILDFLAG(USE_VAAPI_X11)
    case kVaapiImplementationDrm:
#if defined(USE_OZONE)
      create_picture_cb_ = base::BindRepeating(
          &CreateVaapiPictureNativeImpl<VaapiPictureNativePixmapOzone>);
      needs_vpp_for_downloading_ = true;
      break;
#elif defined(USE_EGL)
      create_picture_cb_ = base::BindRepeating(
          &CreateVaapiPictureNativeImpl<VaapiPictureNativePixmapEgl>);
      needs_vpp_for_downloading_ = true;
      break;
#else
      // ozone or egl must be used to use the DRM implementation.
      [[fallthrough]];
#endif
    default:
      NOTREACHED();
      break;
  }
}
// Returns whether the selected picture flavor needs the VPP for downloading
// decoded frames, as decided in
// DeterminePictureCreationAndDownloadingMechanism().
bool VaapiPictureFactory::NeedsProcessingPipelineForDownloading() const {
  return needs_vpp_for_downloading_;
}
// Instantiates the VaapiPicture subclass selected at construction time by
// DeterminePictureCreationAndDownloadingMechanism().
std::unique_ptr<VaapiPicture> VaapiPictureFactory::CreateVaapiPictureNative(
    scoped_refptr<VaapiWrapper> vaapi_wrapper,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb,
    const PictureBuffer& picture_buffer,
    const gfx::Size& visible_size,
    uint32_t client_texture_id,
    uint32_t service_texture_id) {
  // |create_picture_cb_| is always set by the constructor; a null callback
  // here indicates a programming error.
  CHECK(create_picture_cb_);
  return create_picture_cb_.Run(
      std::move(vaapi_wrapper), make_context_current_cb, bind_image_cb,
      picture_buffer, visible_size, client_texture_id, service_texture_id);
}
} // namespace media

View file

@ -1,70 +0,0 @@
// Copyright 2022 The Chromium Authors and Alex313031. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/vaapi/vaapi_picture_native_pixmap.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
#include "ui/gfx/native_pixmap.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image_native_pixmap.h"
namespace media {
// Pass-through constructor: forwards every argument unchanged to the
// VaapiPicture base class; this subclass only adds behavior in the methods
// defined below.
VaapiPictureNativePixmap::VaapiPictureNativePixmap(
    scoped_refptr<VaapiWrapper> vaapi_wrapper,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb,
    int32_t picture_buffer_id,
    const gfx::Size& size,
    const gfx::Size& visible_size,
    uint32_t texture_id,
    uint32_t client_texture_id,
    uint32_t texture_target)
    : VaapiPicture(std::move(vaapi_wrapper),
                   make_context_current_cb,
                   bind_image_cb,
                   picture_buffer_id,
                   size,
                   visible_size,
                   texture_id,
                   client_texture_id,
                   texture_target) {}
VaapiPictureNativePixmap::~VaapiPictureNativePixmap() = default;
// Copies the decoded |va_surface| into this picture's own |va_surface_| via
// the VPP blit. The sync/blit/sync order is deliberate: input must be synced
// before it is read, and output must be synced before the client consumes it.
bool VaapiPictureNativePixmap::DownloadFromSurface(
    scoped_refptr<VASurface> va_surface) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Wait for the decode into |va_surface| to finish before using it as the
  // VPP input.
  if (!vaapi_wrapper_->SyncSurface(va_surface->id())) {
    VLOGF(1) << "Cannot sync VPP input surface";
    return false;
  }
  // Blit/convert the decoded image into |va_surface_|, the surface backing
  // this picture's native pixmap.
  if (!vaapi_wrapper_->BlitSurface(*va_surface, *va_surface_)) {
    VLOGF(1) << "Cannot convert decoded image into output buffer";
    return false;
  }
  // Sync target surface since the buffer is returning to client.
  if (!vaapi_wrapper_->SyncSurface(va_surface_->id())) {
    VLOGF(1) << "Cannot sync VPP output surface";
    return false;
  }
  return true;
}
// Native-pixmap-backed pictures are unconditionally overlay-capable.
bool VaapiPictureNativePixmap::AllowOverlay() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  return true;
}
// Returns the id of the VASurface backing this picture.
VASurfaceID VaapiPictureNativePixmap::va_surface_id() const {
  return va_surface_->id();
}
} // namespace media

File diff suppressed because it is too large Load diff

View file

@ -1,368 +0,0 @@
// Copyright (c) 2022 The Chromium Authors and Alex313031. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file contains an implementation of VideoDecoderAccelerator
// that utilizes hardware video decoder present on Intel CPUs.
#ifndef MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
#define MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
#include <stddef.h>
#include <stdint.h>
#include <list>
#include <map>
#include <memory>
#include <utility>
#include <vector>
#include "base/containers/queue.h"
#include "base/containers/small_map.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/task/single_thread_task_runner.h"
#include "base/thread_annotations.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "media/base/bitstream_buffer.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/vaapi_picture_factory.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/video/picture.h"
#include "media/video/video_decode_accelerator.h"
namespace gl {
class GLImage;
}
namespace media {
class AcceleratedVideoDecoder;
template <typename T>
class ScopedID;
class VaapiVideoDecoderDelegate;
class VaapiPicture;
// Class to provide video decode acceleration for Intel systems with hardware
// support for it, and on which libva is available.
// Decoding tasks are performed in a separate decoding thread.
//
// Threading/life-cycle: this object is created & destroyed on the GPU
// ChildThread. A few methods on it are called on the decoder thread which is
// stopped during |this->Destroy()|, so any tasks posted to the decoder thread
// can assume |*this| is still alive. See |weak_this_| below for more details.
class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
    : public VideoDecodeAccelerator,
      public DecodeSurfaceHandler<VASurface>,
      public base::trace_event::MemoryDumpProvider {
 public:
  VaapiVideoDecodeAccelerator(
      const MakeGLContextCurrentCallback& make_context_current_cb,
      const BindGLImageCallback& bind_image_cb);
  VaapiVideoDecodeAccelerator(const VaapiVideoDecodeAccelerator&) = delete;
  VaapiVideoDecodeAccelerator& operator=(const VaapiVideoDecodeAccelerator&) =
      delete;
  ~VaapiVideoDecodeAccelerator() override;
  // VideoDecodeAccelerator implementation.
  bool Initialize(const Config& config, Client* client) override;
  void Decode(BitstreamBuffer bitstream_buffer) override;
  void Decode(scoped_refptr<DecoderBuffer> buffer,
              int32_t bitstream_id) override;
  void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
#if defined(USE_OZONE)
  void ImportBufferForPicture(
      int32_t picture_buffer_id,
      VideoPixelFormat pixel_format,
      gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) override;
#endif
  void ReusePictureBuffer(int32_t picture_buffer_id) override;
  void Flush() override;
  void Reset() override;
  void Destroy() override;
  bool TryToSetupDecodeOnSeparateThread(
      const base::WeakPtr<Client>& decode_client,
      const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner)
      override;
  static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
  // Whether the VPP (video post-processing) profile is supported.
  // NOTE(review): presumably forwards to VaapiWrapper::IsVppProfileSupported()
  // — confirm against the .cc.
  static bool IsVppProfileSupported();
  // DecodeSurfaceHandler implementation.
  scoped_refptr<VASurface> CreateSurface() override;
  void SurfaceReady(scoped_refptr<VASurface> va_surface,
                    int32_t bitstream_id,
                    const gfx::Rect& visible_rect,
                    const VideoColorSpace& color_space) override;
  // base::trace_event::MemoryDumpProvider implementation.
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;
 private:
  friend class VaapiVideoDecodeAcceleratorTest;
  // An input buffer with id provided by the client and awaiting consumption.
  class InputBuffer;
  // A self-cleaning VASurfaceID.
  using ScopedVASurfaceID = ScopedID<VASurfaceID>;
  // Notify the client that an error has occurred and decoding cannot continue.
  void NotifyError(Error error);
  // As above, but taking a VaapiStatus. NOTE(review): assumed to map the
  // status onto a client-visible Error — confirm against the .cc.
  void NotifyStatus(VaapiStatus status);
  // Queue an input buffer for decode.
  void QueueInputBuffer(scoped_refptr<DecoderBuffer> buffer,
                        int32_t bitstream_id);
  // Gets a new |current_input_buffer_| from |input_buffers_| and sets it up in
  // |decoder_|. This method will sleep if no |input_buffers_| are available.
  // Returns true if a new buffer has been set up, false if an early exit has
  // been requested (due to initiated reset/flush/destroy).
  bool GetCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Signals the client that |curr_input_buffer_| has been read and can be
  // returned. Will also release the mapping.
  void ReturnCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Waits for more surfaces to become available. Returns true once they do or
  // false if an early exit has been requested (due to an initiated
  // reset/flush/destroy).
  bool WaitForSurfaces_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Continue decoding given input buffers and sleep waiting for input/output
  // as needed. Will exit if a new set of surfaces or reset/flush/destroy
  // is requested.
  void DecodeTask();
  // Scheduled after receiving a flush request and executed after the current
  // decoding task finishes decoding pending inputs. Makes the decoder return
  // all remaining output pictures and puts it in an idle state, ready
  // to resume if needed and schedules a FinishFlush.
  void FlushTask();
  // Scheduled by the FlushTask after decoder is flushed to put VAVDA into idle
  // state and notify the client that flushing has been finished.
  void FinishFlush();
  // Scheduled after receiving a reset request and executed after the current
  // decoding task finishes decoding the current frame. Puts the decoder into
  // an idle state, ready to resume if needed, discarding decoded but not yet
  // outputted pictures (decoder keeps ownership of their associated picture
  // buffers). Schedules a FinishReset afterwards.
  void ResetTask();
  // Scheduled by ResetTask after it's done putting VAVDA into an idle state.
  // Drops remaining input buffers and notifies the client that reset has been
  // finished.
  void FinishReset();
  // Helper for Destroy(), doing all the actual work except for deleting self.
  void Cleanup();
  // Get a usable framebuffer configuration for use in binding textures
  // or return false on failure.
  bool InitializeFBConfig();
  // Callback to be executed once we have a |va_surface| to be output and an
  // available VaapiPicture in |available_picture_buffers_| for output. Puts
  // contents of |va_surface| into the latter, releases the surface and passes
  // the resulting picture to |client_| along with |visible_rect|.
  void OutputPicture(scoped_refptr<VASurface> va_surface,
                     int32_t input_id,
                     gfx::Rect visible_rect,
                     const VideoColorSpace& picture_color_space);
  // Try to OutputPicture() if we have both a ready surface and picture.
  void TryOutputPicture();
  // Called when a VASurface is no longer in use by |decoder_| nor |client_|.
  // Returns it to |available_va_surfaces_|. |va_surface_id| is not used but it
  // must be here to bind this method as VASurface::ReleaseCB.
  void RecycleVASurface(std::unique_ptr<ScopedVASurfaceID> va_surface,
                        VASurfaceID va_surface_id);
  // Request a new set of |num_pics| PictureBuffers to be allocated by
  // |client_|. Up to |num_reference_frames| out of |num_pics_| might be needed
  // by |decoder_|.
  void InitiateSurfaceSetChange(size_t num_pics,
                                gfx::Size size,
                                size_t num_reference_frames,
                                const gfx::Rect& visible_rect);
  // Check if the surfaces have been released or post ourselves for later.
  void TryFinishSurfaceSetChange();
  // Different modes of internal buffer allocations.
  enum class BufferAllocationMode {
    // Only using |client_|s provided PictureBuffers, none internal.
    kNone,
    // Using a reduced amount of |client_|s provided PictureBuffers and
    // |decoder_|s GetNumReferenceFrames() internally.
    kSuperReduced,
    // Similar to kSuperReduced, but we have to increase slightly the amount of
    // PictureBuffers allocated for the |client_|.
    kReduced,
    // VaapiVideoDecodeAccelerator can work with this mode on all platforms.
    // Using |client_|s provided PictureBuffers and as many internally
    // allocated.
    kNormal,
    // NOTE(review): fork-added mode, by its name for the VDPAU-wrapping
    // (NVIDIA) backend — confirm semantics in DecideBufferAllocationMode().
    kWrapVdpau,
  };
  // Decides the concrete buffer allocation mode, depending on the hardware
  // platform and other parameters.
  BufferAllocationMode DecideBufferAllocationMode();
  bool IsBufferAllocationModeReducedOrSuperReduced() const;
  // VAVDA state.
  enum State {
    // Initialize() not called yet or failed.
    kUninitialized,
    // DecodeTask running.
    kDecoding,
    // Resetting, waiting for decoder to finish current task and cleanup.
    kResetting,
    // Idle, decoder in state ready to start/resume decoding.
    kIdle,
    // Destroying, waiting for the decoder to finish current task.
    kDestroying,
  };
  // Guards |state_| and the queues/lists annotated GUARDED_BY(lock_) below.
  base::Lock lock_;
  State state_ GUARDED_BY(lock_);
  // Only used on |task_runner_|.
  Config::OutputMode output_mode_;
  // Queue of available InputBuffers.
  base::queue<std::unique_ptr<InputBuffer>> input_buffers_ GUARDED_BY(lock_);
  // Signalled when input buffers are queued onto |input_buffers_| queue.
  base::ConditionVariable input_ready_;
  // Current input buffer at decoder. Only used on |decoder_thread_task_runner_|
  std::unique_ptr<InputBuffer> curr_input_buffer_;
  // Only used on |task_runner_|.
  std::unique_ptr<VaapiPictureFactory> vaapi_picture_factory_;
  // The following variables are constructed/initialized in Initialize() when
  // the codec information is received. |vaapi_wrapper_| is thread safe.
  scoped_refptr<VaapiWrapper> vaapi_wrapper_;
  // Only used on |decoder_thread_task_runner_|.
  std::unique_ptr<AcceleratedVideoDecoder> decoder_;
  // TODO(crbug.com/1022246): Instead of having the raw pointer here, getting
  // the pointer from AcceleratedVideoDecoder.
  raw_ptr<VaapiVideoDecoderDelegate> decoder_delegate_ = nullptr;
  // Filled in during Initialize().
  BufferAllocationMode buffer_allocation_mode_;
  // VaapiWrapper for VPP (Video Post Processing). This is used for copying
  // from a decoded surface to a surface bound to client's PictureBuffer.
  scoped_refptr<VaapiWrapper> vpp_vaapi_wrapper_;
  // All allocated VaapiPictures, regardless of their current state. Pictures
  // are allocated at AssignPictureBuffers() and are kept until dtor or
  // TryFinishSurfaceSetChange(). Comes after |vaapi_wrapper_| to ensure all
  // pictures are destroyed before this is destroyed.
  base::small_map<std::map<int32_t, std::unique_ptr<VaapiPicture>>> pictures_
      GUARDED_BY(lock_);
  // List of PictureBuffer ids available to be sent to |client_| via
  // OutputPicture() (|client_| returns them via ReusePictureBuffer()).
  std::list<int32_t> available_picture_buffers_ GUARDED_BY(lock_);
  // VASurfaces available and that can be passed to |decoder_| for its use upon
  // CreateSurface() request (and then returned via RecycleVASurface()).
  std::list<std::unique_ptr<ScopedVASurfaceID>> available_va_surfaces_
      GUARDED_BY(lock_);
  // Signalled when output surfaces are queued into |available_va_surfaces_|.
  base::ConditionVariable surfaces_available_;
  // VASurfaceIDs format, filled in when created.
  unsigned int va_surface_format_;
  // Pending output requests from the decoder. When it indicates that we should
  // output a surface and we have an available Picture (i.e. texture) ready
  // to use, we'll execute the callback passing the Picture. The callback
  // will put the contents of the surface into the picture and return it to
  // the client, releasing the surface as well.
  // If we don't have any available |pictures_| at the time when the decoder
  // requests output, we'll store the request in this queue for later and run it
  // once the client gives us more textures via ReusePictureBuffer().
  // Only used on |task_runner_|.
  base::queue<base::OnceClosure> pending_output_cbs_;
  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder
  // thread back to the ChildThread. Because the decoder thread is a member of
  // this class, any task running on the decoder thread is guaranteed that this
  // object is still alive. As a result, tasks posted from ChildThread to
  // decoder thread should use base::Unretained(this), and tasks posted from the
  // decoder thread to the ChildThread should use |weak_this_|.
  base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
  // Callback used to recycle VASurfaces. Only used on |task_runner_|.
  base::RepeatingCallback<void(std::unique_ptr<ScopedVASurfaceID>, VASurfaceID)>
      va_surface_recycle_cb_;
  // To expose client callbacks from VideoDecodeAccelerator. Used only on
  // |task_runner_|.
  std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
  base::WeakPtr<Client> client_;
  // ChildThread's task runner.
  const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  base::Thread decoder_thread_;
  // Use this to post tasks to |decoder_thread_| instead of
  // |decoder_thread_.task_runner()| because the latter will be NULL once
  // |decoder_thread_.Stop()| returns.
  scoped_refptr<base::SingleThreadTaskRunner> decoder_thread_task_runner_;
  // Whether we are waiting for any |pending_output_cbs_| to be run before
  // NotifyingFlushDone. Only used on |task_runner_|.
  bool finish_flush_pending_;
  // Decoder requested a new surface set and we are waiting for all the surfaces
  // to be returned before we can free them. Only used on |task_runner_|.
  bool awaiting_va_surfaces_recycle_;
  // Last requested number/resolution/visible rectangle of output
  // PictureBuffers.
  size_t requested_num_pics_;
  gfx::Size requested_pic_size_;
  gfx::Rect requested_visible_rect_;
  // Potential extra PictureBuffers to request, used only on
  // BufferAllocationMode::kNone, see DecideBufferAllocationMode().
  size_t num_extra_pics_ = 0;
  // Max number of reference frames needed by |decoder_|. Only used on
  // |task_runner_| and when in BufferAllocationMode::kNone.
  size_t requested_num_reference_frames_;
  // The value of |requested_num_reference_frames_| at the previous surface set
  // change — NOTE(review): inferred from the name; confirm in the .cc.
  size_t previously_requested_num_reference_frames_;
  // The video stream's profile.
  VideoCodecProfile profile_;
  // Callback to make GL context current.
  MakeGLContextCurrentCallback make_context_current_cb_;
  // Callback to bind a GLImage to a given texture.
  BindGLImageCallback bind_image_cb_;
  // The WeakPtrFactory for |weak_this_|.
  base::WeakPtrFactory<VaapiVideoDecodeAccelerator> weak_this_factory_;
};
} // namespace media
#endif // MEDIA_GPU_VAAPI_VAAPI_VIDEO_DECODE_ACCELERATOR_H_

File diff suppressed because it is too large Load diff

View file

@ -1,675 +0,0 @@
// Copyright 2022 The Chromium Authors and Alex313031. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file contains an implementation of VaapiWrapper, used by
// VaapiVideoDecodeAccelerator and VaapiH264Decoder for decode,
// and VaapiVideoEncodeAccelerator for encode, to interface
// with libva (VA-API library for hardware video codec).
#ifndef MEDIA_GPU_VAAPI_VAAPI_WRAPPER_H_
#define MEDIA_GPU_VAAPI_VAAPI_WRAPPER_H_
#include <stddef.h>
#include <stdint.h>
#include <va/va.h>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "base/files/file.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "build/chromeos_buildflags.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_utils.h"
#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "ui/gfx/geometry/size.h"
#if BUILDFLAG(USE_VAAPI_X11)
#include "ui/gfx/x/xproto.h" // nogncheck
#endif // BUILDFLAG(USE_VAAPI_X11)
namespace gfx {
enum class BufferFormat;
class NativePixmap;
class NativePixmapDmaBuf;
class Rect;
}
// Asserts that |lock| is held, if |lock| is non-null (the lock pointer is
// null when the backend is thread-safe and no global lock is used).
// Wrapped in do { } while (0) so the macro expands to exactly one statement:
// the previous bare "if (lock) ..." form would silently capture a following
// "else" clause (dangling-else) when used inside an if/else.
#define MAYBE_ASSERT_ACQUIRED(lock) \
  do {                              \
    if (lock)                       \
      lock->AssertAcquired();       \
  } while (0)
namespace media {
// Sentinel VA RT ("render target") format value meaning no/unknown format.
constexpr unsigned int kInvalidVaRtFormat = 0u;
class VideoFrame;
// Enum, function and callback type to allow VaapiWrapper to log errors in VA
// function calls executed on behalf of its owner. |histogram_name| is prebound
// to allow for distinguishing such owners.
enum class VaapiFunctions;
void ReportVaapiErrorToUMA(const std::string& histogram_name,
                           VaapiFunctions value);
using ReportErrorToUMACB = base::RepeatingCallback<void(VaapiFunctions)>;
// This struct holds a NativePixmapDmaBuf, usually the result of exporting a VA
// surface, and some associated size information needed to tell clients about
// the underlying buffer.
struct NativePixmapAndSizeInfo {
  // Constructor/destructor are declared out-of-line; presumably so the
  // forward-declared gfx::NativePixmapDmaBuf need not be complete here.
  NativePixmapAndSizeInfo();
  ~NativePixmapAndSizeInfo();
  // The VA-API internal buffer dimensions, which may be different than the
  // dimensions requested at the time of creation of the surface (but always
  // larger than or equal to those). This can be used for validation in, e.g.,
  // testing.
  gfx::Size va_surface_resolution;
  // The size of the underlying Buffer Object. A use case for this is when an
  // image decode is requested and the caller needs to know the size of the
  // allocated buffer for caching purposes.
  size_t byte_size = 0u;
  // Contains the information needed to use the surface in a graphics API,
  // including the visible size (|pixmap|->GetBufferSize()) which should be no
  // larger than |va_surface_resolution|.
  scoped_refptr<gfx::NativePixmapDmaBuf> pixmap;
};
// Identifies the underlying VA-API driver implementation, as returned by
// GetImplementationType().
enum class VAImplementation {
  kMesaGallium,
  kIntelI965,
  kIntelIHD,
  kOther,
  // NOTE(review): fork-added value, by its name the VDPAU-wrapping (NVIDIA)
  // backend — confirm against the detection code in the .cc.
  kNVIDIAVDPAU,
  kInvalid,
};
// This class handles VA-API calls and ensures proper locking of VA-API calls
// to libva, the userspace shim to the HW codec driver. The thread safety of
// libva depends on the backend. If the backend is not thread-safe, we need to
// maintain a global lock that guards all libva calls. This class is fully
// synchronous and its constructor, all of its methods, and its destructor must
// be called on the same sequence. These methods may wait on the |va_lock_|
// which guards libva calls across all VaapiWrapper instances and other libva
// call sites. If the backend is known to be thread safe and
// |enforce_sequence_affinity_| is true when the |kGlobalVaapiLock| flag is
// disabled, |va_lock_| will be null and won't guard any libva calls.
//
// This class is responsible for managing VAAPI connection, contexts and state.
// It is also responsible for managing and freeing VABuffers (not VASurfaces),
// which are used to queue parameters and slice data to the HW codec,
// as well as underlying memory for VASurfaces themselves.
//
// Historical note: the sequence affinity characteristic was introduced as a
// pre-requisite to remove the global *|va_lock_|. However, the legacy
// VaapiVideoDecodeAccelerator is known to use its VaapiWrapper from multiple
// threads. Therefore, to avoid doing a large refactoring of a legacy class, we
// allow it to call VaapiWrapper::Create() or
// VaapiWrapper::CreateForVideoCodec() with |enforce_sequence_affinity| == false
// so that sequence affinity is not enforced. This also indicates that the
// global lock will still be in effect for the VaapiVideoDecodeAccelerator.
class MEDIA_GPU_EXPORT VaapiWrapper
: public base::RefCountedThreadSafe<VaapiWrapper> {
public:
enum CodecMode {
kDecode,
#if BUILDFLAG(IS_CHROMEOS_ASH)
// NOTE: A kDecodeProtected VaapiWrapper is created using the actual video
// profile and an extra VAProfileProtected, each with some special added
// VAConfigAttribs. Then when CreateProtectedSession() is called, it will
// then create a protected session using protected profile & entrypoint
// which gets attached to the decoding context (or attached when the
// decoding context is created or re-created). This then enables
// decrypt + decode support in the driver and encrypted frame data can then
// be submitted.
kDecodeProtected, // Decrypt + decode to protected surface.
#endif
kEncodeConstantBitrate, // Encode with Constant Bitrate algorithm.
kEncodeConstantQuantizationParameter, // Encode with Constant Quantization
// Parameter algorithm.
kEncodeVariableBitrate, // Encode with variable bitrate algorithm.
kVideoProcess,
kCodecModeMax,
};
// This is enum associated with VASurfaceAttribUsageHint.
enum class SurfaceUsageHint : int32_t {
kGeneric = VA_SURFACE_ATTRIB_USAGE_HINT_GENERIC,
kVideoDecoder = VA_SURFACE_ATTRIB_USAGE_HINT_DECODER,
kVideoEncoder = VA_SURFACE_ATTRIB_USAGE_HINT_ENCODER,
kVideoProcessWrite = VA_SURFACE_ATTRIB_USAGE_HINT_VPP_WRITE,
};
using InternalFormats = struct {
bool yuv420 : 1;
bool yuv420_10 : 1;
bool yuv422 : 1;
bool yuv444 : 1;
};
// Returns the type of the underlying VA-API implementation.
static VAImplementation GetImplementationType();
// Return an instance of VaapiWrapper initialized for |va_profile| and
// |mode|. |report_error_to_uma_cb| will be called independently from
// reporting errors to clients via method return values.
static scoped_refptr<VaapiWrapper> Create(
CodecMode mode,
VAProfile va_profile,
EncryptionScheme encryption_scheme,
const ReportErrorToUMACB& report_error_to_uma_cb,
bool enforce_sequence_affinity = true);
// Create VaapiWrapper for VideoCodecProfile. It maps VideoCodecProfile
// |profile| to VAProfile.
// |report_error_to_uma_cb| will be called independently from reporting
// errors to clients via method return values.
static scoped_refptr<VaapiWrapper> CreateForVideoCodec(
CodecMode mode,
VideoCodecProfile profile,
EncryptionScheme encryption_scheme,
const ReportErrorToUMACB& report_error_to_uma_cb,
bool enforce_sequence_affinity = true);
VaapiWrapper(const VaapiWrapper&) = delete;
VaapiWrapper& operator=(const VaapiWrapper&) = delete;
// Returns the supported SVC scalability modes for specified profile.
static std::vector<SVCScalabilityMode> GetSupportedScalabilityModes(
VideoCodecProfile media_profile,
VAProfile va_profile);
// Return the supported video encode profiles.
static VideoEncodeAccelerator::SupportedProfiles GetSupportedEncodeProfiles();
// Return the supported video decode profiles.
static VideoDecodeAccelerator::SupportedProfiles GetSupportedDecodeProfiles();
// Return true when decoding using |va_profile| is supported.
static bool IsDecodeSupported(VAProfile va_profile);
// Returns the supported internal formats for decoding using |va_profile|. If
// decoding is not supported for that profile, returns InternalFormats{}.
static InternalFormats GetDecodeSupportedInternalFormats(
VAProfile va_profile);
// Returns true if |rt_format| is supported for decoding using |va_profile|.
// Returns false if |rt_format| or |va_profile| is not supported for decoding.
static bool IsDecodingSupportedForInternalFormat(VAProfile va_profile,
unsigned int rt_format);
// Gets the minimum and maximum surface sizes allowed for |va_profile| in
// |codec_mode|. Returns true if both sizes can be obtained, false otherwise.
// Each dimension in |min_size| will be at least 1 (as long as this method
// returns true). Additionally, because of the initialization in
// VASupportedProfiles::FillProfileInfo_Locked(), the |max_size| is guaranteed
// to not be empty (as long as this method returns true).
static bool GetSupportedResolutions(VAProfile va_profile,
CodecMode codec_mode,
gfx::Size& min_size,
gfx::Size& max_size);
// Gets the maximum surface size allowed for decoding using |va_profile|.
// Returns true if the size can be obtained, false otherwise. Because of the
// initialization in VASupportedProfiles::FillProfileInfo_Locked(), the size
// is guaranteed to not be empty (as long as this method returns true).
static bool GetDecodeMaxResolution(VAProfile va_profile, gfx::Size* max_size);
// Obtains a suitable FOURCC that can be used in vaCreateImage() +
// vaGetImage(). |rt_format| corresponds to the JPEG's subsampling format.
// |preferred_fourcc| is the FOURCC of the format preferred by the caller. If
// it is determined that the VAAPI driver can do the conversion from the
// internal format (|rt_format|), *|suitable_fourcc| is set to
// |preferred_fourcc|. Otherwise, it is set to a supported format. Returns
// true if a suitable FOURCC could be determined, false otherwise (e.g., if
// the |rt_format| is unsupported by the driver). If |preferred_fourcc| is not
// a supported image format, *|suitable_fourcc| is set to VA_FOURCC_I420.
static bool GetJpegDecodeSuitableImageFourCC(unsigned int rt_format,
uint32_t preferred_fourcc,
uint32_t* suitable_fourcc);
// Checks to see if VAProfileNone is supported on this decoder
static bool IsVppProfileSupported();
// Checks the surface size is allowed for VPP. Returns true if the size is
// supported, false otherwise.
static bool IsVppResolutionAllowed(const gfx::Size& size);
// Returns true if the VPP supports converting from/to |fourcc|.
static bool IsVppFormatSupported(uint32_t fourcc);
// Returns the pixel formats supported by the VPP.
static std::vector<Fourcc> GetVppSupportedFormats();
// Returns true if VPP supports the format conversion from a JPEG decoded
// internal surface to a FOURCC. |rt_format| corresponds to the JPEG's
// subsampling format. |fourcc| is the output surface's FOURCC.
static bool IsVppSupportedForJpegDecodedSurfaceToFourCC(
unsigned int rt_format,
uint32_t fourcc);
// Return true when JPEG encode is supported.
static bool IsJpegEncodeSupported();
// Return true when the specified image format is supported.
static bool IsImageFormatSupported(const VAImageFormat& format);
// Returns the list of VAImageFormats supported by the driver.
static const std::vector<VAImageFormat>& GetSupportedImageFormatsForTesting();
// Returns the list of supported profiles and entrypoints for a given |mode|.
static std::map<VAProfile, std::vector<VAEntrypoint>>
GetSupportedConfigurationsForCodecModeForTesting(CodecMode mode);
static VAEntrypoint GetDefaultVaEntryPoint(CodecMode mode, VAProfile profile);
static uint32_t BufferFormatToVARTFormat(gfx::BufferFormat fmt);
  // Creates |num_surfaces| VASurfaceIDs of |va_format|, |size| and
  // |surface_usage_hints| and, if successful, creates a |va_context_id_| of the
  // same size. |surface_usage_hints| may affect the alignment and tiling of the
  // created surface. Returns true if successful, with the created IDs in
  // |va_surfaces|. The client is responsible for destroying |va_surfaces| via
  // DestroyContextAndSurfaces() to free the allocated surfaces.
  [[nodiscard]] virtual bool CreateContextAndSurfaces(
      unsigned int va_format,
      const gfx::Size& size,
      const std::vector<SurfaceUsageHint>& surface_usage_hints,
      size_t num_surfaces,
      std::vector<VASurfaceID>* va_surfaces);
  // Creates |num_surfaces| ScopedVASurfaces of |va_format| and |size| and, if
  // successful, creates a |va_context_id_| of the same size. Returns an empty
  // vector if creation failed. If |visible_size| is supplied, the returned
  // ScopedVASurface's size is set to it. Otherwise, it's set to |size| (refer
  // to CreateScopedVASurfaces() for details).
  virtual std::vector<std::unique_ptr<ScopedVASurface>>
  CreateContextAndScopedVASurfaces(
      unsigned int va_format,
      const gfx::Size& size,
      const std::vector<SurfaceUsageHint>& usage_hints,
      size_t num_surfaces,
      const absl::optional<gfx::Size>& visible_size);
  // Attempts to create a protected session that will be attached to the
  // decoding context to enable encrypted video decoding. If it cannot be
  // attached now, it will be attached when the decoding context is created or
  // re-created. |encryption| should be the encryption scheme from the
  // DecryptConfig. |hw_config| should have been obtained from the OEMCrypto
  // implementation via the CdmFactoryDaemonProxy. |hw_identifier_out| is an
  // output parameter which will return session-specific information which can
  // be passed through the ChromeOsCdmContext to retrieve encrypted key
  // information. Returns true on success and false otherwise.
  bool CreateProtectedSession(media::EncryptionScheme encryption,
                              const std::vector<uint8_t>& hw_config,
                              std::vector<uint8_t>* hw_identifier_out);
  // Returns true if and only if we have created a protected session and
  // querying libva indicates that our protected session is no longer alive,
  // otherwise this will return false.
  bool IsProtectedSessionDead();
#if BUILDFLAG(IS_CHROMEOS_ASH)
  // Returns true if and only if |va_protected_session_id| is not VA_INVALID_ID
  // and querying libva indicates that the protected session identified by
  // |va_protected_session_id| is no longer alive.
  bool IsProtectedSessionDead(VAProtectedSessionID va_protected_session_id);
  // Returns the ID of the current protected session or VA_INVALID_ID if there's
  // none. This must be called on the same sequence as other methods that use
  // the protected session ID internally.
  //
  // TODO(b/183515581): update this documentation once we force the VaapiWrapper
  // to be used on a single sequence.
  VAProtectedSessionID GetProtectedSessionID() const;
#endif
  // If we have a protected session, destroys it immediately. This should be
  // used as part of recovering dead protected sessions.
  void DestroyProtectedSession();
  // Releases the |va_surfaces| and destroys |va_context_id_|.
  void DestroyContextAndSurfaces(std::vector<VASurfaceID> va_surfaces);
  // Creates a VAContextID of |size| (unless it's a Vpp context in which case
  // |size| is ignored and 0x0 is used instead). The client is responsible for
  // releasing said context via DestroyContext() or DestroyContextAndSurfaces(),
  // or it will be released on dtor. If a valid |va_protected_session_id_|
  // exists, it will be attached to the newly created |va_context_id_| as well.
  [[nodiscard]] virtual bool CreateContext(const gfx::Size& size);
  // Destroys the context identified by |va_context_id_|.
  virtual void DestroyContext();
  // Requests |num_surfaces| ScopedVASurfaces of size |size|, |va_rt_format| and
  // optionally |va_fourcc|. Returns self-cleaning ScopedVASurfaces or an empty
  // vector if creation failed. If |visible_size| is supplied, the returned
  // ScopedVASurfaces' sizes are set to it: for example, we may want to request
  // a 16x16 surface to decode a 13x12 JPEG: we may want to keep track of the
  // visible size 13x12 inside the ScopedVASurface to inform the surface's users
  // that that's the only region with meaningful content. If |visible_size| is
  // not supplied, we store |size| in the returned ScopedVASurfaces.
  virtual std::vector<std::unique_ptr<ScopedVASurface>> CreateScopedVASurfaces(
      unsigned int va_rt_format,
      const gfx::Size& size,
      const std::vector<SurfaceUsageHint>& usage_hints,
      size_t num_surfaces,
      const absl::optional<gfx::Size>& visible_size,
      const absl::optional<uint32_t>& va_fourcc);
  // Creates a self-releasing VASurface from |pixmap|. The created VASurface
  // shares the ownership of the underlying buffer represented by |pixmap|. The
  // ownership of the surface is transferred to the caller. A caller can destroy
  // |pixmap| after this method returns and the underlying buffer will be kept
  // alive by the VASurface. |protected_content| should only be true if the
  // format needs VA_RT_FORMAT_PROTECTED (currently only true for AMD).
  virtual scoped_refptr<VASurface> CreateVASurfaceForPixmap(
      scoped_refptr<gfx::NativePixmap> pixmap,
      bool protected_content = false);
  // Creates a self-releasing VASurface from |buffers|. The ownership of the
  // surface is transferred to the caller. |buffers| should be a pointer array
  // of size 1, with |buffer_size| corresponding to its size. |size| should be
  // the desired surface dimensions (which does not need to map to |buffer_size|
  // in any relevant way). |buffers| should be kept alive when using the
  // VASurface and for accessing the data after the operation is complete.
  scoped_refptr<VASurface> CreateVASurfaceForUserPtr(const gfx::Size& size,
                                                     uintptr_t* buffers,
                                                     size_t buffer_size);
  // Creates a self-releasing VASurface with the specified usage hints. The
  // ownership of the surface is transferred to the caller. |size| should be
  // the desired surface dimensions.
  scoped_refptr<VASurface> CreateVASurfaceWithUsageHints(
      unsigned int va_rt_format,
      const gfx::Size& size,
      const std::vector<SurfaceUsageHint>& usage_hints);
  // Implementations of the pixmap exporter for both types of VASurface.
  // See ExportVASurfaceAsNativePixmapDmaBufUnwrapped() for further
  // documentation.
  std::unique_ptr<NativePixmapAndSizeInfo> ExportVASurfaceAsNativePixmapDmaBuf(
      const VASurface& va_surface);
  std::unique_ptr<NativePixmapAndSizeInfo> ExportVASurfaceAsNativePixmapDmaBuf(
      const ScopedVASurface& scoped_va_surface);
  // Synchronizes the VASurface explicitly. This is useful when sharing a
  // surface between contexts.
  [[nodiscard]] bool SyncSurface(VASurfaceID va_surface_id);
  // Calls SubmitBuffer_Locked() to request libva to allocate a new VABufferID
  // of |va_buffer_type| and |size|, and to map-and-copy the |data| into it. The
  // allocated VABufferIDs stay alive until DestroyPendingBuffers_Locked(). Note
  // that this method does not submit the buffers for execution, they are simply
  // stored until ExecuteAndDestroyPendingBuffers()/Execute_Locked(). The
  // ownership of |data| stays with the caller. On failure, all pending buffers
  // are destroyed.
  [[nodiscard]] bool SubmitBuffer(VABufferType va_buffer_type,
                                  size_t size,
                                  const void* data);
  // Convenient templatized version of SubmitBuffer() where |size| is deduced to
  // be the size of the type of |*data|.
  template <typename T>
  [[nodiscard]] bool SubmitBuffer(VABufferType va_buffer_type, const T* data) {
    CHECK(!enforce_sequence_affinity_ ||
          sequence_checker_.CalledOnValidSequence());
    return SubmitBuffer(va_buffer_type, sizeof(T), data);
  }
  // Batch-version of SubmitBuffer(), where the lock for accessing libva is
  // acquired only once.
  struct VABufferDescriptor {
    VABufferType type;
    size_t size;
    const void* data;
  };
  [[nodiscard]] bool SubmitBuffers(
      const std::vector<VABufferDescriptor>& va_buffers);
  // Destroys all |pending_va_buffers_| sent via SubmitBuffer*(). Useful when a
  // pending job is to be cancelled (on reset or error).
  void DestroyPendingBuffers();
  // Executes the job in hardware on the target |va_surface_id| and destroys
  // pending buffers. Returns false if Execute() fails.
  [[nodiscard]] virtual bool ExecuteAndDestroyPendingBuffers(
      VASurfaceID va_surface_id);
  // Maps each |va_buffers| ID and copies the data described by the associated
  // VABufferDescriptor into it; then calls Execute_Locked() on |va_surface_id|.
  [[nodiscard]] bool MapAndCopyAndExecute(
      VASurfaceID va_surface_id,
      const std::vector<std::pair<VABufferID, VABufferDescriptor>>& va_buffers);
#if BUILDFLAG(USE_VAAPI_X11)
  // Puts data from |va_surface_id| into |x_pixmap| of size
  // |dest_size|, converting/scaling to it.
  [[nodiscard]] bool PutSurfaceIntoPixmap(VASurfaceID va_surface_id,
                                          x11::Pixmap x_pixmap,
                                          gfx::Size dest_size);
#endif  // BUILDFLAG(USE_VAAPI_X11)
  // Creates a ScopedVAImage from a VASurface |va_surface_id| and maps it into
  // memory with the given |format| and |size|. If |format| is not equal to the
  // internal format, the underlying implementation will do format conversion if
  // supported. |size| should be smaller than or equal to the surface. If |size|
  // is smaller, the image will be cropped.
  std::unique_ptr<ScopedVAImage> CreateVaImage(VASurfaceID va_surface_id,
                                               VAImageFormat* format,
                                               const gfx::Size& size);
  // Uploads the contents of |frame| into |va_surface_id| for encoding.
  [[nodiscard]] virtual bool UploadVideoFrameToSurface(
      const VideoFrame& frame,
      VASurfaceID va_surface_id,
      const gfx::Size& va_surface_size);
  // Creates a buffer of |size| bytes to be used as encode output.
  virtual std::unique_ptr<ScopedVABuffer> CreateVABuffer(VABufferType type,
                                                         size_t size);
  // Gets the encoded frame's linear size of the buffer with the given
  // |buffer_id|. |sync_surface_id| will be used as a sync point, i.e. it will
  // have to become idle before starting the acquirement. |sync_surface_id|
  // should be the source surface passed to the encode job. Returns 0 if it
  // fails for any reason.
  [[nodiscard]] virtual uint64_t GetEncodedChunkSize(
      VABufferID buffer_id,
      VASurfaceID sync_surface_id);
  // Downloads the contents of the buffer with given |buffer_id| into a buffer
  // of size |target_size|, pointed to by |target_ptr|. The number of bytes
  // downloaded will be returned in |coded_data_size|. |sync_surface_id| will
  // be used as a sync point, i.e. it will have to become idle before starting
  // the download. |sync_surface_id| should be the source surface passed
  // to the encode job. |sync_surface_id| will be nullopt when it has already
  // been synced in GetEncodedChunkSize(), in which case vaSyncSurface() is not
  // executed. Returns false if it fails for any reason, e.g. when the linear
  // size of the resulting encoded frame is larger than |target_size|.
  [[nodiscard]] virtual bool DownloadFromVABuffer(
      VABufferID buffer_id,
      absl::optional<VASurfaceID> sync_surface_id,
      uint8_t* target_ptr,
      size_t target_size,
      size_t* coded_data_size);
  // Gets the maximum number of reference frames for encoding supported by the
  // driver.
  // For H.264 encoding, the value represents the maximum number of reference
  // frames for both the reference picture list 0 (bottom 16 bits) and the
  // reference picture list 1 (top 16 bits).
  [[nodiscard]] virtual bool GetVAEncMaxNumOfRefFrames(
      VideoCodecProfile profile,
      size_t* max_ref_frames);
  // Queries whether packed headers are supported for encoding. This is called
  // for H.264 encoding. |packed_sps|, |packed_pps| and |packed_slice| stand for
  // whether a packed sequence parameter set, a packed picture parameter set and
  // a packed slice header are supported, respectively.
  [[nodiscard]] virtual bool GetSupportedPackedHeaders(
      VideoCodecProfile profile,
      bool& packed_sps,
      bool& packed_pps,
      bool& packed_slice);
  // Checks if the driver supports frame rotation.
  bool IsRotationSupported();
  // Blits a VASurface |va_surface_src| into another VASurface
  // |va_surface_dest| applying pixel format conversion, rotation, cropping
  // and scaling if needed. |src_rect| and |dest_rect| are optional. They can
  // be used to specify the area used in the blit. If |va_protected_session_id|
  // is provided and is not VA_INVALID_ID, the corresponding protected session
  // is attached to the VPP context prior to submitting the VPP buffers and
  // detached after submitting those buffers.
  [[nodiscard]] virtual bool BlitSurface(
      const VASurface& va_surface_src,
      const VASurface& va_surface_dest,
      absl::optional<gfx::Rect> src_rect = absl::nullopt,
      absl::optional<gfx::Rect> dest_rect = absl::nullopt,
      VideoRotation rotation = VIDEO_ROTATION_0
#if BUILDFLAG(IS_CHROMEOS_ASH)
      ,
      VAProtectedSessionID va_protected_session_id = VA_INVALID_ID
#endif
  );
  // Initializes static data before the sandbox is enabled.
  static void PreSandboxInitialization();
  // vaDestroySurfaces() a vector or a single VASurfaceID.
  virtual void DestroySurfaces(std::vector<VASurfaceID> va_surfaces);
  virtual void DestroySurface(VASurfaceID va_surface_id);
 protected:
  // If |enforce_sequence_affinity| is true, methods CHECK that they are called
  // on a single sequence (see the templatized SubmitBuffer()).
  explicit VaapiWrapper(CodecMode mode, bool enforce_sequence_affinity = true);
  virtual ~VaapiWrapper();
 private:
  friend class base::RefCountedThreadSafe<VaapiWrapper>;
  friend class VaapiWrapperTest;
  friend class VaapiVideoEncodeAcceleratorTest;
  FRIEND_TEST_ALL_PREFIXES(VaapiTest, LowQualityEncodingSetting);
  FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, ScopedVAImage);
  FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, BadScopedVAImage);
  FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, BadScopedVABufferMapping);
  FRIEND_TEST_ALL_PREFIXES(VaapiMinigbmTest, AllocateAndCompareWithMinigbm);
  // Initializes this wrapper for |va_profile| and |encryption_scheme|. Returns
  // false on failure.
  [[nodiscard]] bool Initialize(VAProfile va_profile,
                                EncryptionScheme encryption_scheme);
  // Releases the VA handles acquired in Initialize().
  void Deinitialize();
  // Initializes the libva display; |report_error_to_uma_cb| is stored for
  // reporting codec errors to UMA (see |report_error_to_uma_cb_|).
  [[nodiscard]] bool VaInitialize(
      const ReportErrorToUMACB& report_error_to_uma_cb);
  // Tries to allocate |num_surfaces| VASurfaceIDs of |size| and |va_format|.
  // Fills |va_surfaces| and returns true if successful, or returns false.
  [[nodiscard]] bool CreateSurfaces(
      unsigned int va_format,
      const gfx::Size& size,
      const std::vector<SurfaceUsageHint>& usage_hints,
      size_t num_surfaces,
      std::vector<VASurfaceID>* va_surfaces);
  // Syncs and exports |va_surface_id| as a gfx::NativePixmapDmaBuf. Currently,
  // the only VAAPI surface pixel formats supported are VA_FOURCC_IMC3 and
  // VA_FOURCC_NV12.
  //
  // Notes:
  //
  // - For VA_FOURCC_IMC3, the format of the returned NativePixmapDmaBuf is
  //   gfx::BufferFormat::YVU_420 because we don't have a YUV_420 format. The
  //   planes are flipped accordingly, i.e.,
  //   gfx::NativePixmapDmaBuf::GetDmaBufOffset(1) refers to the V plane.
  //   TODO(andrescj): revisit once crrev.com/c/1573718 lands.
  //
  // - For VA_FOURCC_NV12, the format of the returned NativePixmapDmaBuf is
  //   gfx::BufferFormat::YUV_420_BIPLANAR.
  //
  // Returns nullptr on failure, or if the exported surface can't contain
  // |va_surface_size|.
  std::unique_ptr<NativePixmapAndSizeInfo>
  ExportVASurfaceAsNativePixmapDmaBufUnwrapped(
      VASurfaceID va_surface_id,
      const gfx::Size& va_surface_size);
  // Carries out the vaBeginPicture()-vaRenderPicture()-vaEndPicture() on target
  // |va_surface_id|. Returns false if any of these calls fails.
  [[nodiscard]] bool Execute_Locked(VASurfaceID va_surface_id,
                                    const std::vector<VABufferID>& va_buffers)
      EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // Destroys all |pending_va_buffers_| while |va_lock_| is already held.
  virtual void DestroyPendingBuffers_Locked()
      EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // Requests libva to allocate a new VABufferID of type |va_buffer.type|, then
  // maps-and-copies |va_buffer.size| contents of |va_buffer.data| to it. If a
  // failure occurs, calls DestroyPendingBuffers_Locked() and returns false.
  [[nodiscard]] virtual bool SubmitBuffer_Locked(
      const VABufferDescriptor& va_buffer) EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // Maps |va_buffer_id| and, if successful, copies the contents of |va_buffer|
  // into it.
  [[nodiscard]] bool MapAndCopy_Locked(VABufferID va_buffer_id,
                                       const VABufferDescriptor& va_buffer)
      EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // Queries whether |va_profile_| and |va_entrypoint_| support encoding quality
  // setting and, if available, configures it to its maximum value, for lower
  // power consumption and maximum speed.
  void MaybeSetLowQualityEncoding_Locked() EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // If a protected session is active, attaches it to the decoding context.
  [[nodiscard]] bool MaybeAttachProtectedSession_Locked()
      EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
  // The codec mode this wrapper was created for; fixed at construction.
  const CodecMode mode_;
  // When true, methods CHECK that they are called on a single sequence via
  // |sequence_checker_| (see the templatized SubmitBuffer()).
  const bool enforce_sequence_affinity_;
  base::SequenceCheckerImpl sequence_checker_;
  // If using the global VA lock, this is a pointer to VADisplayState's member
  // |va_lock_|. Guaranteed to be valid for the lifetime of VaapiWrapper.
  raw_ptr<base::Lock> va_lock_;
  // VA handles.
  // All are valid after a successful Initialize() and until Deinitialize().
  VADisplay va_display_ GUARDED_BY(va_lock_);
  VAConfigID va_config_id_{VA_INVALID_ID};
  // Created in CreateContext() or CreateContextAndSurfaces() and valid until
  // DestroyContext() or DestroyContextAndSurfaces().
  VAContextID va_context_id_{VA_INVALID_ID};
  // Profile and entrypoint configured for the corresponding |va_context_id_|.
  VAProfile va_profile_;
  VAEntrypoint va_entrypoint_;
  // Data queued up for the HW codec, to be committed on the next execution.
  // TODO(b/166646505): let callers manage the lifetime of these buffers.
  std::vector<VABufferID> pending_va_buffers_;
  // VA buffer to be used for kVideoProcess. Allocated the first time around,
  // and reused afterwards.
  std::unique_ptr<ScopedVABuffer> va_buffer_for_vpp_;
#if BUILDFLAG(IS_CHROMEOS_ASH)
  // For protected decode mode.
  VAConfigID va_protected_config_id_{VA_INVALID_ID};
  VAProtectedSessionID va_protected_session_id_{VA_INVALID_ID};
#endif
  // Called to report codec errors to UMA. Errors to clients are reported via
  // return values from public methods.
  ReportErrorToUMACB report_error_to_uma_cb_;
};
} // namespace media
#endif // MEDIA_GPU_VAAPI_VAAPI_WRAPPER_H_