Overview
In an earlier series we went through the overall flow of the display system, and a few parts were left unclear, so I am writing a few dedicated follow-up posts.
This one looks at how GraphicBuffer memory is allocated. In a previous post we saw that when a client process creates a Surface, the returned gbp parameter has type sp<IGraphicBufferProducer>. That path has already been covered, so we will not repeat it here; the gbp is created in Layer's onFirstRef.
In BufferQueue::createBufferQueue, the producer and consumer are created, and then a MonitoredProducer object is constructed with the producer as its argument.
void Layer::onFirstRef() {
// Creates a custom BufferQueue for SurfaceFlingerConsumer to use
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
mProducer = new MonitoredProducer(producer, mFlinger);
......
The Binder class used between the client process and SurfaceFlinger
MonitoredProducer inherits from the IGraphicBufferProducer class.
class MonitoredProducer : public IGraphicBufferProducer {
Now let's look at the implementation of IGraphicBufferProducer. It is typical Binder usage: the class has both a Bp-side and a Bn-side implementation. This is the Binder channel through which the client process and the SurfaceFlinger process communicate about buffer memory.
Take the requestBuffer function: the client process talks to SurfaceFlinger over Binder to fetch the data, creates a new GraphicBuffer object, and unpacks the data into it.
class BpGraphicBufferProducer : public BpInterface<IGraphicBufferProducer>
{
public:
BpGraphicBufferProducer(const sp<IBinder>& impl)
: BpInterface<IGraphicBufferProducer>(impl)
{
}
virtual ~BpGraphicBufferProducer();
virtual status_t requestBuffer(int bufferIdx, sp<GraphicBuffer>* buf) {
Parcel data, reply;
data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
data.writeInt32(bufferIdx);
status_t result = remote()->transact(REQUEST_BUFFER, data, &reply); // fetch the data from SurfaceFlinger over Binder
if (result != NO_ERROR) {
return result;
}
bool nonNull = reply.readInt32();
if (nonNull) {
*buf = new GraphicBuffer(); // create a new GraphicBuffer
result = reply.read(**buf); // unflatten the data received from SurfaceFlinger into the new object
if(result != NO_ERROR) {
(*buf).clear();
return result;
}
}
result = reply.readInt32();
return result;
}
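On the client side, this requestBuffer call is typically reached from Surface::dequeueBuffer: when dequeueBuffer reports BUFFER_NEEDS_REALLOCATION for a slot (or the slot has no cached buffer yet), the Surface asks the producer for the new GraphicBuffer. Below is a simplified sketch of that call site in Surface.cpp, with error handling trimmed; it is meant to show the flow, not the exact source.
int buf = -1;
sp<Fence> fence;
// ask the producer for a free slot; reqWidth/reqHeight/mReqFormat/mReqUsage are the Surface's current settings
status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence,
        swapIntervalZero, reqWidth, reqHeight, mReqFormat, mReqUsage);
sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);
if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == 0) {
    // the slot was (re)allocated on the SurfaceFlinger side, so fetch the new GraphicBuffer
    result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
    if (result != NO_ERROR) {
        mGraphicBufferProducer->cancelBuffer(buf, fence);
        return result;
    }
}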
Now let's see how the Bn side in SurfaceFlinger is implemented. It mainly calls into the subclass to obtain the buffer, and then sends it out over Binder.
status_t BnGraphicBufferProducer::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case REQUEST_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferIdx = data.readInt32();
sp<GraphicBuffer> buffer;
int result = requestBuffer(bufferIdx, &buffer); // call the subclass's requestBuffer to obtain the buffer
reply->writeInt32(buffer != 0);
if (buffer != 0) {
reply->write(*buffer); // flatten the buffer into the reply
}
reply->writeInt32(result);
return NO_ERROR;
The subclass here is MonitoredProducer, which simply forwards to mProducer, the object passed in when the MonitoredProducer was created; that object is a BufferQueueProducer.
status_t MonitoredProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
return mProducer->requestBuffer(slot, buf);
}
Creating a new GraphicBuffer
BufferQueueProducer::requestBuffer returns the buffer stored in mSlots. As analyzed before, the buffers in mSlots are allocated in the dequeueBuffer function.
status_t BufferQueueProducer::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
ATRACE_CALL();
BQ_LOGV("requestBuffer: slot %d", slot);
Mutex::Autolock lock(mCore->mMutex);
if (mCore->mIsAbandoned) {
BQ_LOGE("requestBuffer: BufferQueue has been abandoned");
return NO_INIT;
}
if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
BQ_LOGE("requestBuffer: slot index %d out of range [0, %d)",
slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
return BAD_VALUE;
} else if (mSlots[slot].mBufferState != BufferSlot::DEQUEUED) {
BQ_LOGE("requestBuffer: slot %d is not owned by the producer "
"(state = %d)", slot, mSlots[slot].mBufferState);
return BAD_VALUE;
}
mSlots[slot].mRequestBufferCalled = true;
*buf = mSlots[slot].mGraphicBuffer; // fetch the buffer from mSlots
return NO_ERROR;
}
We have analyzed this before, so only briefly: in dequeueBuffer, if the returned flags include BUFFER_NEEDS_REALLOCATION, a new GraphicBuffer has to be allocated, which is done through mCore->mAllocator->createGraphicBuffer.
......
if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
status_t error;
BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
sp<GraphicBuffer> graphicBuffer(mCore->mAllocator->createGraphicBuffer(
width, height, format, usage, &error));
......
The GraphicBufferAlloc::createGraphicBuffer function is shown below.
sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t width,
uint32_t height, PixelFormat format, uint32_t usage, status_t* error) {
sp<GraphicBuffer> graphicBuffer(
new GraphicBuffer(width, height, format, usage));
status_t err = graphicBuffer->initCheck();
*error = err;
if (err != 0 || graphicBuffer->handle == 0) {
if (err == NO_MEMORY) {
GraphicBuffer::dumpAllocationsToSystemLog();
}
ALOGE("GraphicBufferAlloc::createGraphicBuffer(w=%d, h=%d) "
"failed (%s), handle=%p",
width, height, strerror(-err), graphicBuffer->handle);
return 0;
}
return graphicBuffer;
}
Next, look at the GraphicBuffer constructor. It mainly initializes the member variables and then calls GraphicBuffer's initSize function.
GraphicBuffer::GraphicBuffer(uint32_t inWidth, uint32_t inHeight,
PixelFormat inFormat, uint32_t inUsage)
: BASE(), mOwner(ownData), mBufferMapper(GraphicBufferMapper::get()),
mInitCheck(NO_ERROR), mId(getUniqueId())
{
width = height = stride = format = usage = 0;
handle = NULL;
mInitCheck = initSize(inWidth, inHeight, inFormat, inUsage);
}
This function allocates the buffer through GraphicBufferAllocator's alloc.
status_t GraphicBuffer::initSize(uint32_t inWidth, uint32_t inHeight,
PixelFormat inFormat, uint32_t inUsage)
{
GraphicBufferAllocator& allocator = GraphicBufferAllocator::get();
uint32_t outStride = 0;
status_t err = allocator.alloc(inWidth, inHeight, inFormat, inUsage,
&handle, &outStride);
if (err == NO_ERROR) {
width = static_cast<int>(inWidth);
height = static_cast<int>(inHeight);
format = inFormat;
usage = static_cast<int>(inUsage);
stride = static_cast<int>(outStride);
}
return err;
}
Allocating the buffer with the HAL alloc function
The GraphicBufferAllocator constructor loads the HAL module.
GraphicBufferAllocator::GraphicBufferAllocator()
: mAllocDev(0)
{
hw_module_t const* module;
int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
if (err == 0) {
gralloc_open(module, &mAllocDev);
}
}
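For reference, gralloc_open itself is a thin inline helper in hardware/gralloc.h; roughly, it just asks the loaded module to open its GPU allocation device:
// inline helper from hardware/libhardware/include/hardware/gralloc.h (paraphrased)
static inline int gralloc_open(const struct hw_module_t* module,
        struct alloc_device_t** device) {
    return module->methods->open(module,
            GRALLOC_HARDWARE_GPU0, (struct hw_device_t**)device);
}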
Memory is then allocated in alloc.
status_t GraphicBufferAllocator::alloc(uint32_t width, uint32_t height,
PixelFormat format, uint32_t usage, buffer_handle_t* handle,
uint32_t* stride)
{
ATRACE_CALL();
// make sure to not allocate a N x 0 or 0 x N buffer, since this is
// allowed from an API stand-point allocate a 1x1 buffer instead.
if (!width || !height)
width = height = 1;
// we have a h/w allocator and h/w buffer is requested
status_t err;
// Filter out any usage bits that should not be passed to the gralloc module
usage &= GRALLOC_USAGE_ALLOC_MASK;
int outStride = 0;
err = mAllocDev->alloc(mAllocDev, static_cast<int>(width),
static_cast<int>(height), format, static_cast<int>(usage), handle,
&outStride);
*stride = static_cast<uint32_t>(outStride);
if (err == NO_ERROR) {
Mutex::Autolock _l(sLock);
KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
uint32_t bpp = bytesPerPixel(format);
alloc_rec_t rec;
rec.width = width;
rec.height = height;
rec.stride = *stride;
rec.format = format;
rec.usage = usage;
rec.size = static_cast<size_t>(height * (*stride) * bpp);
list.add(*handle, rec);
}
return err;
}
We have already analyzed the HAL side of the allocation: alloc_device_alloc in alloc_device.cpp is called, and the buffer_handle_t type here is just a pointer to native_handle_t.
static int alloc_device_alloc(alloc_device_t* dev, int w, int h, int format, int usage, buffer_handle_t* pHandle, int* pStride)
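The exact implementation is vendor specific. To make the rest of the discussion concrete, here is a simplified sketch of the ashmem-backed path used by the default software gralloc module: allocate a shared-memory region, wrap its fd in a private_handle_t, and map it into the allocating process. Names and details follow the default module rather than any vendor module, and stride/format/usage handling is omitted.
// sketch only, assuming an ashmem-backed software gralloc
static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage,
        buffer_handle_t* pHandle) {
    const size_t pageSize = getpagesize();
    size = (size + pageSize - 1) & ~(pageSize - 1); // ashmem regions are page granular
    int fd = ashmem_create_region("gralloc-buffer", size); // shared-memory fd
    if (fd < 0)
        return -errno;
    private_handle_t* hnd = new private_handle_t(fd, size, 0);
    // map the region into this process and remember the base address
    void* base = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) {
        delete hnd;
        close(fd);
        return -errno;
    }
    hnd->base = uintptr_t(base);
    *pHandle = hnd; // this handle flows back up as the buffer_handle_t
    return 0;
}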
Let's look at the native_handle_t type: numFds is how many entries at the start of the data[] array are file descriptors, and numInts is how many of the entries after them are plain int fields.
typedef struct native_handle
{
int version; /* sizeof(native_handle_t) */
int numFds; /* number of file-descriptors at &data[0] */
int numInts; /* number of ints at &data[numFds] */
int data[0]; /* numFds + numInts ints */
} native_handle_t;
When alloc_device_alloc actually allocates, it creates a new private_handle_t, so everything produced by the allocation ends up in that object. Because native_handle is its first member (its base class in C++), pointers to the two types can be cast to each other.
#ifdef __cplusplus
struct private_handle_t : public native_handle {
#else
struct private_handle_t {
struct native_handle nativeHandle;
#endif
enum {
PRIV_FLAGS_FRAMEBUFFER = 0x00000001
};
// file-descriptors
int fd;
// ints
int magic;
int flags;
int size;
int offset;
// FIXME: the attributes below should be out-of-line
uint64_t base __attribute__((aligned(8)));
int pid;
#ifdef __cplusplus
static inline int sNumInts() {
return (((sizeof(private_handle_t) - sizeof(native_handle_t))/sizeof(int)) - sNumFds);
}
static const int sNumFds = 1;
static const int sMagic = 0x3141592;
private_handle_t(int fd, int size, int flags) :
fd(fd), magic(sMagic), flags(flags), size(size), offset(0),
base(0), pid(getpid())
{
version = sizeof(native_handle);
numInts = sNumInts();
numFds = sNumFds;
}
~private_handle_t() {
magic = 0;
}
static int validate(const native_handle* h) {
const private_handle_t* hnd = (const private_handle_t*)h;
if (!h || h->version != sizeof(native_handle) ||
h->numInts != sNumInts() || h->numFds != sNumFds ||
hnd->magic != sMagic)
{
ALOGE("invalid gralloc handle (at %p)", h);
return -EINVAL;
}
return 0;
}
#endif
};
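Because native_handle is the first member (the base class in the C++ build), a buffer_handle_t handed out by gralloc can be cast back to private_handle_t to reach these fields, which is exactly what validate() above relies on. A tiny hypothetical helper to illustrate the cast described here:
// hypothetical helper, not part of the source tree
static int getBufferFd(buffer_handle_t handle) {
    if (private_handle_t::validate(handle) < 0)
        return -1; // the handle was not produced by this gralloc module
    const private_handle_t* hnd =
            reinterpret_cast<const private_handle_t*>(handle);
    return hnd->fd; // the shared-memory fd carried at data[0]
}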
Flattening the GraphicBuffer and sending it to the client process
Once the GraphicBuffer has been allocated, it is sent to the client process over Binder.
status_t BnGraphicBufferProducer::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case REQUEST_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferIdx = data.readInt32();
sp<GraphicBuffer> buffer;
int result = requestBuffer(bufferIdx, &buffer);
reply->writeInt32(buffer != 0);
if (buffer != 0) {
reply->write(*buffer);
}
reply->writeInt32(result);
return NO_ERROR;
}
Here we need to look at GraphicBuffer's flatten interface. To be sent over Binder, the class has to implement the Flattenable interface. Let's start with flatten, which can be understood as serializing the data so it can be sent through Binder.
fds holds the file descriptors, and buffer holds the plain data.
status_t GraphicBuffer::flatten(void*& buffer, size_t& size, int*& fds, size_t& count) const {
size_t sizeNeeded = GraphicBuffer::getFlattenedSize();
if (size < sizeNeeded) return NO_MEMORY;
size_t fdCountNeeded = GraphicBuffer::getFdCount();
if (count < fdCountNeeded) return NO_MEMORY;
int32_t* buf = static_cast<int32_t*>(buffer);
buf[0] = 'GBFR'; // fill in the buffer
buf[1] = width;
buf[2] = height;
buf[3] = stride;
buf[4] = format;
buf[5] = usage;
buf[6] = static_cast<int32_t>(mId >> 32);
buf[7] = static_cast<int32_t>(mId & 0xFFFFFFFFull);
buf[8] = static_cast<int32_t>(mGenerationNumber);
buf[9] = 0;
buf[10] = 0;
if (handle) {
buf[9] = handle->numFds; // number of fds
buf[10] = handle->numInts; // number of int fields
memcpy(fds, handle->data, // copy the handle's fds into the fds array
static_cast<size_t>(handle->numFds) * sizeof(int));
memcpy(&buf[11], handle->data + handle->numFds, // copy the handle's int fields to the addresses starting at buf[11]
static_cast<size_t>(handle->numInts) * sizeof(int));
}
buffer = static_cast<void*>(static_cast<uint8_t*>(buffer) + sizeNeeded);
size -= sizeNeeded;
if (handle) {
fds += handle->numFds;
count -= static_cast<size_t>(handle->numFds);
}
return NO_ERROR;
}
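The reply->write(*buffer) call in onTransact reaches this flatten() through Parcel's Flattenable support: Parcel first writes the flattened size and fd count, lets the object flatten into an in-place chunk of the parcel, and then dup()s each fd into the binder transaction so the receiver gets real file descriptors. A sketch of the idea (not the exact Parcel source):
// how Parcel roughly drives a Flattenable such as GraphicBuffer
status_t writeFlattenable(Parcel& reply, const GraphicBuffer& buffer) {
    size_t len = buffer.getFlattenedSize();
    size_t fdCount = buffer.getFdCount();
    reply.writeInt32(static_cast<int32_t>(len));
    reply.writeInt32(static_cast<int32_t>(fdCount));
    // reserve len bytes inside the parcel and flatten directly into them
    void* buf = reply.writeInplace(len);
    std::vector<int> fds(fdCount);
    int* fdData = fds.data();
    status_t err = buffer.flatten(buf, len, fdData, fdCount);
    // each fd is transferred as a real file descriptor, dup()ed for the receiver
    for (size_t i = 0; i < fds.size() && err == NO_ERROR; i++) {
        err = reply.writeDupFileDescriptor(fds[i]);
    }
    return err;
}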
Let's look again at the native_handle and private_handle_t types. native_handle is just a generic container: numFds says how many entries at the start of data[] are file descriptors, and numInts is the number of int fields that follow, i.e. what is left in data[] after the fds.
typedef struct native_handle
{
int version; /* sizeof(native_handle_t) */
int numFds; /* number of file-descriptors at &data[0] */
int numInts; /* number of ints at &data[numFds] */
int data[0]; /* numFds + numInts ints */
} native_handle_t;
The real implementation here is private_handle_t. The region starting at data[0] begins with the fd field below, followed by the int fields; there is exactly one fd, and the int fields start at magic.
#ifdef __cplusplus
struct private_handle_t : public native_handle {
#else
struct private_handle_t {
struct native_handle nativeHandle;
#endif
enum {
PRIV_FLAGS_FRAMEBUFFER = 0x00000001
};
// file-descriptors: this is the fd at the very start of data[0]
int fd;
// ints: the int fields follow
int magic;
int flags;
int size;
int offset;
// FIXME: the attributes below should be out-of-line
uint64_t base __attribute__((aligned(8)));
int pid;
Now look at the private_handle_t constructor: numFds is 1, and numInts is the number of ints occupied by private_handle_t's own fields beyond the native_handle_t header and the fds, which is what sNumInts() computes. Because private_handle_t inherits from native_handle_t, the numFds and numInts set here are the fields of native_handle_t itself.
static inline int sNumInts() {
return (((sizeof(private_handle_t) - sizeof(native_handle_t))/sizeof(int)) - sNumFds);
}
static const int sNumFds = 1;
static const int sMagic = 0x3141592;
private_handle_t(int fd, int size, int flags) :
fd(fd), magic(sMagic), flags(flags), size(size), offset(0),
base(0), pid(getpid())
{
version = sizeof(native_handle);
numInts = sNumInts();
numFds = sNumFds;
}
The client process unflattens the received GraphicBuffer
After the client process receives the flattened GraphicBuffer data, it still has to unflatten it before it has a usable GraphicBuffer object.
virtual status_t requestBuffer(int bufferIdx, sp<GraphicBuffer>* buf) {
Parcel data, reply;
data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
data.writeInt32(bufferIdx);
status_t result =remote()->transact(REQUEST_BUFFER, data, &reply);
if (result != NO_ERROR) {
return result;
}
bool nonNull = reply.readInt32(); // read the returned data
if (nonNull) {
*buf = new GraphicBuffer(); // create a new GraphicBuffer
result = reply.read(**buf);
if(result != NO_ERROR) {
(*buf).clear();
return result;
}
}
result = reply.readInt32();
return result;
}
Deserialization is done in the unflatten function, which is the exact inverse of flatten: it reads the data back out of buffer and fds and stores it in the GraphicBuffer's member variables.
status_t GraphicBuffer::unflatten(
void const*& buffer, size_t& size, int const*& fds, size_t& count) {
if (size < 11 * sizeof(int)) return NO_MEMORY;
int const* buf = static_cast<int const*>(buffer);
if (buf[0] != 'GBFR') return BAD_TYPE;
const size_t numFds = static_cast<size_t>(buf[9]);
const size_t numInts = static_cast<size_t>(buf[10]);
const size_t maxNumber = 4096;
if (numFds >= maxNumber || numInts >= (maxNumber - 11)) {
width = height = stride = format = usage = 0;
handle = NULL;
ALOGE("unflatten: numFds or numInts is too large: %zd, %zd",
numFds, numInts);
return BAD_VALUE;
}
const size_t sizeNeeded = (11 + numInts) * sizeof(int);
if (size < sizeNeeded) return NO_MEMORY;
size_t fdCountNeeded = numFds;
if (count < fdCountNeeded) return NO_MEMORY;
if (handle) {
// free previous handle if any
free_handle();
}
if (numFds || numInts) {
width = buf[1]; // store the data
height = buf[2];
stride = buf[3];
format = buf[4];
usage = buf[5];
native_handle* h = native_handle_create( // create a new native_handle object
static_cast<int>(numFds), static_cast<int>(numInts));
if (!h) {
width = height = stride = format = usage = 0;
handle = NULL;
ALOGE("unflatten: native_handle_create failed");
return NO_MEMORY;
}
memcpy(h->data, fds, numFds * sizeof(int)); // copy the fds into the handle
memcpy(h->data + numFds, &buf[11], numInts * sizeof(int)); // copy the ints starting at buf[11] into the handle
handle = h;
} else {
width = height = stride = format = usage = 0;
handle = NULL;
}
mId = static_cast<uint64_t>(buf[6]) << 32;
mId |= static_cast<uint32_t>(buf[7]);
mGenerationNumber = static_cast<uint32_t>(buf[8]);
mOwner = ownHandle;
if (handle != 0) {
status_t err = mBufferMapper.registerBuffer(handle); // register the handle with gralloc in this process
if (err != NO_ERROR) {
width = height = stride = format = usage = 0;
handle = NULL;
ALOGE("unflatten: registerBuffer failed: %s (%d)",
strerror(-err), err);
return err;
}
}
buffer = static_cast<void const*>(static_cast<uint8_t const*>(buffer) + sizeNeeded);
size -= sizeNeeded;
fds += numFds;
count -= numFds;
return NO_ERROR;
}
Finally, unflatten also calls mBufferMapper.registerBuffer.
status_t GraphicBufferMapper::registerBuffer(buffer_handle_t handle)
{
ATRACE_CALL();
status_t err;
err = mAllocMod->registerBuffer(mAllocMod, handle);
ALOGW_IF(err, "registerBuffer(%p) failed %d (%s)",
handle, err, strerror(-err));
return err;
}
This ends up calling the registerBuffer function of the HAL module, again the Gralloc module, which uses mmap to map the shared memory into the client process.
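A simplified sketch of that registration in a software gralloc module, based on the default implementation (vendor modules differ): the importing process mmap()s the fd carried in the handle, so both processes end up sharing the same pages.
// sketch of gralloc_register_buffer in an ashmem-backed gralloc module
static int gralloc_register_buffer(gralloc_module_t const* module,
        buffer_handle_t handle) {
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    private_handle_t* hnd = (private_handle_t*)handle;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
        // map the shared-memory fd into this (client) process
        void* base = mmap(0, hnd->size, PROT_READ | PROT_WRITE,
                MAP_SHARED, hnd->fd, 0);
        if (base == MAP_FAILED)
            return -errno;
        hnd->base = uintptr_t(base) + hnd->offset;
    }
    return 0;
}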
Wrap-up
That covers GraphicBuffer memory allocation in the Android 6.0 display system: the buffer is allocated through the gralloc HAL on the SurfaceFlinger side, flattened and sent to the client process over Binder, and then mapped into the client's address space when registerBuffer is called.