Android Drawing System (3): SurfaceTexture
SurfaceTexture is another important concept in the drawing system. As discussed earlier, a Surface is essentially a producer of frame data (it holds a bufferProducer reference) and is an implementation of an OpenGL window (it derives from ANativeWindow). SurfaceTexture is the opposite: it can be understood as a consumer of frame data.
The name SurfaceTexture contains the word Texture. What exactly is a texture? Anyone who has done game development probably has an intuition for it; if you have only heard the term, it is hard to pin down in words, so let's start from the code.
On Android, a texture can be created through the OpenGL ES API; what you get back is a texture ID (name).
int[] textureHandles = new int[1];
int textureHandle;
// The GL context has a texture object manager; glGenTextures simply hands out a unique texture ID.
//http://androidxref.com/5.0.0_r2/xref/frameworks/native/opengl/libagl/texture.cpp#891
GLES20.glGenTextures(1, textureHandles, 0);
textureHandle = textureHandles[0];
GlUtil.checkGlError("glGenTextures");
Next, bind the texture.
//http://androidxref.com/5.0.0_r2/xref/frameworks/native/opengl/libagl/texture.cpp#865
//http://androidxref.com/5.0.0_r2/xref/frameworks/native/opengl/libagl/TextureObjectManager.cpp#252
// Bind the texture handle to the 2D texture target.
// Using the ID, libagl creates an EGLTextureObject, stores it, and makes it the currently active texture.
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureHandle);
Then set a few texture parameters (filtering). The details are not essential here, so we can skip over them for now.
// Configure min/mag filtering, i.e. what scaling method do we use if what we're rendering
// is smaller or larger than the source image.
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
GlUtil.checkGlError("loadImageTexture");
Finally, attach a bitmap to the texture. (Attaching a bitmap is optional; without one there is only the EGLTextureObject, which by itself holds no image data.)
// The data is attached to the texture bound above (the currently active texture). The texture corresponds
// to an EGLTextureObject, which allocates storage through a GGLSurface.
// Here the bitmap's pixels are copied into the texture's buffer. A texture can be thought of as a bitmap
// living inside the OpenGL environment.
// Load the data from bitmap into the texture handle.
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, bmp, 0);
GlUtil.checkGlError("loadImageTexture");
Now for the key part: texImage2D ends up calling the native method
native private static int native_texImage2D(int target, int level, int internalformat,
        Bitmap bitmap, int type, int border)
with the following arguments:
target = GLES20.GL_TEXTURE_2D
level = 0
internalformat = -1
bitmap = 传入的bitmap
type = -1
border = 0
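internalformat = -1 and type = -1 simply mean "derive these values from the bitmap". The same mapping is exposed to Java through android.opengl.GLUtils, so you can check what a given Bitmap resolves to. A small sketch, assuming an ARGB_8888 bitmap:

Bitmap bmp = Bitmap.createBitmap(16, 16, Bitmap.Config.ARGB_8888);
int internalFormat = GLUtils.getInternalFormat(bmp); // GL_RGBA for ARGB_8888
int glType = GLUtils.getType(bmp);                   // GL_UNSIGNED_BYTE for ARGB_8888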
The implementation of native_texImage2D is:
static jint util_texImage2D(JNIEnv *env, jclass clazz,
jint target, jint level, jint internalformat,
jobject jbitmap, jint type, jint border)
{
SkBitmap const * nativeBitmap =
(SkBitmap const *)env->GetLongField(jbitmap, nativeBitmapID);
const SkBitmap& bitmap(*nativeBitmap);
SkColorType colorType = bitmap.colorType();
if (internalformat < 0) {
internalformat = getInternalFormat(colorType);
}
if (type < 0) {
type = getType(colorType);
}
int err = checkFormat(colorType, internalformat, type);
if (err)
return err;
bitmap.lockPixels();
const int w = bitmap.width();
const int h = bitmap.height();
const void* p = bitmap.getPixels();
if (internalformat == GL_PALETTE8_RGBA8_OES) {
if (sizeof(SkPMColor) != sizeof(uint32_t)) {
err = -1;
goto error;
}
const size_t size = bitmap.getSize();
const size_t palette_size = 256*sizeof(SkPMColor);
const size_t imageSize = size + palette_size;
void* const data = malloc(imageSize);
if (data) {
void* const pixels = (char*)data + palette_size;
SkColorTable* ctable = bitmap.getColorTable();
memcpy(data, ctable->lockColors(), ctable->count() * sizeof(SkPMColor));
memcpy(pixels, p, size);
ctable->unlockColors();
glCompressedTexImage2D(target, level, internalformat, w, h, border, imageSize, data);
free(data);
} else {
err = -1;
}
} else {
glTexImage2D(target, level, internalformat, w, h, border, internalformat, type, p);
}
error:
bitmap.unlockPixels();
return err;
}
When internalformat or type is negative, the value is derived from the bitmap itself; the function also fetches the bitmap's width, height, and pixel pointer. It then branches on internalformat: when the value is not GL_PALETTE8_RGBA8_OES (a compressed, palettized format), it calls glTexImage2D.
void glTexImage2D(
GLenum target, GLint level, GLint internalformat,
GLsizei width, GLsizei height, GLint border,
GLenum format, GLenum type, const GLvoid *pixels)
{
ogles_context_t* c = ogles_context_t::get();
if (target != GL_TEXTURE_2D) {
ogles_error(c, GL_INVALID_ENUM);
return;
}
if (width<0 || height<0 || border!=0 || level < 0) {
ogles_error(c, GL_INVALID_VALUE);
return;
}
if (format != (GLenum)internalformat) {
ogles_error(c, GL_INVALID_OPERATION);
return;
}
if (validFormatType(c, format, type)) {
return;
}
int32_t size = 0;
GGLSurface* surface = 0;
int error = createTextureSurface(c, &surface, &size,
level, format, type, width, height);
if (error) {
ogles_error(c, error);
return;
}
if (pixels) {
const int32_t formatIdx = convertGLPixelFormat(format, type);
const GGLFormat& pixelFormat(c->rasterizer.formats[formatIdx]);
const int32_t align = c->textures.unpackAlignment-1;
const int32_t bpr = ((width * pixelFormat.size) + align) & ~align;
const size_t size = bpr * height;
const int32_t stride = bpr / pixelFormat.size;
GGLSurface userSurface;
userSurface.version = sizeof(userSurface);
userSurface.width = width;
userSurface.height = height;
userSurface.stride = stride;
userSurface.format = formatIdx;
userSurface.compressedFormat = 0;
userSurface.data = (GLubyte*)pixels;
int err = copyPixels(c, *surface, 0, 0, userSurface, 0, 0, width, height);
if (err) {
ogles_error(c, err);
return;
}
generateMipmap(c, level);
}
}
Roughly speaking, the code does three things: first, it calls createTextureSurface to create a GGLSurface (surface); second, it builds a temporary GGLSurface (userSurface) from the bitmap's width, height, and pixel pointer; third, it copies the pixels from userSurface into surface.
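Before looking at createTextureSurface, a side note on the arithmetic: both glTexImage2D and createTextureSurface round the raw row size up to the unpack alignment to get bpr (bytes per row). A minimal Java transcription of that math, as a hypothetical helper (not an Android API), assuming 4 bytes per pixel as in RGBA_8888:

// bpr:    row size in bytes, rounded up to the unpack alignment (GL_UNPACK_ALIGNMENT)
// stride: row stride expressed in pixels
// size:   total bytes the texture storage needs
static int[] textureStorageSize(int width, int height, int bytesPerPixel, int unpackAlignment) {
    final int align = unpackAlignment - 1;
    final int bpr = ((width * bytesPerPixel) + align) & ~align;
    final int stride = bpr / bytesPerPixel;
    final int size = bpr * height;
    return new int[] { bpr, stride, size };
}

For example, a 100x100 RGBA_8888 bitmap with the default alignment of 4 gives bpr = 400, stride = 100, and size = 40000 bytes. The createTextureSurface source follows.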
int createTextureSurface(ogles_context_t* c,
GGLSurface** outSurface, int32_t* outSize, GLint level,
GLenum format, GLenum type, GLsizei width, GLsizei height,
GLenum compressedFormat = 0)
{
// find out which texture is bound to the current unit
const int active = c->textures.active;
const GLuint name = c->textures.tmu[active].name;
// convert the pixelformat to one we can handle
const int32_t formatIdx = convertGLPixelFormat(format, type);
if (formatIdx == 0) { // we don't know what to do with this
return GL_INVALID_OPERATION;
}
// figure out the size we need as well as the stride
const GGLFormat& pixelFormat(c->rasterizer.formats[formatIdx]);
const int32_t align = c->textures.unpackAlignment-1;
const int32_t bpr = ((width * pixelFormat.size) + align) & ~align;
const size_t size = bpr * height;
const int32_t stride = bpr / pixelFormat.size;
if (level > 0) {
const int active = c->textures.active;
EGLTextureObject* tex = c->textures.tmu[active].texture;
status_t err = tex->reallocate(level,
width, height, stride, formatIdx, compressedFormat, bpr);
if (err != NO_ERROR)
return GL_OUT_OF_MEMORY;
GGLSurface& surface = tex->editMip(level);
*outSurface = &surface;
*outSize = size;
return 0;
}
sp<EGLTextureObject> tex = getAndBindActiveTextureObject(c);
status_t err = tex->reallocate(level,
width, height, stride, formatIdx, compressedFormat, bpr);
if (err != NO_ERROR)
return GL_OUT_OF_MEMORY;
tex->internalformat = format;
*outSurface = &tex->surface;
*outSize = size;
return 0;
}
When level = 0, getAndBindActiveTextureObject returns the currently active texture object, i.e. the one we created and bound at the beginning; it then calls EGLTextureObject's reallocate method.
status_t EGLTextureObject::reallocate(
GLint level, int w, int h, int s,
int format, int compressedFormat, int bpr)
{
const size_t size = h * bpr;
if (level == 0)
{
if (size!=mSize || !surface.data) {
if (mSize && surface.data) {
free(surface.data);
}
surface.data = (GGLubyte*)malloc(size);
if (!surface.data) {
mSize = 0;
mIsComplete = false;
return NO_MEMORY;
}
mSize = size;
}
surface.version = sizeof(GGLSurface);
surface.width = w;
surface.height = h;
surface.stride = s;
surface.format = format;
surface.compressedFormat = compressedFormat;
if (mMipmaps)
freeMipmaps();
mIsComplete = true;
}
else
{
....
}
return NO_ERROR;
}
Here surface is the EGLTextureObject member "GGLSurface surface;". When surface.data is NULL, or the requested size differs from what was previously allocated, malloc is called to allocate the memory, and the other surface fields are filled in. This shows that a texture ID corresponds to an EGLTextureObject, whose surface member owns the allocated memory.
In essence, createTextureSurface allocates memory for the EGLTextureObject's surface and returns that surface. Once the surface exists, the next step is copyPixels.
int copyPixels(
ogles_context_t* c,
const GGLSurface& dst,
GLint xoffset, GLint yoffset,
const GGLSurface& src,
GLint x, GLint y, GLsizei w, GLsizei h)
{
if ((dst.format == src.format) &&
(dst.stride == src.stride) &&
(dst.width == src.width) &&
(dst.height == src.height) &&
(dst.stride > 0) &&
((x|y) == 0) &&
((xoffset|yoffset) == 0))
{
// this is a common case...
const GGLFormat& pixelFormat(c->rasterizer.formats[src.format]);
const size_t size = src.height * src.stride * pixelFormat.size;
memcpy(dst.data, src.data, size);
return 0;
}
// use pixel-flinger to handle all the conversions
GGLContext* ggl = getRasterizer(c);
if (!ggl) {
// the only reason this would fail is because we ran out of memory
return GL_OUT_OF_MEMORY;
}
ggl->colorBuffer(ggl, &dst);
ggl->bindTexture(ggl, &src);
ggl->texCoord2i(ggl, x-xoffset, y-yoffset);
ggl->recti(ggl, xoffset, yoffset, xoffset+w, yoffset+h);
return 0;
}
When the destination and source have the same format, stride, width, and height, the copy is a single memcpy; when they differ, the pixels are converted while being copied.
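To make the fast path concrete, here is a small Java sketch (a hypothetical helper, not an Android API) that copies a w x h block of RGBA_8888 pixels between two byte arrays: when the layouts match, one bulk copy is enough, which is the memcpy case above; otherwise it must go row by row (the real code additionally converts formats through pixel-flinger, which is omitted here):

// srcStride / dstStride are in pixels, like GGLSurface.stride; 4 bytes per pixel (RGBA_8888).
static void copyPixelsSketch(byte[] dst, int dstStride,
                             byte[] src, int srcStride,
                             int w, int h) {
    final int bpp = 4;
    if (dstStride == srcStride && srcStride == w) {
        // Fast path: identical layout, a single contiguous copy.
        System.arraycopy(src, 0, dst, 0, w * h * bpp);
        return;
    }
    // Slow path: different row strides, copy one row at a time.
    for (int y = 0; y < h; y++) {
        System.arraycopy(src, y * srcStride * bpp, dst, y * dstStride * bpp, w * bpp);
    }
}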
From walking through texture creation, binding, and uploading a bitmap, we can see that a texture has a unique ID, corresponds to an EGLTextureObject, and owns memory through a GGLSurface. That memory is two-dimensional and holds the image data.
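Putting the Java-side pieces from the start of this page together, a minimal texture-loading helper might look like the sketch below (assuming a valid GL ES 2.0 context is current on the calling thread; it uses android.opengl.GLES20, android.opengl.GLUtils, and android.graphics.Bitmap, with error checking reduced to a single glGetError):

// Create a GL_TEXTURE_2D texture, upload a bitmap into it, and return the texture ID.
static int loadBitmapTexture(Bitmap bmp) {
    int[] handles = new int[1];
    GLES20.glGenTextures(1, handles, 0);              // allocate a texture name (ID)
    int tex = handles[0];
    GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, tex);  // make it the active 2D texture
    GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D,
            GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
    GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D,
            GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
    GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, bmp, 0); // copy the bitmap's pixels into the texture storage
    if (GLES20.glGetError() != GLES20.GL_NO_ERROR) {
        throw new RuntimeException("loadBitmapTexture failed");
    }
    return tex;
}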
With textures covered, let's return to SurfaceTexture.
First, look at how a SurfaceTexture is created at the Java layer.
/**
* Construct a new SurfaceTexture to stream images to a given OpenGL texture.
*
* In single buffered mode the application is responsible for serializing access to the image
* content buffer. Each time the image content is to be updated, the
* {@link #releaseTexImage()} method must be called before the image content producer takes
* ownership of the buffer. For example, when producing image content with the NDK
* ANativeWindow_lock and ANativeWindow_unlockAndPost functions, {@link #releaseTexImage()}
* must be called before each ANativeWindow_lock, or that call will fail. When producing
* image content with OpenGL ES, {@link #releaseTexImage()} must be called before the first
* OpenGL ES function call each frame.
*
* @param texName the OpenGL texture object name (e.g. generated via glGenTextures)
* @param singleBufferMode whether the SurfaceTexture will be in single buffered mode.
*
* @throws Surface.OutOfResourcesException If the SurfaceTexture cannot be created.
*/
public SurfaceTexture(int texName, boolean singleBufferMode) {
mCreatorLooper = Looper.myLooper();
nativeInit(false, texName, singleBufferMode, new WeakReference<SurfaceTexture>(this));
}
The first parameter, int texName, is the texture ID we created; the other parameter selects the buffering mode and can be ignored for now. On the Java side SurfaceTexture is just a thin shell; the real initialization happens in the native layer.
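For reference, this is roughly how an app creates the texture name and hands it to a SurfaceTexture. The only assumption is that a GL ES context is current on the calling thread; note that SurfaceTexture binds the name to GL_TEXTURE_EXTERNAL_OES (GLES11Ext), not to GL_TEXTURE_2D:

// Sketch: create an external texture and wrap it in a SurfaceTexture.
static SurfaceTexture createSurfaceTexture() {
    int[] tex = new int[1];
    GLES20.glGenTextures(1, tex, 0);
    // SurfaceTexture uses the external-image target, not GL_TEXTURE_2D.
    GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, tex[0]);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
    return new SurfaceTexture(tex[0], /* singleBufferMode */ false);
}

The native initialization, SurfaceTexture_init, looks like this: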
static void SurfaceTexture_init(JNIEnv* env, jobject thiz, jboolean isDetached,
jint texName, jboolean singleBufferMode, jobject weakThiz)
{
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
if (singleBufferMode) {
consumer->disableAsyncBuffer();
consumer->setDefaultMaxBufferCount(1);
}
sp<GLConsumer> surfaceTexture;
if (isDetached) {
surfaceTexture = new GLConsumer(consumer, GL_TEXTURE_EXTERNAL_OES,
true, true);
} else {
surfaceTexture = new GLConsumer(consumer, texName,
GL_TEXTURE_EXTERNAL_OES, true, true);
}
if (surfaceTexture == 0) {
jniThrowException(env, OutOfResourcesException,
"Unable to create native SurfaceTexture");
return;
}
SurfaceTexture_setSurfaceTexture(env, thiz, surfaceTexture);
SurfaceTexture_setProducer(env, thiz, producer);
jclass clazz = env->GetObjectClass(thiz);
if (clazz == NULL) {
jniThrowRuntimeException(env,
"Can't find android/graphics/SurfaceTexture");
return;
}
sp<JNISurfaceTextureContext> ctx(new JNISurfaceTextureContext(env, weakThiz,
clazz));
surfaceTexture->setFrameAvailableListener(ctx);
SurfaceTexture_setFrameAvailableListener(env, thiz, ctx);
}
Here we see the producer and the consumer again. We said earlier that Surface is the producer and SurfaceTexture is the consumer, and this code confirms it: the native implementation of SurfaceTexture is GLConsumer, and when isDetached is false, the GLConsumer holds both the consumer end of the BufferQueue and the texture ID texName.
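The producer created in SurfaceTexture_init is what an application reaches when it wraps the SurfaceTexture in a Surface (android.view.Surface has a constructor that takes a SurfaceTexture). A sketch of wiring a producer to it, using MediaPlayer as just one example of a frame producer:

// The SurfaceTexture is the consumer; the Surface built from it is the producer end.
Surface surface = new Surface(surfaceTexture);
MediaPlayer player = new MediaPlayer();
player.setSurface(surface);  // decoded video frames will be queued into the shared BufferQueue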
Next, let's look at what GLConsumer does and how it uses the consumer and texName.
SurfaceTexture exposes an important method at the Java layer:
/**
* Update the texture image to the most recent frame from the image stream. This may only be
* called while the OpenGL ES context that owns the texture is current on the calling thread.
* It will implicitly bind its texture to the GL_TEXTURE_EXTERNAL_OES texture target.
*/
public void updateTexImage() {
nativeUpdateTexImage();
}
This method takes the most recent frame from the image stream (i.e. from the Surface acting as producer) and uses it to update the texture image (the texName passed into the SurfaceTexture constructor).
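In an application this is usually driven by the frame-available callback. A typical, simplified pattern is shown below; glHandler (a Handler for the thread that owns the GL context) and drawFrame are hypothetical app-side pieces, and updateTexImage must run on that GL thread:

final float[] texMatrix = new float[16];
surfaceTexture.setOnFrameAvailableListener(new SurfaceTexture.OnFrameAvailableListener() {
    @Override
    public void onFrameAvailable(SurfaceTexture st) {
        st.updateTexImage();              // latch the newest frame onto texName
        st.getTransformMatrix(texMatrix); // per-frame texture coordinate transform
        drawFrame(texMatrix);             // app-specific drawing that samples the external texture
    }
}, glHandler); // deliver the callback on the GL thread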
This method is ultimately implemented in native code, which brings us to GLConsumer at the C++ layer. updateTexImage ends up calling GLConsumer::updateTexImage() (source below).
status_t GLConsumer::updateTexImage() {
ATRACE_CALL();
ST_LOGV("updateTexImage");
Mutex::Autolock lock(mMutex);
if (mAbandoned) {
ST_LOGE("updateTexImage: GLConsumer is abandoned!");
return NO_INIT;
}
// Make sure the EGL state is the same as in previous calls.
status_t err = checkAndUpdateEglStateLocked();
if (err != NO_ERROR) {
return err;
}
BufferQueue::BufferItem item;
// Acquire the next buffer.
// In asynchronous mode the list is guaranteed to be one buffer
// deep, while in synchronous mode we use the oldest buffer.
err = acquireBufferLocked(&item, 0);
if (err != NO_ERROR) {
if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
// We always bind the texture even if we don't update its contents.
ST_LOGV("updateTexImage: no buffers were available");
glBindTexture(mTexTarget, mTexName);
err = NO_ERROR;
} else {
ST_LOGE("updateTexImage: acquire failed: %s (%d)",
strerror(-err), err);
}
return err;
}
// Release the previous buffer.
err = updateAndReleaseLocked(item);
if (err != NO_ERROR) {
// We always bind the texture.
glBindTexture(mTexTarget, mTexName);
return err;
}
// Bind the new buffer to the GL texture, and wait until it's ready.
return bindTextureImageLocked();
}
updateTexImage roughly consists of three steps: acquire the buffer holding the most recent frame, release the previously acquired buffer, and, the key step, bind the new buffer to the texture.
status_t GLConsumer::bindTextureImageLocked() {
if (mEglDisplay == EGL_NO_DISPLAY) {
ALOGE("bindTextureImage: invalid display");
return INVALID_OPERATION;
}
GLint error;
while ((error = glGetError()) != GL_NO_ERROR) {
ST_LOGW("bindTextureImage: clearing GL error: %#04x", error);
}
glBindTexture(mTexTarget, mTexName);
if (mCurrentTexture == BufferQueue::INVALID_BUFFER_SLOT &&
mCurrentTextureImage == NULL) {
ST_LOGE("bindTextureImage: no currently-bound texture");
return NO_INIT;
}
status_t err = mCurrentTextureImage->createIfNeeded(mEglDisplay,
mCurrentCrop);
if (err != NO_ERROR) {
ST_LOGW("bindTextureImage: can't create image on display=%p slot=%d",
mEglDisplay, mCurrentTexture);
return UNKNOWN_ERROR;
}
mCurrentTextureImage->bindToTextureTarget(mTexTarget);
...
// Wait for the new buffer to be ready.
return doGLFenceWaitLocked();
}
The logic of bindTextureImageLocked is also straightforward: glBindTexture makes the EGLTextureObject corresponding to texName the active texture, and then the helper class EglImage, through the member mCurrentTextureImage, performs the final binding.
To add a bit more detail: acquireBufferLocked obtains the buffer holding the frame image; that buffer is referenced by mCurrentTextureImage (an EglImage), which creates an EGLImageKHR (mEglImage), and the final binding is between mEglImage and the texture. The code below makes these relationships clear.
status_t GLConsumer::acquireBufferLocked(BufferQueue::BufferItem *item,
nsecs_t presentWhen) {
status_t err = ConsumerBase::acquireBufferLocked(item, presentWhen);
if (err != NO_ERROR) {
return err;
}
// If item->mGraphicBuffer is not null, this buffer has not been acquired
// before, so any prior EglImage created is using a stale buffer. This
// replaces any old EglImage with a new one (using the new buffer).
if (item->mGraphicBuffer != NULL) {
int slot = item->mBuf;
mEglSlots[slot].mEglImage = new EglImage(item->mGraphicBuffer);
}
return NO_ERROR;
}
status_t GLConsumer::updateAndReleaseLocked(const BufferQueue::BufferItem& item)
{
...
// Update the GLConsumer state.
mCurrentTexture = buf;
mCurrentTextureImage = mEglSlots[buf].mEglImage;
...
return err;
}
GLConsumer::EglImage::EglImage(sp<GraphicBuffer> graphicBuffer) :
mGraphicBuffer(graphicBuffer),
mEglImage(EGL_NO_IMAGE_KHR),
mEglDisplay(EGL_NO_DISPLAY) {
}
status_t GLConsumer::EglImage::createIfNeeded(EGLDisplay eglDisplay,
const Rect& cropRect,
bool forceCreation) {
...
// If there's no image, create one.
if (mEglImage == EGL_NO_IMAGE_KHR) {
mEglDisplay = eglDisplay;
mCropRect = cropRect;
mEglImage = createImage(mEglDisplay, mGraphicBuffer, mCropRect);
}
...
return OK;
}
EGLImageKHR GLConsumer::EglImage::createImage(EGLDisplay dpy,
const sp<GraphicBuffer>& graphicBuffer, const Rect& crop) {
EGLClientBuffer cbuf = (EGLClientBuffer)graphicBuffer->getNativeBuffer();
EGLint attrs[] = {
EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
EGL_IMAGE_CROP_LEFT_ANDROID, crop.left,
EGL_IMAGE_CROP_TOP_ANDROID, crop.top,
EGL_IMAGE_CROP_RIGHT_ANDROID, crop.right,
EGL_IMAGE_CROP_BOTTOM_ANDROID, crop.bottom,
EGL_NONE,
};
if (!crop.isValid()) {
// No crop rect to set, so terminate the attrib array before the crop.
attrs[2] = EGL_NONE;
} else if (!isEglImageCroppable(crop)) {
// The crop rect is not at the origin, so we can't set the crop on the
// EGLImage because that's not allowed by the EGL_ANDROID_image_crop
// extension. In the future we can add a layered extension that
// removes this restriction if there is hardware that can support it.
attrs[2] = EGL_NONE;
}
EGLImageKHR image = eglCreateImageKHR(dpy, EGL_NO_CONTEXT,
EGL_NATIVE_BUFFER_ANDROID, cbuf, attrs);
if (image == EGL_NO_IMAGE_KHR) {
EGLint error = eglGetError();
ALOGE("error creating EGLImage: %#x", error);
}
return image;
}
void GLConsumer::EglImage::bindToTextureTarget(uint32_t texTarget) {
glEGLImageTargetTexture2DOES(texTarget, (GLeglImageOES)mEglImage);
}
With that, the overall logic of SurfaceTexture is clear: it is essentially a GLConsumer that holds a texture name (texName), acquires the latest frame buffer from the producer Surface, and binds that buffer to texName.
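One last practical note: because the frame is bound to the GL_TEXTURE_EXTERNAL_OES target (via glEGLImageTargetTexture2DOES), a fragment shader that samples it must enable the external-image extension and use samplerExternalOES instead of sampler2D. A minimal GLSL ES shader, kept here as a Java string constant for reference (vTexCoord is an assumed varying name from the app's vertex shader):

// Fragment shader for sampling a texture updated by SurfaceTexture.updateTexImage().
static final String OES_FRAGMENT_SHADER =
        "#extension GL_OES_EGL_image_external : require\n" +
        "precision mediump float;\n" +
        "varying vec2 vTexCoord;\n" +              // texture coordinate from the vertex shader
        "uniform samplerExternalOES sTexture;\n" + // the texture named texName above
        "void main() {\n" +
        "    gl_FragColor = texture2D(sTexture, vTexCoord);\n" +
        "}\n";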