Video: use libswscale to handle video format conversion
Jeffrey Pfau <jeffrey@endrift.com>
Tue, 28 Oct 2014 02:17:18 -0700
3 files changed, 30 insertions(+), 32 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -82,7 +82,7 @@ add_definitions(-DBINARY_NAME="${BINARY_NAME}" -DPROJECT_NAME="${PROJECT_NAME}" -DPROJECT_VERSION="${LIB_VERSION_STRING}")
 
 # Feature dependencies
 find_feature(USE_CLI_DEBUGGER "libedit")
-find_feature(USE_FFMPEG "libavcodec;libavformat;libavresample;libavutil")
+find_feature(USE_FFMPEG "libavcodec;libavformat;libavresample;libavutil;libswscale")
 find_feature(USE_PNG "ZLIB;PNG")
 find_feature(USE_LIBZIP "libzip")
@@ -135,10 +135,10 @@ source_group("ARM debugger" FILES ${DEBUGGER_SRC})
 
 if(USE_FFMPEG)
 	add_definitions(-DUSE_FFMPEG)
-	include_directories(AFTER ${LIBAVCODEC_INCLUDE_DIRS} ${LIBAVFORMAT_INCLUDE_DIRS} ${LIBAVRESAMPLE_INCLUDE_DIRS} ${LIBAVUTIL_INCLUDE_DIRS})
-	link_directories(${LIBAVCODEC_LIBRARY_DIRS} ${LIBAVFORMAT_LIBRARY_DIRS} ${LIBAVRESAMPLE_LIBRARY_DIRS} ${LIBAVUTIL_LIBRARY_DIRS})
+	include_directories(AFTER ${LIBAVCODEC_INCLUDE_DIRS} ${LIBAVFORMAT_INCLUDE_DIRS} ${LIBAVRESAMPLE_INCLUDE_DIRS} ${LIBAVUTIL_INCLUDE_DIRS} ${LIBSWSCALE_INCLUDE_DIRS})
+	link_directories(${LIBAVCODEC_LIBRARY_DIRS} ${LIBAVFORMAT_LIBRARY_DIRS} ${LIBAVRESAMPLE_LIBRARY_DIRS} ${LIBAVUTIL_LIBRARY_DIRS} ${LIBSWSCALE_LIBRARY_DIRS})
 	list(APPEND UTIL_SRC "${CMAKE_SOURCE_DIR}/src/platform/ffmpeg/ffmpeg-encoder.c")
-	list(APPEND DEPENDENCY_LIB ${LIBAVCODEC_LIBRARIES} ${LIBAVFORMAT_LIBRARIES} ${LIBAVRESAMPLE_LIBRARIES} ${LIBAVUTIL_LIBRARIES})
+	list(APPEND DEPENDENCY_LIB ${LIBAVCODEC_LIBRARIES} ${LIBAVFORMAT_LIBRARIES} ${LIBAVRESAMPLE_LIBRARIES} ${LIBAVUTIL_LIBRARIES} ${LIBSWSCALE_LIBRARIES})
 endif()
 
 if(USE_PNG)
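
find_feature presumably resolves each listed module through pkg-config, so the new dependency must be visible as the pkg-config module libswscale. A quick standalone check that the headers and library resolve the same way outside the build system (hypothetical file name check-swscale.c):

    /* check-swscale.c: confirm libswscale is discoverable via pkg-config.
     * Build with: cc check-swscale.c $(pkg-config --cflags --libs libswscale) */
    #include <stdio.h>
    #include <libswscale/swscale.h>

    int main(void) {
        /* swscale_version() packs major/minor/micro as (a << 16) | (b << 8) | c. */
        unsigned v = swscale_version();
        printf("libswscale %u.%u.%u\n", v >> 16, (v >> 8) & 0xffu, v & 0xffu);
        return 0;
    }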
diff --git a/src/platform/ffmpeg/ffmpeg-encoder.c b/src/platform/ffmpeg/ffmpeg-encoder.c
--- a/src/platform/ffmpeg/ffmpeg-encoder.c
+++ b/src/platform/ffmpeg/ffmpeg-encoder.c
@@ -5,6 +5,9 @@
 #include <libavutil/imgutils.h>
 #include <libavutil/opt.h>
 
+#include <libavresample/avresample.h>
+#include <libswscale/swscale.h>
+
 static void _ffmpegPostVideoFrame(struct GBAAVStream*, struct GBAVideoRenderer* renderer);
 static void _ffmpegPostAudioFrame(struct GBAAVStream*, int32_t left, int32_t right);
@@ -91,11 +94,19 @@ static const struct {
 		enum AVPixelFormat format;
 		int priority;
 	} priorities[] = {
-		{ AV_PIX_FMT_RGB24, 0 },
-		{ AV_PIX_FMT_BGR0, 1 },
-		{ AV_PIX_FMT_YUV422P, 2 },
-		{ AV_PIX_FMT_YUV444P, 3 },
-		{ AV_PIX_FMT_YUV420P, 4 }
+		{ AV_PIX_FMT_RGB555, 0 },
+		{ AV_PIX_FMT_BGR555, 0 },
+		{ AV_PIX_FMT_RGB565, 1 },
+		{ AV_PIX_FMT_BGR565, 1 },
+		{ AV_PIX_FMT_RGB24, 2 },
+		{ AV_PIX_FMT_BGR24, 2 },
+		{ AV_PIX_FMT_BGR0, 3 },
+		{ AV_PIX_FMT_RGB0, 3 },
+		{ AV_PIX_FMT_0BGR, 3 },
+		{ AV_PIX_FMT_0RGB, 3 },
+		{ AV_PIX_FMT_YUV422P, 4 },
+		{ AV_PIX_FMT_YUV444P, 5 },
+		{ AV_PIX_FMT_YUV420P, 6 }
 	};
 	AVCodec* codec = avcodec_find_encoder_by_name(vcodec);
 	if (!codec) {
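
The reordered table now prefers the 15- and 16-bit packed formats closest to the GBA's native 15-bit color, then the 24- and 32-bit RGB variants, with planar YUV as a last resort. The patch leaves the selection logic itself alone; a minimal sketch (not the literal mGBA code) of the loop such a table implies, keeping the supported format with the lowest priority number:

    #include <limits.h>
    #include <libavcodec/avcodec.h>

    /* Sketch: pick the best pixel format a codec supports, assuming a
     * file-scope `priorities` table like the one above (lower = preferred). */
    static enum AVPixelFormat _selectFormat(const AVCodec* codec) {
        enum AVPixelFormat best = AV_PIX_FMT_NONE;
        int bestPriority = INT_MAX;
        const enum AVPixelFormat* fmt;
        size_t i;
        /* codec->pix_fmts is terminated by AV_PIX_FMT_NONE (or NULL if unknown). */
        for (fmt = codec->pix_fmts; fmt && *fmt != AV_PIX_FMT_NONE; ++fmt) {
            for (i = 0; i < sizeof(priorities) / sizeof(priorities[0]); ++i) {
                if (priorities[i].format == *fmt && priorities[i].priority < bestPriority) {
                    best = *fmt;
                    bestPriority = priorities[i].priority;
                }
            }
        }
        return best;
    }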
@@ -202,6 +213,9 @@
 	encoder->videoFrame->format = encoder->video->pix_fmt;
 	encoder->videoFrame->width = encoder->video->width;
 	encoder->videoFrame->height = encoder->video->height;
 	encoder->videoFrame->pts = 0;
+	encoder->scaleContext = sws_getContext(VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS, AV_PIX_FMT_0BGR32,
+		VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS, encoder->video->pix_fmt,
+		0, 0, 0, 0);
 	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
 	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
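
sws_getContext fixes the whole conversion up front: both sides are VIDEO_HORIZONTAL_PIXELS by VIDEO_VERTICAL_PIXELS (240x160), so only the pixel format changes. AV_PIX_FMT_0BGR32 is libavutil's byte-order-independent alias for the renderer's 32-bit layout; it resolves to AV_PIX_FMT_RGB0 on little-endian hosts. The four trailing zeros are the scaler flags plus three optional pointers. The same call with the parameters named, where spelling out SWS_POINT is an assumption about intent rather than what the patch passes:

    /* Equivalent context creation with parameters named. Since source and
     * destination sizes match, no real scaling occurs and the scaler choice
     * is mostly moot; SWS_POINT (nearest neighbor) is the cheapest explicit one. */
    struct SwsContext* scaleContext = sws_getContext(
        VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS, AV_PIX_FMT_0BGR32,        /* source: native frames */
        VIDEO_HORIZONTAL_PIXELS, VIDEO_VERTICAL_PIXELS, encoder->video->pix_fmt,  /* destination: encoder format */
        SWS_POINT, /* scaler flags */
        NULL,      /* source filter */
        NULL,      /* destination filter */
        NULL);     /* extra tuning parameters */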
@@ -236,6 +250,8 @@
 	if (encoder->resampleContext) {
 		avresample_close(encoder->resampleContext);
 	}
+	sws_freeContext(encoder->scaleContext);
+
 	avformat_free_context(encoder->context);
 	encoder->context = 0;
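
Unlike the guarded avresample_close() above it, the new sws_freeContext() call is unconditional: libswscale documents that freeing a NULL context is a no-op, so this is safe even if video setup never ran. A sketch of that contract (the pointer reset is an extra precaution, not part of the patch):

    /* sws_freeContext(NULL) does nothing, so no guard is needed. */
    sws_freeContext(encoder->scaleContext);
    encoder->scaleContext = NULL; /* avoid a dangling pointer if the encoder is reused */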
@@ -294,9 +310,10 @@
 	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
 	if (!encoder->context) {
 		return;
 	}
-	uint32_t* pixels;
+	uint8_t* pixels;
 	unsigned stride;
 	renderer->getPixels(renderer, &stride, (void**) &pixels);
+	stride *= 4;
 
 	AVPacket packet;
@@ -307,26 +324,7 @@
 	av_frame_make_writable(encoder->videoFrame);
 	encoder->videoFrame->pts = av_rescale_q(encoder->currentVideoFrame, encoder->video->time_base, encoder->videoStream->time_base);
 	++encoder->currentVideoFrame;
-	unsigned x, y;
-	if (encoder->videoFrame->format == AV_PIX_FMT_BGR0) {
-		for (y = 0; y < VIDEO_VERTICAL_PIXELS; ++y) {
-			for (x = 0; x < VIDEO_HORIZONTAL_PIXELS; ++x) {
-				uint32_t pixel = pixels[stride * y + x];
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 4] = pixel >> 16;
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 4 + 1] = pixel >> 8;
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 4 + 2] = pixel;
-			}
-		}
-	} else if (encoder->videoFrame->format == AV_PIX_FMT_RGB24) {
-		for (y = 0; y < VIDEO_VERTICAL_PIXELS; ++y) {
-			for (x = 0; x < VIDEO_HORIZONTAL_PIXELS; ++x) {
-				uint32_t pixel = pixels[stride * y + x];
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 3] = pixel;
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 3 + 1] = pixel >> 8;
-				encoder->videoFrame->data[0][y * encoder->videoFrame->linesize[0] + x * 3 + 2] = pixel >> 16;
-			}
-		}
-	}
+	sws_scale(encoder->scaleContext, &pixels, &stride, 0, VIDEO_VERTICAL_PIXELS, encoder->videoFrame->data, encoder->videoFrame->linesize);
 
 	int gotData;
 	avcodec_encode_video2(encoder->video, &packet, encoder->videoFrame, &gotData);
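
This is the payoff: the two hand-rolled per-pixel loops, which only handled AV_PIX_FMT_BGR0 and AV_PIX_FMT_RGB24, collapse into a single sws_scale() call that covers every entry in the priority table. sws_scale() expects arrays of per-plane pointers and per-plane byte strides, which is why pixels became a uint8_t* and the stride was converted from pixels to bytes (stride *= 4) above; a packed RGB source is one plane, so the address of the lone pointer serves as a one-element array. A self-contained sketch of the same conversion with hypothetical buffer names:

    #include <stdint.h>
    #include <libavutil/frame.h>
    #include <libswscale/swscale.h>

    /* Convert one packed 240x160 frame into dst's pixel format. src and
     * srcStrideBytes are hypothetical stand-ins for the renderer output. */
    static int _convertFrame(struct SwsContext* scale, const uint8_t* src, int srcStrideBytes, AVFrame* dst) {
        const uint8_t* const srcPlanes[1] = { src };  /* packed RGB: a single plane */
        const int srcStrides[1] = { srcStrideBytes }; /* stride in bytes, not pixels */
        /* Returns the number of destination rows written: 160 on success. */
        return sws_scale(scale, srcPlanes, srcStrides, 0, 160, dst->data, dst->linesize);
    }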
diff --git a/src/platform/ffmpeg/ffmpeg-encoder.h b/src/platform/ffmpeg/ffmpeg-encoder.h
--- a/src/platform/ffmpeg/ffmpeg-encoder.h
+++ b/src/platform/ffmpeg/ffmpeg-encoder.h
@@ -5,7 +5,6 @@
 #include "gba-thread.h"
 
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
-#include <libavresample/avresample.h>
 
 struct FFmpegEncoder {
 	struct GBAAVStream d;
@@ -30,13 +29,14 @@
 	AVFrame* audioFrame;
 	size_t currentAudioSample;
 	int64_t currentAudioFrame;
 	int64_t nextAudioPts;
-	AVAudioResampleContext* resampleContext;
+	struct AVAudioResampleContext* resampleContext;
 	AVStream* audioStream;
 
 	AVCodecContext* video;
 	enum AVPixelFormat pixFormat;
 	AVFrame* videoFrame;
 	int64_t currentVideoFrame;
+	struct SwsContext* scaleContext;
 	AVStream* videoStream;
 };
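
The header changes ride on a C idiom: both library handles are now spelled as pointers to possibly incomplete struct types, which is why the libavresample include can be dropped entirely; only ffmpeg-encoder.c, which actually dereferences the handles, needs the real library headers. A minimal illustration of the rule:

    /* A pointer to a never-defined struct is legal C; only operations that
     * need the size or members require the full definition. */
    struct Opaque; /* declaration only; definition lives elsewhere */

    struct Holder {
        struct Opaque* handle; /* fine: pointer to an incomplete type */
    };

    /* Not legal here without the full definition:
     *   sizeof(struct Opaque);
     *   holder.handle->member;
     */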