--- /dev/null
+From a1960a00ad4c6f67917f5f4bfcbf739634958dd6 Mon Sep 17 00:00:00 2001
+From: Marten Richter <marten.richter@freenet.de>
+Date: Sun, 19 Aug 2012 13:58:58 +0200
+Subject: [PATCH] Add MPEG-2 to MPEG-4 transcoding code, initial revision
+
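+Add a new decoder, mpegvideo_mpeg4 (CODEC_ID_MPEG2VIDEO_MPEG4), that
+reuses the MPEG-1/2 decoder front end but, instead of reconstructing
+pixels, re-quantizes the DCT coefficients and re-encodes each
+macroblock as MPEG-4 part 2 on the fly. The transcoded bitstream is
+exported through the new PIX_FMT_TRANSCODE pixel format: the
+application supplies an output AVPacket inside a struct
+transcode_pix_fmt (see transcode.h) from its get_buffer() callback,
+and libavcodec fills it with the MPEG-4 bitstream.
+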
+---
+ libavcodec/Makefile | 2 +
+ libavcodec/allcodecs.c | 1 +
+ libavcodec/avcodec.h | 5 +
+ libavcodec/mpeg12.c | 82 +++-
+ libavcodec/mpeg4video.h | 5 +
+ libavcodec/mpeg4videodec.c | 2 +-
+ libavcodec/mpeg4videoenc.c | 32 +-
+ libavcodec/mpegvideo.c | 16 +-
+ libavcodec/mpegvideo.h | 6 +
+ libavcodec/mpegvideo_enc.c | 8 +-
+ libavcodec/mpegvideo_transcode.c | 888 ++++++++++++++++++++++++++++++++++++++
+ libavcodec/transcode.h | 69 +++
+ libavcodec/transcode_internal.h | 34 ++
+ libavutil/pixfmt.h | 2 +
+ 14 files changed, 1138 insertions(+), 14 deletions(-)
+ create mode 100644 libavcodec/mpegvideo_transcode.c
+ create mode 100644 libavcodec/transcode.h
+ create mode 100644 libavcodec/transcode_internal.h
+
+diff --git a/libavcodec/Makefile b/libavcodec/Makefile
+index 77126a6..46f1efc 100644
+--- a/libavcodec/Makefile
++++ b/libavcodec/Makefile
+@@ -9,6 +9,7 @@ HEADERS = avcodec.h \
+ vdpau.h \
+ version.h \
+ xvmc.h \
++ transcode.h \
+
+ OBJS = allcodecs.o \
+ audioconvert.o \
+@@ -264,6 +265,7 @@ OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o mpegaudiodec.o \
+ OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o mpegaudiodec.o \
+ mpegaudiodecheader.o mpegaudio.o \
+ mpegaudiodata.o
++OBJS-$(CONFIG_MPEG_MPEG4_DECODER) += mpegvideo_transcode.o
+ OBJS-$(CONFIG_MPEG_XVMC_DECODER) += mpegvideo_xvmc.o
+ OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
+index cb08e33..00fc162 100644
+--- a/libavcodec/allcodecs.c
++++ b/libavcodec/allcodecs.c
+@@ -146,6 +146,7 @@ void avcodec_register_all(void)
+ REGISTER_DECODER (MMVIDEO, mmvideo);
+ REGISTER_DECODER (MOTIONPIXELS, motionpixels);
+ REGISTER_DECODER (MPEG_XVMC, mpeg_xvmc);
++ REGISTER_DECODER (MPEG_MPEG4, mpeg_mpeg4);
+ REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video);
+ REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
+ REGISTER_ENCDEC (MPEG4, mpeg4);
+diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
+index 2b70b96..0cbd733 100644
+--- a/libavcodec/avcodec.h
++++ b/libavcodec/avcodec.h
+@@ -257,6 +257,7 @@ enum CodecID {
+ CODEC_ID_ZEROCODEC,
+ CODEC_ID_MSS1,
+ CODEC_ID_MSA1,
++ CODEC_ID_MPEG2VIDEO_MPEG4,
+
+ /* various PCM "codecs" */
+ CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+@@ -696,6 +697,10 @@ typedef struct RcOverride{
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+ #define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
++/**
++ * Codec can export data for transcoding (AV).
++ */
++#define CODEC_CAP_HWACCEL_TRANSCODE 0x20000
+
+ //The following defines may change, don't expect compatibility if you use them.
+ #define MB_TYPE_INTRA4x4 0x0001
+diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
+index c40649d..01133e5 100644
+--- a/libavcodec/mpeg12.c
++++ b/libavcodec/mpeg12.c
+@@ -37,6 +37,7 @@
+ #include "bytestream.h"
+ #include "vdpau_internal.h"
+ #include "xvmc_internal.h"
++#include "transcode_internal.h"
+ #include "thread.h"
+
+ //#undef NDEBUG
+@@ -1183,6 +1184,8 @@ static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
+
+ if (avctx->xvmc_acceleration)
+ return avctx->get_format(avctx, pixfmt_xvmc_mpg2_420);
++ else if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE)
++ return PIX_FMT_TRANSCODE;
+ else if (avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
+ if (avctx->codec_id == CODEC_ID_MPEG1VIDEO)
+ return PIX_FMT_VDPAU_MPEG1;
+@@ -1289,7 +1292,8 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
+ // until then pix_fmt may be changed right after codec init
+ if (avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
+ avctx->hwaccel ||
+- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU ||
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE)
+ if (avctx->idct_algo == FF_IDCT_AUTO)
+ avctx->idct_algo = FF_IDCT_SIMPLE;
+
+@@ -1305,8 +1309,14 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
+ quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->dsp.idct_permutation);
+ quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->dsp.idct_permutation);
+
++ if (CONFIG_MPEG_MPEG4_DECODER && s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE) {
++ if (ff_mpeg_transcode_decode_postinit(avctx)<0)
++ return -2;
++ }
+ s1->mpeg_enc_ctx_allocated = 1;
+ }
++
++
+ return 0;
+ }
+
+@@ -1916,7 +1926,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
+
+ ff_MPV_frame_end(s);
+
+- if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
++ if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay
++ || (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE)) {//no reordering for transcoding
+ *pict = s->current_picture_ptr->f;
+ ff_print_debug_info(s, pict);
+ } else {
+@@ -2034,7 +2045,8 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
+ avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
+
+ if (avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel ||
+- s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU ||
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE)
+ if (avctx->idct_algo == FF_IDCT_AUTO)
+ avctx->idct_algo = FF_IDCT_SIMPLE;
+
+@@ -2262,7 +2274,8 @@ static int decode_chunks(AVCodecContext *avctx,
+ ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);
+
+ if (slice_end(avctx, picture)) {
+- if (s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
++ if (s2->last_picture_ptr || s2->low_delay ||
++ (avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE) ) //FIXME merge with the stuff in mpeg_decode_slice
+ *data_size = sizeof(AVPicture);
+ }
+ }
+@@ -2528,6 +2541,67 @@ AVCodec ff_mpeg2video_decoder = {
+ .profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
+ };
+
++#if CONFIG_MPEG_MPEG4_DECODER
++
++static av_cold int mpeg_transcode_decode_init(AVCodecContext *avctx)
++{
++ Mpeg1Context *s = avctx->priv_data;
++ MpegEncContext *s2;
++
++
++ if (avctx->active_thread_type & FF_THREAD_SLICE)
++ return -1;
++ if (!(avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
++ return -1;
++ mpeg_decode_init(avctx);
++
++ avctx->pix_fmt = PIX_FMT_TRANSCODE;
++
++ s->mpeg_enc_ctx.transcode_context = (void*) av_mallocz(sizeof(MpegEncContext));
++ if (!s->mpeg_enc_ctx.transcode_context) {
++ return AVERROR(ENOMEM);
++ }
++ s2=(MpegEncContext*)s->mpeg_enc_ctx.transcode_context;
++ s2->current_picture_ptr =&s2->current_picture;
++
++
++
++ s->mpeg_enc_ctx.transcode = 1;
++
++ return 0;
++}
++
++static int mpeg_transcode_decode_end(AVCodecContext *avctx)
++{
++ MpegEncContext *s2;
++ Mpeg1Context *s = avctx->priv_data;
++ s2=(MpegEncContext*)s->mpeg_enc_ctx.transcode_context;
++
++ if (s2->current_picture_ptr)
++ ff_free_picture(s2,&s2->current_picture);
++ if (s2)
++ av_free(s2);
++
++ return mpeg_decode_end(avctx);
++}
++
++
++AVCodec ff_mpeg_mpeg4_decoder = {
++ .name = "mpegvideo_mpeg4",
++ .type = AVMEDIA_TYPE_VIDEO,
++ .id = CODEC_ID_MPEG2VIDEO_MPEG4,
++ .priv_data_size = sizeof(Mpeg1Context),
++ .init = mpeg_transcode_decode_init,
++ .close = mpeg_transcode_decode_end,
++ .decode = mpeg_decode_frame,
++ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL_TRANSCODE | CODEC_CAP_DELAY,
++ .flush = flush,
++ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video to MPEG-4 part 2 transcoder for hardware acceleration"),
++};
++
++#endif
++
++
+ #if CONFIG_MPEG_XVMC_DECODER
+ static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx)
+ {
+diff --git a/libavcodec/mpeg4video.h b/libavcodec/mpeg4video.h
+index 64c0243..a8963df 100644
+--- a/libavcodec/mpeg4video.h
++++ b/libavcodec/mpeg4video.h
+@@ -88,6 +88,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s,
+ void ff_mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
+ int dir);
+ void ff_set_mpeg4_time(MpegEncContext * s);
++void ff_set_frame_distances(MpegEncContext * s);
+ void ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
+
+ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb);
+@@ -102,6 +103,10 @@ int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
+ int ff_mpeg4_decode_video_packet_header(MpegEncContext *s);
+ void ff_mpeg4_init_direct_mv(MpegEncContext *s);
+
++#if CONFIG_MPEG_MPEG4_DECODER
++void ff_mpeg4_transcode_init_tables(MpegEncContext* s);
++#endif
++
+ /**
+ *
+ * @return the mb_type
+diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
+index 3fcc6d0..d588f57 100644
+--- a/libavcodec/mpeg4videodec.c
++++ b/libavcodec/mpeg4videodec.c
+@@ -863,7 +863,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
+ dc_pred_dir= (s->pred_dir_table[s->mb_x + s->mb_y*s->mb_stride]<<n)&32;
+ }else{
+ level = mpeg4_decode_dc(s, n, &dc_pred_dir);
+- if (level < 0)
++ if (level < 0)
+ return -1;
+ }
+ block[0] = level;
+diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
+index 95bb3a5..a0dd6b2 100644
+--- a/libavcodec/mpeg4videoenc.c
++++ b/libavcodec/mpeg4videoenc.c
+@@ -499,7 +499,8 @@ void ff_mpeg4_encode_mb(MpegEncContext * s,
+ assert(mb_type>=0);
+
+ /* nothing to do if this MB was skipped in the next P Frame */
+- if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
++ if (!(CONFIG_MPEG_MPEG4_DECODER && s->transcode) &&
++ s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
+ s->skip_count++;
+ s->mv[0][0][0]=
+ s->mv[0][0][1]=
+@@ -614,10 +615,11 @@ void ff_mpeg4_encode_mb(MpegEncContext * s,
+ s->p_tex_bits+= get_bits_diff(s);
+ }
+
+- }else{ /* s->pict_type==AV_PICTURE_TYPE_B */
++ }else{ /* s->pict_type==AV_PICTURE_TYPE_P */
+ cbp= get_p_cbp(s, block, motion_x, motion_y);
+
+- if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) {
++ if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16
++ && !(CONFIG_MPEG_MPEG4_DECODER && s->transcode) ) {
+ /* check if the B frames can skip it too, as we must skip it if we skip here
+ why didn't they just compress the skip-mb bits instead of reusing them ?! */
+ if(s->max_b_frames>0){
+@@ -860,6 +862,8 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
+ time = s->current_picture_ptr->f.pts;
+ if(s->reordered_input_picture[1])
+ time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
++ if (CONFIG_MPEG_MPEG4_DECODER && s->transcode)
++ time = FFMIN(time, s->last_picture.f.pts);
+ time= time*s->avctx->time_base.num;
+
+ seconds= time/s->avctx->time_base.den;
+@@ -1263,6 +1267,28 @@ static av_cold int encode_init(AVCodecContext *avctx)
+ return 0;
+ }
+
++#if CONFIG_MPEG_MPEG4_DECODER
++
++void ff_mpeg4_transcode_init_tables(MpegEncContext* s)
++{
++ init_uni_dc_tab();
++ ff_init_rl(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]);
++ init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len);
++ init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len);
++ s->min_qcoeff= -2048;
++ s->max_qcoeff= 2047;
++ s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
++ s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64;
++ s->luma_dc_vlc_length= uni_DCtab_lum_len;
++ s->ac_esc_length= 7+2+1+6+1+12+1;
++ s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
++ s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
++
++
++}
++
++#endif
++
+ void ff_mpeg4_init_partitions(MpegEncContext *s)
+ {
+ uint8_t *start= put_bits_ptr(&s->pb);
+diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
+index 574893e..8f25689 100644
+--- a/libavcodec/mpegvideo.c
++++ b/libavcodec/mpegvideo.c
+@@ -38,6 +38,7 @@
+ #include "msmpeg4.h"
+ #include "faandct.h"
+ #include "xvmc_internal.h"
++#include "transcode_internal.h"
+ #include "thread.h"
+ #include <limits.h>
+
+@@ -379,7 +380,7 @@ fail: // for the FF_ALLOCZ_OR_GOTO macro
+ /**
+ * Deallocate a picture.
+ */
+-static void free_picture(MpegEncContext *s, Picture *pic)
++void ff_free_picture(MpegEncContext *s, Picture *pic)
+ {
+ int i;
+
+@@ -996,7 +997,7 @@ void ff_MPV_common_end(MpegEncContext *s)
+
+ if (s->picture && !s->avctx->internal->is_copy) {
+ for (i = 0; i < s->picture_count; i++) {
+- free_picture(s, &s->picture[i]);
++ ff_free_picture(s, &s->picture[i]);
+ }
+ }
+ av_freep(&s->picture);
+@@ -1353,6 +1354,9 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+
+ if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
+ return ff_xvmc_field_start(s, avctx);
++ if (CONFIG_MPEG_MPEG4_DECODER &&
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE)
++ return ff_transcode_start_picture(s, avctx);
+
+ return 0;
+ }
+@@ -1366,6 +1370,9 @@ void ff_MPV_frame_end(MpegEncContext *s)
+ // just to make sure that all data is rendered.
+ if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
+ ff_xvmc_field_end(s);
++ } else if (CONFIG_MPEG_MPEG4_DECODER &&
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE) {
++ ff_transcode_end_picture(s);
+ } else if ((s->error_count || s->encoding) &&
+ !s->avctx->hwaccel &&
+ !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
+@@ -1941,6 +1948,11 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
+ ff_xvmc_decode_mb(s);//xvmc uses pblocks
+ return;
+ }
++ if ( CONFIG_MPEG_MPEG4_DECODER &&
++ s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_TRANSCODE){
++ ff_transcode_decode_mb(s); //transcode does no real decoding
++ return;
++ }
+
+ if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
+ /* save DCT coefficients */
+diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
+index f5b20e6..8d37178 100644
+--- a/libavcodec/mpegvideo.h
++++ b/libavcodec/mpegvideo.h
+@@ -702,6 +702,9 @@ typedef struct MpegEncContext {
+
+ /* temp buffers for rate control */
+ float *cplx_tab, *bits_tab;
++ /* transcode, encoding context: a pointer to another MpegEncContext*/
++ void *transcode_context;
++ int transcode; // 0 no transcoding, 1 activated
+ } MpegEncContext;
+
+ #define REBASE_PICTURE(pic, new_ctx, old_ctx) (pic ? \
+@@ -745,6 +748,7 @@ void ff_MPV_common_end(MpegEncContext *s);
+ void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
+ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+ void ff_MPV_frame_end(MpegEncContext *s);
++void ff_MPV_encode_defaults(MpegEncContext *s);
+ int ff_MPV_encode_init(AVCodecContext *avctx);
+ int ff_MPV_encode_end(AVCodecContext *avctx);
+ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+@@ -787,6 +791,8 @@ void ff_copy_picture(Picture *dst, Picture *src);
+ */
+ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared);
+
++void ff_free_picture(MpegEncContext *s, Picture *pic);
++
+ extern const enum PixelFormat ff_pixfmt_list_420[];
+ extern const enum PixelFormat ff_hwaccel_pixfmt_list_420[];
+
+diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
+index f88df92..a35c292 100644
+--- a/libavcodec/mpegvideo_enc.c
++++ b/libavcodec/mpegvideo_enc.c
+@@ -256,7 +256,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst,
+ * Set the given MpegEncContext to defaults for encoding.
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+-static void MPV_encode_defaults(MpegEncContext *s)
++void ff_MPV_encode_defaults(MpegEncContext *s)
+ {
+ int i;
+ ff_MPV_common_defaults(s);
+@@ -275,7 +275,7 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
+ int i;
+ int chroma_h_shift, chroma_v_shift;
+
+- MPV_encode_defaults(s);
++ ff_MPV_encode_defaults(s);
+
+ switch (avctx->codec_id) {
+ case CODEC_ID_MPEG2VIDEO:
+@@ -3085,7 +3085,7 @@ static int estimate_qp(MpegEncContext *s, int dry_run){
+ }
+
+ /* must be called before writing the header */
+-static void set_frame_distances(MpegEncContext * s){
++void ff_set_frame_distances(MpegEncContext * s){
+ assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
+ s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
+
+@@ -3114,7 +3114,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
+ /* we need to initialize some time vars before we can encode b-frames */
+ // RAL: Condition added for MPEG1VIDEO
+ if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
+- set_frame_distances(s);
++ ff_set_frame_distances(s);
+ if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
+ ff_set_mpeg4_time(s);
+
+diff --git a/libavcodec/mpegvideo_transcode.c b/libavcodec/mpegvideo_transcode.c
+new file mode 100644
+index 0000000..e2e56e1
+--- /dev/null
++++ b/libavcodec/mpegvideo_transcode.c
+@@ -0,0 +1,888 @@
++/*
++ * Transcoder
++ * Copyright (c) 2012 Marten Richter
++ *
++ * For the code took from the encoder:
++ * Copyright (c) 2000,2001 Fabrice Bellard
++ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
++ * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
++ * and probably many others, see the git commit notes for details
++ *
++ * This file is part of Libav.
++ *
++ * Libav is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * Libav is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with Libav; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include "avcodec.h"
++#include "dsputil.h"
++#include "mpegvideo.h"
++#include "mpegvideo_common.h"
++#include "mpeg4video.h"
++
++#include "mpeg12.h"
++#include "h263.h"
++
++#undef NDEBUG
++#include <assert.h>
++
++#include "transcode.h"
++#include "transcode_internal.h"
++
++int transcode_qmul_table[32];
++
++/**
++ * initialise the encoding contexts, after the properties of the incoming stream are known
++ * TODO: remove unnecessary inits
++ */
++int ff_mpeg_transcode_decode_postinit(AVCodecContext* avctx)
++{
++ // Now init the encode context
++ Mpeg1Context *s2;
++ MpegEncContext *s;
++ int i;
++ int chroma_h_shift, chroma_v_shift;
++ int savetimebaseden;
++
++
++ s2 = avctx->priv_data;
++ s = s2->mpeg_enc_ctx.transcode_context;
++
++ ff_MPV_encode_defaults(s);
++ s->codec_id=CODEC_ID_MPEG4;
++
++ s->input_picture_number = 0;
++ s->transcode = 1;
++ avctx->qmin = 1;
++
++
++ s->bit_rate = avctx->bit_rate;
++ s->width = avctx->width+32;
++ s->height = avctx->height;
++ s->gop_size = avctx->gop_size;
++ s->avctx = avctx;
++ s->flags = avctx->flags;
++ s->flags2 = avctx->flags2;
++ s->max_b_frames = 100; // we have b frames
++ //s->codec_id = avctx->codec->id;
++ s->strict_std_compliance = avctx->strict_std_compliance;
++ s->quarter_sample = 0;
++ s->mpeg_quant = 0;
++ s->rtp_mode = 0; //we need it for qscale
++ s->vol_sprite_usage=0;
++ s->intra_dc_precision = avctx->intra_dc_precision;
++ s->user_specified_pts = AV_NOPTS_VALUE;
++
++ if (s->gop_size <= 1) {
++ s->intra_only = 1;
++ s->gop_size = 12;
++ } else {
++ s->intra_only = 0;
++ }
++
++ s->me_method = avctx->me_method;
++ s->adaptive_quant = (s->avctx->lumi_masking ||
++ s->avctx->dark_masking ||
++ s->avctx->temporal_cplx_masking ||
++ s->avctx->spatial_cplx_masking ||
++ s->avctx->p_masking ||
++ s->avctx->border_masking ||
++ (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
++ !s->fixed_qscale;
++ s->loop_filter = 0; //no loop for mpeg4
++
++ if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG1VIDEO ||
++ s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG) {
++ // (a + x * 3 / 8) / x
++ s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
++ s->inter_quant_bias = 0;
++ } else {
++ s->intra_quant_bias = 0;
++ // (a - x / 4) / x
++ s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
++ }
++
++ avcodec_get_chroma_sub_sample(PIX_FMT_YUV422P, &chroma_h_shift,
++ &chroma_v_shift);
++ if (/*avctx->codec_id == CODEC_ID_MPEG4 &&*/
++ s->avctx->time_base.den > (1 << 16) - 1)
++ {
++ av_log(avctx, AV_LOG_ERROR,
++ "timebase %d/%d not supported by MPEG 4 standard, "
++ "the maximum admitted value for the timebase denominator "
++ "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
++ (1 << 16) - 1);
++ return -1;
++ }
++ s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
++
++ // mpeg4 parameters
++ s->out_format = FMT_H263;
++ s->h263_pred = 1;
++ s->unrestricted_mv = 1; //does not really matter
++ s->low_delay = s->max_b_frames ? 0 : 1;
++ avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
++ avctx->has_b_frames = !s->low_delay;
++
++ s->encoding = 1;
++ s->progressive_frame =
++ s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
++ CODEC_FLAG_INTERLACED_ME) ||
++ s->alternate_scan);
++ /* init */
++ if (ff_MPV_common_init(s) < 0)
++ return -1;
++
++ if (!s->dct_quantize)
++ s->dct_quantize = ff_dct_quantize_c;
++ /* if (!s->denoise_dct)
++ s->denoise_dct = denoise_dct_c;
++ s->fast_dct_quantize = s->dct_quantize;
++ if (avctx->trellis)
++ s->dct_quantize = dct_quantize_trellis_c;*/
++
++
++ s->quant_precision = 5;
++
++ ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
++ ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
++
++ if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
++ ff_h263_encode_init(s);
++
++ /* init q matrix */
++ for (i = 0; i < 64; i++) {
++ int j = s->dsp.idct_permutation[i];
++ int j2 = s2->mpeg_enc_ctx.dsp.idct_permutation[i];
++ if (/*CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4 && */
++ s->mpeg_quant) {
++ s->intra_matrix[j] = s2->mpeg_enc_ctx.intra_matrix[j2];
++ s->inter_matrix[j] = s2->mpeg_enc_ctx.inter_matrix[j2];
++ } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
++ s->intra_matrix[j] =
++ s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
++ } else {
++ /* mpeg1/2 */
++ s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
++ s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
++ }
++
++ }
++ ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
++ s->intra_matrix, s->intra_quant_bias, avctx->qmin,
++ 31, 1);
++ ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
++ s->inter_matrix, s->inter_quant_bias, avctx->qmin,
++ 31, 0);
++
++ ff_mpeg4_transcode_init_tables(s);
++ for (i=1;i<32;i++) {
++ transcode_qmul_table[i]=(1<<QMAT_SHIFT)/(i<<1);
++ }
++
++ /* init the picture */
++
++
++ return ff_alloc_picture(s,&s->current_picture, 1);
++
++}
++
++int ff_transcode_start_picture(MpegEncContext *dec, AVCodecContext *avctx)
++{
++ Mpeg1Context *s2;
++ MpegEncContext *s;
++ int i;
++ struct transcode_pix_fmt *render = (struct transcode_pix_fmt*)dec->current_picture.f.data[0];
++ s2 = dec->avctx->priv_data;
++ s = s2->mpeg_enc_ctx.transcode_context;
++
++ assert(avctx);
++ if (dec->picture_structure != PICT_FRAME)
++ {
++ av_log(avctx, AV_LOG_ERROR, "field mode is not supported %d\n",dec->picture_structure);
++ }
++
++ if (!render || render->transcode_id != AV_TRANSCODE_ID ||
++ !render->packet.data || !render->packet.size ) {
++ av_log(avctx, AV_LOG_ERROR,
++ "Render token doesn't look as expected.\n");
++ return -1; // make sure that this is a transcode packet
++ }
++
++
++ s->input_picture_number=s->input_picture_number+1;
++
++ for(i=0; i<3; i++){
++ /* init last dc values */
++ /* note: quant matrix value (8) is implied here */
++ s->last_dc[i] = 128 << s->intra_dc_precision;
++
++ s->current_picture.f.error[i] = 0;
++ }
++ s->mb_skip_run = 0;
++ memset(s->last_mv, 0, sizeof(s->last_mv));
++
++ s->last_mv_dir = 0;
++
++
++
++ //now init the put bit contexts
++
++ s->slice_context_count=1;
++ init_put_bits(&s->pb, render->packet.data, render->packet.size);
++
++ //ff_mpeg4_transcode_write_picture_headers(dec,avctx); //moved to slice decoding
++ return 0;
++}
++
++
++
++
++void ff_transcode_end_picture(MpegEncContext *dec)
++{
++ Mpeg1Context *s2;
++ MpegEncContext *s;
++ struct transcode_pix_fmt *render;
++ s2 = dec->avctx->priv_data;
++ s = s2->mpeg_enc_ctx.transcode_context;
++ ff_mpeg4_stuffing(&s->pb);
++ avpriv_align_put_bits(&s->pb);
++ flush_put_bits(&s->pb);
++
++ render = (struct transcode_pix_fmt*)dec->current_picture.f.data[0];
++ render->packet.size=put_bits_count(&s->pb)/8;
++
++
++
++}
++
++// copied from mpegvideo_enc.c, FIXME: share this instead of duplicating it
++
++static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
++ int last_index)
++{
++ int i;
++ const int maxlevel = s->max_qcoeff;
++ const int minlevel = s->min_qcoeff;
++ int overflow = 0;
++
++ if (s->mb_intra) {
++ i = 1; // skip clipping of intra dc
++ } else
++ i = 0;
++
++ for (; i <= last_index; i++) {
++ const int j = s->intra_scantable.scantable[i];
++ int level = block[j];
++
++ if (level > maxlevel) {
++ level = maxlevel;
++ overflow++;
++ } else if (level < minlevel) {
++ level = minlevel;
++ overflow++;
++ }
++
++ block[j] = level;
++ }
++
++ if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
++ av_log(s->avctx, AV_LOG_INFO,
++ "warning, clipping %d dct coefficients to %d..%d\n",
++ overflow, minlevel, maxlevel);
++}
++
++
++static inline int quantize_emulated_intra_c(MpegEncContext *s,
++ DCTELEM *block, int n,
++ int qscale,int dec_last_index,
++ int *overflow)
++{
++ int i, j, level, last_non_zero, q, start_i;
++ const uint8_t *scantable= s->intra_scantable.scantable;
++ int bias;
++ int qmul;
++ int max=0;
++ unsigned int threshold1, threshold2;
++
++
++ //qmul=(1<<QMAT_SHIFT)/(qscale<<1);
++ qmul=transcode_qmul_table[qscale];
++
++
++
++ q=(qscale<<1);
++ /* note: block[0] is assumed to be positive */
++ //av_log(s, AV_LOG_ERROR, "b: %d",block[0]);
++ if (block[0]>=0)
++ block[0] = (block[0] + (q >> 1)) / q;
++ else
++ block[0] =-( (-block[0] + (q >> 1)) / q);
++
++ level=block[0];
++ if (level>=0) {
++ level=((level<<1)+1)*qscale-1+(qscale&1)+1024;
++ } else {
++ level=-((((-level)<<1)+1)*qscale-1+(qscale&1))+1024;
++ }
++
++ max=block[0];
++ start_i = 1;
++
++ if (block[0]) last_non_zero = 0;
++ else last_non_zero=-1;
++ bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
++
++ threshold1= (1<<QMAT_SHIFT) - bias - 1;
++ threshold2= (threshold1<<1);
++ for(i=dec_last_index/*63*/;i>=start_i;i--) {
++ j = scantable[i];
++ level = block[j] * qmul;
++ if(((unsigned)(level+threshold1))>threshold2){
++ last_non_zero = i;
++ break;
++ }else{
++ block[j]=0;
++ }
++ }
++
++ for(i=start_i; i<=last_non_zero; i++) {
++ j = scantable[i];
++ level = block[j] * qmul;
++ if(((unsigned)(level+threshold1))>threshold2){
++ if(level>0){
++ level= (bias + level)>>(QMAT_SHIFT);
++ block[j]= level;
++ }else{
++ level= (bias - level)>>(QMAT_SHIFT);
++ block[j]= -level;
++ }
++ max |=level;
++ }else{
++ block[j]=0;
++ }
++ }
++ *overflow= s->max_qcoeff < max; //overflow might have happened
++
++
++ return last_non_zero;
++}
++
++
++
++static inline int quantize_c(MpegEncContext *s,
++ DCTELEM *block, int n,
++ int qscale,int dec_last_index,
++ int *overflow)
++{
++ int i, j, level, last_non_zero, q, start_i;
++ const uint8_t *scantable= s->intra_scantable.scantable;
++ int bias;
++ int qmul;
++ int max=0;
++ unsigned int threshold1, threshold2;
++
++
++ //qmul=(1<<QMAT_SHIFT)/(qscale<<1);
++ qmul=transcode_qmul_table[qscale];
++
++
++ if (s->mb_intra) {
++ if (!s->h263_aic) {
++ if (n < 4)
++ q = s->y_dc_scale;
++ else
++ q = s->c_dc_scale;
++ // q = q << 3;
++ } else
++ /* For AIC we skip quant/dequant of INTRADC */
++ q = 1;// << 3;
++
++ /* note: block[0] is assumed to be positive */
++ block[0] = (block[0] + (q >> 1)) / q;
++ start_i = 1;
++ last_non_zero = 0;
++ bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
++ } else {
++ start_i = 0;
++ last_non_zero = -1;
++ bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
++ }
++ threshold1= (1<<QMAT_SHIFT) - bias - 1;
++ threshold2= (threshold1<<1);
++
++ for(i=dec_last_index/*63*/;i>=start_i;i--) {
++ j = scantable[i];
++ level = block[j] * qmul;
++ if(((unsigned)(level+threshold1))>threshold2){
++ last_non_zero = i;
++ break;
++ }else{
++ block[j]=0;
++ }
++ }
++
++ for(i=start_i; i<=last_non_zero; i++) {
++ j = scantable[i];
++ level = block[j] * qmul;
++
++ // if( bias+level >= (1<<QMAT_SHIFT)
++ // || bias-level >= (1<<QMAT_SHIFT)){
++ if(((unsigned)(level+threshold1))>threshold2){
++ if(level>0){
++ level= (bias + level)>>(QMAT_SHIFT);
++ block[j]= level;
++ }else{
++ level= (bias - level)>>(QMAT_SHIFT);
++ block[j]= -level;
++ }
++ max |=level;
++ }else{
++ block[j]=0;
++ }
++ }
++ *overflow= s->max_qcoeff < max; //overflow might have happened
++
++
++
++ return last_non_zero;
++}
++
++#define ZEROS8 0,0,0,0,0,0,0,0
++#define ZEROS64 ZEROS8,ZEROS8,ZEROS8,ZEROS8,ZEROS8,ZEROS8,ZEROS8,ZEROS8
++
++
++
++static inline void add_zero_mb(MpegEncContext *s)
++{
++ DCTELEM zero_block[6][64]={{ZEROS64},{ZEROS64},{ZEROS64},{ZEROS64},{ZEROS64},{ZEROS64}};
++ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
++
++ s->dquant=0;
++ ff_update_block_index(s);
++
++ if (s->pict_type==AV_PICTURE_TYPE_I) {
++ int q;
++ s->mb_intra=1;
++ if (s->qscale>4){ // this is not good for quantization
++ s->dquant=-2;
++ s->qscale=s->qscale-2;
++ ff_set_qscale(s, s->qscale);
++ }
++ q=s->y_dc_scale;
++ zero_block[0][0]=zero_block[1][0]=
++ zero_block[2][0]=zero_block[3][0]=(1024+(q>>1))/q;
++ q=s->c_dc_scale;
++ zero_block[4][0]=zero_block[5][0]=(1024+(q>>1))/q;
++ s->block_last_index[0]=s->block_last_index[1]=
++ s->block_last_index[2]=s->block_last_index[3]=
++ s->block_last_index[4]=s->block_last_index[5]=0;
++
++ if ( (s->h263_pred || s->h263_aic))
++ s->mbintra_table[mb_xy]=1;
++ } else {
++ s->mb_intra=0;
++ s->mv_type=MV_TYPE_16X16;
++ s->mv_dir=MV_DIR_FORWARD;
++ s->mv[0][0][0]=0;
++ s->mv[0][0][1]=0;
++ if ((s->h263_pred || s->h263_aic)) {
++
++ if(s->mbintra_table[mb_xy])
++ ff_clean_intra_table_entries(s);
++ } else {
++ s->last_dc[0] =
++ s->last_dc[1] =
++ s->last_dc[2] = 128 << s->intra_dc_precision;
++ }
++ s->block_last_index[0]=s->block_last_index[1]=
++ s->block_last_index[2]=s->block_last_index[3]=
++ s->block_last_index[4]=s->block_last_index[5]=-1;
++
++ }
++
++
++
++ ff_mpeg4_encode_mb(s, zero_block, 0, 0);
++ ff_h263_update_motion_val(s);
++}
++
++void ff_transcode_decode_mb(MpegEncContext *dec)
++{
++ Mpeg1Context *s2;
++ MpegEncContext *s;
++ int dquant;
++ int i;
++ int dir;
++
++ int motion_x;
++ int motion_y;
++ int savetimebaseden;
++ int mb_xy;
++ int emulate=0;
++ s2 = dec->avctx->priv_data;
++ s = s2->mpeg_enc_ctx.transcode_context;
++
++ s->mb_y = dec->mb_y;
++
++ s->mb_x = dec->mb_x+1;
++
++ savetimebaseden=s->avctx->time_base.den;//the way mpeg2 and mpeg4 handle interlaced video differs
++ if (!s->progressive_sequence) {
++ s->avctx->time_base.den=s->avctx->time_base.den>>1;
++ }
++
++
++
++ if (dec->mb_x==0 && dec->mb_y==0) { // a bit ugly but qscale is not set before
++ s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
++ ff_mpeg4_transcode_write_picture_headers(dec,dec->avctx);
++
++ }
++
++
++
++
++
++ if (s->mb_x==1) {
++ s->first_slice_line=s->mb_y==0;
++
++ s->mb_x = 0;
++ ff_init_block_index(s); //necessary for prediction
++
++ add_zero_mb(s);
++ s->mb_x = 1;
++ }
++ mb_xy= s->mb_y * s->mb_stride + s->mb_x;
++
++
++
++ // the decoder state must be updated; important: we assume mpeg12 as input
++ if (!dec->mb_intra) {
++ dec->last_dc[0] =
++ dec->last_dc[1] =
++ dec->last_dc[2] = 128 << dec->intra_dc_precision;
++ }
++
++
++ dquant=-s->qscale+(dec->qscale>>1 | 1);
++ if (dquant>2) dquant=2;
++ else if (dquant<-2) dquant=-2;
++
++ if (s->pict_type == AV_PICTURE_TYPE_B) //b frames do not support dquant=1 or dquant=-1
++ {
++ if (dquant==1) dquant=0; // always try to quantize better, not worse
++ else if (dquant==-1) dquant=-2;
++ }
++
++ s->qscale=s->qscale+dquant;
++
++ if (dquant) ff_set_qscale(s, s->qscale);
++ s->dquant = dquant;
++
++
++
++
++
++ //to do convert handling
++
++
++ s->interlaced_dct = dec->interlaced_dct;
++ s->mb_intra = dec->mb_intra;
++ if (!s->mb_intra) {
++ s->mv_dir = dec->mv_dir;
++ s->mv_type = dec->mv_type;
++ for (dir=0; dir < 2; dir++) {
++ for (i=0; i < 2; i++) {
++ s->mv[dir][i][0] = dec->mv[dir][i][0];
++ s->mv[dir][i][1] = dec->mv[dir][i][1];
++ s->field_select[dir][i] = dec->field_select[dir][i];
++ }
++ }
++ motion_x = motion_y = 0;
++ if (s->mv_type != MV_TYPE_FIELD) {
++ if (s->mv_dir == MV_DIR_FORWARD) {
++ motion_x = s->mv[0][0][0];
++ motion_y = s->mv[0][0][1];
++ } else if (s->mv_dir == MV_DIR_BACKWARD) {
++ motion_x = s->mv[1][0][0];
++ motion_y = s->mv[1][0][1];
++ } // set motion also in direct mode TODO
++ }
++ }
++
++ if (s->pict_type == AV_PICTURE_TYPE_B){
++ if (s->mb_intra) {
++
++ // TODO: emulate intra blocks properly. MPEG-4 B-VOPs have no intra MB type,
++ // so force non-intra here; leaving mb_intra set would break DC prediction.
++ s->mb_intra=0;
++ s->mv_type=MV_TYPE_16X16;
++
++
++ if (s->b_code<=s->f_code) {
++ s->mv_dir = MV_DIR_FORWARD;
++ motion_y = s->mv[0][0][1]=0;
++ if (s->mb_x<(s->mb_width>>1)) {
++ motion_x = s->mv[0][0][0]=-s->mb_x<<5;
++ } else {
++ motion_x = s->mv[0][0][0]=(s->mb_width-s->mb_x)<<5;
++ }
++ } else {
++ motion_y = s->mv[1][0][1]=0;
++ s->mv_dir = MV_DIR_BACKWARD;
++ if (s->mb_x<(s->mb_width>>1)) {
++ motion_x = s->mv[1][0][0]=-s->mb_x<<5;
++ } else {
++ motion_x = s->mv[1][0][0]=(s->mb_width-s->mb_x)<<5;
++ }
++ }
++ if (abs(motion_x)>1023) {
++ s->mv_dir = MV_DIR_BACKWARD|MV_DIR_FORWARD;
++ motion_x = s->mv[1][0][0]=s->mv[0][0][0]=0;
++ av_log(s->avctx, AV_LOG_ERROR, "emulation failed intra in b picture %d %d %d\n",
++ s->mb_x,s->mb_y,s->input_picture_number);
++ for (i=0;i<6;i++) {
++ dec->block_last_index[i]=-1; //skip them
++ }
++
++ }
++
++
++
++ // dec->block[4][0]=2048; //should give awkward color for spotting them
++
++ for (i=0;i<6;i++) {
++ dec->block[i][0]-=1024;
++ }
++
++
++ emulate=1;
++ } else {
++ if (s->mv_type == MV_TYPE_16X8) { //16x8 is not supported by mpeg4, emulate with 16x16
++ s->mv_type=MV_TYPE_16X16;
++ // TODO figure out if MV_TYPE_DIRECT would give a better quality
++ // figure also out if a mean motion vector will be better
++ av_log(s->avctx, AV_LOG_ERROR, "16X8 in b picture %d %d %d\n",
++ s->mb_x,s->mb_y,s->input_picture_number);
++
++ }
++ }
++ } else {
++ if (s->mv_type == MV_TYPE_DMV) { //this is not supported by mpeg4 emulate with 16x16
++ s->mv_type=MV_TYPE_16X16;
++ // unclear whether the motion vector needs to be scaled
++ av_log(s->avctx, AV_LOG_ERROR, "DMV in p picture %d %d %d\n",
++ s->mb_x,s->mb_y,s->input_picture_number);
++ }
++
++ }
++
++ ff_update_block_index(s);
++
++ /* update DC predictors for P macroblocks */ //must come after the update of block indices
++ if (!s->mb_intra) {
++ if ((s->h263_pred || s->h263_aic)) {
++ if(s->mbintra_table[mb_xy])
++ ff_clean_intra_table_entries(s);
++ } else {
++ s->last_dc[0] =
++ s->last_dc[1] =
++ s->last_dc[2] = 128 << s->intra_dc_precision;
++ }
++
++
++
++ }
++ else if ( (s->h263_pred || s->h263_aic))
++ s->mbintra_table[mb_xy]=1;
++
++
++
++ if (!emulate) {
++ for (i = 0; i < 6; i++) {
++
++ if (dec->block_last_index[i]>=0) {
++ int overflow;
++ s->block_last_index[i] = quantize_c(s, dec->block[i], i, s->qscale,
++ dec->block_last_index[i], &overflow);
++ // FIXME we could decide to change to quantizer instead of
++ // clipping
++ // JS: I don't think that would be a good idea it could lower
++ // quality instead of improve it. Just INTRADC clipping
++ // deserves changes in quantizer
++ if (overflow)
++ clip_coeffs(s, dec->block[i], s->block_last_index[i]);
++ } else
++ s->block_last_index[i] = -1;
++ }
++ } else {
++ for (i = 0; i < 6; i++) {
++
++ if (dec->block_last_index[i]>=0) {
++ int overflow;
++ s->block_last_index[i] = quantize_emulated_intra_c(s, dec->block[i], i, s->qscale,
++ dec->block_last_index[i], &overflow);
++ // FIXME we could decide to change to quantizer instead of
++ // clipping
++ // JS: I don't think that would be a good idea it could lower
++ // quality instead of improve it. Just INTRADC clipping
++ // deserves changes in quantizer
++ if (overflow)
++ clip_coeffs(s, dec->block[i], s->block_last_index[i]);
++ } else
++ s->block_last_index[i] = -1;
++ }
++
++ }
++
++
++
++
++ ff_mpeg4_encode_mb(s, dec->block, motion_x, motion_y);
++ ff_h263_update_motion_val(s);
++
++ if (s->mb_x==s->mb_width-2) {
++ s->mb_x = s->mb_width-1;
++ add_zero_mb(s);
++ }
++
++ s->avctx->time_base.den=savetimebaseden;
++
++
++}
++
++void ff_mpeg4_transcode_write_picture_headers(MpegEncContext *dec,AVCodecContext* avctx)
++{
++ Mpeg1Context *s2;
++ MpegEncContext *s;
++ int save_profile;
++ int save_level;
++ s2 = avctx->priv_data;
++ s = s2->mpeg_enc_ctx.transcode_context;
++
++ //now transfer the decoded data to the encoding context
++
++
++
++
++
++
++ //picture header
++ s->pict_type = dec->pict_type;
++
++ s->qscale = (dec->qscale>>1|1); //qscale is set in the slices; maybe move this to the slice code. Note: this also modifies qscale for the rest of the picture
++ ff_set_qscale(s, s->qscale);
++
++ s->no_rounding = 0; //does not matter related to gmc
++ s->progressive_sequence = dec->progressive_sequence;
++ s->current_picture_ptr->f.top_field_first = dec->top_field_first;
++ s->alternate_scan = dec->alternate_scan;
++ if (dec->current_picture.f.pkt_pts!=AV_NOPTS_VALUE && dec->current_picture.f.pkt_pts) {
++ //thankfully someone is supplying (hopefully clean) pts values, use them!
++ s->current_picture_ptr->f.pts=dec->current_picture.f.pkt_pts;
++ } else {
++ // this is ugly and will probably fail,
++ // it assumes a constant b frames distance
++
++ s->current_picture_ptr->f.pts=s->input_picture_number+0; //dirty hack, but works
++
++ if (s->pict_type==AV_PICTURE_TYPE_B)
++ {
++ // we guess a sequence with two b frames in between I and P frames,
++ // as long as no DIRECT mv is used this should be fine if we are wrong
++ s->current_picture_ptr->f.pts=
++ s->current_picture_ptr->f.pts-3;
++ // if (s->current_picture_ptr->f.pts==s->last_time_base) s->current_picture_ptr->f.pts++;
++
++
++ }
++
++ }
++ if (s->pict_type!=AV_PICTURE_TYPE_B) {
++ s->last_picture.f.pts=s->reordered_pts; // dirty hack
++ s->reordered_pts=s->current_picture_ptr->f.pts;
++ }
++
++
++ if (s->alternate_scan) {
++ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable,ff_alternate_vertical_scan);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable,ff_alternate_vertical_scan);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable,ff_alternate_vertical_scan);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable,ff_alternate_vertical_scan);
++ } else {
++ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable,ff_zigzag_direct);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable,ff_zigzag_direct);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable,ff_alternate_horizontal_scan);
++ ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable,ff_alternate_vertical_scan);
++ }
++
++ s->strict_std_compliance = FF_COMPLIANCE_VERY_STRICT-1;
++ s->data_partitioning = 0;
++ s->f_code = dec->mpeg_f_code[0][0];
++ if (dec->mpeg_f_code[0][1] > s->f_code) {
++ s->f_code = dec->mpeg_f_code[0][1];
++ }
++
++ //we select the maximum f_code, but not more than the maximum the standard allows,
++
++
++ s->b_code = dec->mpeg_f_code[1][0];
++ if (dec->mpeg_f_code[1][1] > s->b_code) {
++ s->b_code = dec->mpeg_f_code[1][1];
++ }
++
++ if (s->pict_type == AV_PICTURE_TYPE_B) {
++
++ if (s->b_code<=s->f_code) {
++ s->f_code=FFMIN(FFMAX(av_log2(s->width)-2,s->f_code),7);
++ } else {
++ s->b_code=FFMIN(FFMAX(av_log2(s->width)-2,s->b_code),7);
++ }
++ }
++
++ //vol header
++ save_profile = s->avctx->profile;
++ save_level = s->avctx->level;
++ s->avctx->profile = FF_PROFILE_MPEG4_ADVANCED_SIMPLE;
++ s->avctx->level = 1;
++
++ s->width = dec->width+32; // check if we need emulation black bars
++ s->height = dec->height;
++ s->mb_stride = dec->mb_stride+2; //emulating black bars
++ s->quarter_sample = 0;
++ s->ac_pred = 0;
++
++ s->me.mb_var_sum_temp =
++ s->me.mc_mb_var_sum_temp = 0;
++
++
++
++ ff_set_frame_distances(s);
++ ff_set_mpeg4_time(s);
++
++ s->me.scene_change_score=0;
++
++ if (s2->closed_gop) s->flags|=CODEC_FLAG_CLOSED_GOP;
++ else s->flags&=~CODEC_FLAG_CLOSED_GOP;//pass the closed-gop flag through
++
++
++ ff_mpeg4_encode_picture_header(s, 0);
++ s->avctx->profile = save_profile;
++ s->avctx->level = save_level;
++
++
++
++
++}
+diff --git a/libavcodec/transcode.h b/libavcodec/transcode.h
+new file mode 100644
+index 0000000..c1c8c85
+--- /dev/null
++++ b/libavcodec/transcode.h
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (C) 2012 Marten Richter
++ *
++ * This file is part of Libav.
++ *
++ * Libav is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * Libav is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with Libav; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_TRANSCODE_H
++#define AVCODEC_TRANSCODE_H
++
++/**
++ * @file
++ * @ingroup lavc_codec_hwaccel_mpeg4_transcode
++ * Public libavcodec Mpeg4TranscodeHeader header.
++ */
++
++#include "avcodec.h"
++
++/**
++ * @defgroup lavc_codec_hwaccel_mpeg4_transcode Mpeg4Transcode
++ * @ingroup lavc_codec_hwaccel
++ *
++ * @{
++ */
++
++#define AV_TRANSCODE_ID 0x54524E53 /**< special value to ensure that regular pixel routines haven't corrupted the struct
++ the number spells out the letters TRNS */
++
++struct transcode_pix_fmt {
++ /** The field contains the special constant value AV_TRANSCODE_ID.
++ It is used as a test that the application correctly uses the API,
++ and that there is no corruption caused by pixel routines.
++ - application - set during initialization
++ - libavcodec - unchanged
++ */
++ int transcode_id;
++
++ /** AVPacket structure containing the buffer that receives all the transcoded data
++ - application - set the pointer during initialization
++ - libavcodec - fills bitstream to packet
++ */
++ AVPacket packet;
++
++ /** Indicates that a new frame starts in this frame
++ - application - unchanged
++ - libavcodec - set
++ */
++ int new_frame;
++
++};
++
++/**
++ * @}
++ */
++
++#endif /* AVCODEC_TRANSCODE_H */
+diff --git a/libavcodec/transcode_internal.h b/libavcodec/transcode_internal.h
+new file mode 100644
+index 0000000..b837fde
+--- /dev/null
++++ b/libavcodec/transcode_internal.h
+@@ -0,0 +1,34 @@
++/*
++ * Transcode internal functions
++ *
++ * This file is part of Libav.
++ *
++ * Libav is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * Libav is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with Libav; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifndef AVCODEC_TRANSCODE_INTERNAL_H
++#define AVCODEC_TRANSCODE_INTERNAL_H
++
++#include "avcodec.h"
++#include "mpegvideo.h"
++
++int ff_mpeg_transcode_decode_postinit(AVCodecContext* avctx);
++void ff_mpeg4_transcode_write_picture_headers(MpegEncContext *dec,AVCodecContext* avctx);
++void ff_transcode_decode_mb(MpegEncContext *dec);
++int ff_transcode_start_picture(MpegEncContext *dec, AVCodecContext *avctx);
++void ff_transcode_end_picture(MpegEncContext *dec);
++void ff_mpeg4_transcode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y);
++
++#endif /* AVCODEC_TRANSCODE_INTERNAL_H */
+diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
+index bd898bd..9600629 100644
+--- a/libavutil/pixfmt.h
++++ b/libavutil/pixfmt.h
+@@ -157,6 +157,8 @@ enum PixelFormat {
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
++
++ PIX_FMT_TRANSCODE, ///< HW transcoding to MPEG-4; data[0] contains a transcode_pix_fmt structure
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+ };
+
+--
+1.7.10.4
+
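What follows are hunks from the OpenMAX client (VideoVPEOGL) that drives the new codec. For orientation, here is a minimal sketch of the get_buffer() contract that transcode.h documents, assuming an application-owned output buffer; transcode_get_buffer, out_buf and out_size are placeholder names, not part of the patch. The client's real implementation is get_buffer_transcode() below.

/* Sketch only: a minimal application-side get_buffer() for
 * PIX_FMT_TRANSCODE, modeled on get_buffer_transcode() below. */
#include "libavcodec/avcodec.h"
#include "libavcodec/transcode.h"

static uint8_t *out_buf;  /* assumption: allocated by the application */
static int out_size;

static int transcode_get_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    struct transcode_pix_fmt *t;

    if (c->pix_fmt != PIX_FMT_TRANSCODE)
        return -1;                      /* only the transcode format is handled */

    t = av_mallocz(sizeof(*t));
    if (!t)
        return AVERROR(ENOMEM);

    t->transcode_id = AV_TRANSCODE_ID;  /* lets libavcodec verify the struct */
    t->packet.data  = out_buf;          /* receives the MPEG-4 bitstream */
    t->packet.size  = out_size;

    pic->type    = FF_BUFFER_TYPE_USER; /* the application owns this buffer */
    pic->data[0] = (uint8_t *)t;        /* ff_transcode_start_picture() reads it */
    return 0;
}

Once avcodec_decode_video2() signals a finished frame, the application reads packet.size back and hands packet.data to its MPEG-4 decoder, as DecodePacketOMXTranscode() does further down.
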
//decoding_mode=VPE_XVMC_IDCT;
framebuf_framenum=0;
moco_shader=NULL;
-#endif
ogl_forward_ref_frame_num=0;
ogl_backward_ref_frame_num=0;
ogl_forward_ref_frame=NULL;
ogl_backward_ref_frame=NULL;
+#endif
+
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ transcodecodec_libav=NULL;
+ transcodecodec_context_libav=NULL;
+#endif
+
#ifdef BENCHMARK_FPS
time_in_decoder=0;
#endif
+#if defined(VPE_LIBAV_MPEG2_TRANSCODING) || defined(VPE_LIBAV_SUPPORT)
+ av_register_all();
+#endif
+
#ifdef VPE_LIBAV_SUPPORT
- av_register_all();
+
if (decoding_mode==VPE_NO_XVMC) {
mpeg2codec_libav=avcodec_find_decoder(CODEC_ID_MPEG2VIDEO);
} else {
Log::getInstance()->log("Video", Log::DEBUG, "Find libav mpeg2 codec failed");
return 0;
}
-
#endif
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ transcodecodec_libav=avcodec_find_decoder(CODEC_ID_MPEG2VIDEO_MPEG4);
+ if (transcodecodec_libav==NULL) {
+ Log::getInstance()->log("Video", Log::DEBUG, "Find libav mpeg2 transcoder failed");
+ return 0;
+ }
+#endif
+
+
threadStart();
return 1;
}
OMX_ERRORTYPE VideoVPEOGL::EmptyBufferDone_OMX(OMX_IN OMX_HANDLETYPE hcomp,OMX_IN OMX_PTR appdata,OMX_IN OMX_BUFFERHEADERTYPE* buffer){
- Log::getInstance()->log("Video", Log::NOTICE, "EmptyBufferDone");
+ //Log::getInstance()->log("Video", Log::NOTICE, "EmptyBufferDone");
VideoVPEOGL *video=(VideoVPEOGL *)getInstance();
video->ReturnEmptyOMXBuffer(buffer);
return OMX_ErrorNone;
void VideoVPEOGL::ReturnEmptyOMXBuffer(OMX_BUFFERHEADERTYPE* buffer){
input_bufs_omx_mutex.Lock();
- Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer %d",input_bufs_omx_free.size());
+ //Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer %d",input_bufs_omx_free.size());
input_bufs_omx_free.push_back(buffer);
- Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer %d",input_bufs_omx_free.size());
+ //Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer %d",input_bufs_omx_free.size());
input_bufs_omx_mutex.Unlock();
}
return OMX_ErrorNone;
}
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+
+ enum PixelFormat VideoVPEOGL::get_format_transcode(struct AVCodecContext *s, const enum PixelFormat *fmt)
+ {
+ enum PixelFormat ret_pix=PIX_FMT_NONE;
+
+ while (*fmt!=PIX_FMT_NONE) {
+ if (*fmt== PIX_FMT_TRANSCODE ) {
+ ret_pix=PIX_FMT_TRANSCODE;
+ }
+ fmt++;
+ }
+ return ret_pix;
+ }
+
+ int VideoVPEOGL::reget_buffer_transcode(struct AVCodecContext *c, AVFrame *pic)
+ {
+ Log::getInstance()->log("Video", Log::DEBUG,"Buffer reusing! Should not happen! Not implemented!\n");
+
+ return -1;
+ }
+
+ int VideoVPEOGL::get_buffer_transcode(struct AVCodecContext *c, AVFrame *pic)
+ {
+
+ //reget logic from mplayer
+ if (pic->opaque && pic->data[0] && (!pic->buffer_hints ||pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE )){
+ Log::getInstance()->log("Video", Log::DEBUG,"Buffer reusing! Should not happen!\n");
+ return 0;
+ }
+
+
+ if (c->pix_fmt!=PIX_FMT_TRANSCODE ) {
+ Log::getInstance()->log("Video", Log::DEBUG,"We only support transcode pixel fmt\n");
+ return 0;
+ }
+ VideoVPEOGL *video=(VideoVPEOGL *)getInstance();
+ transcode_pix_fmt *pix_trans=NULL;
+ OMX_BUFFERHEADERTYPE* our_buf=video->GetFreeOMXBufferforlibav(&pix_trans);
+ if (our_buf==NULL|| pix_trans==NULL) {
+ Log::getInstance()->log("Video", Log::DEBUG,"Getting buffer failed\n");
+ return -1;
+ }
+
+
+
+
+ //populate pict
+ pic->type=FF_BUFFER_TYPE_USER; // we are controlling the buffers
+
+ pic->base[0]=(uint8_t*)our_buf; // our buf
+ //pic->extended_data=pic->data;
+ if(c->pkt) pic->pkt_pts=c->pkt->pts;
+ else pic->pkt_pts=AV_NOPTS_VALUE;
+ pic->width=c->width;
+ pic->height=c->height;
+ pic->format=c->pix_fmt;
+ pic->sample_aspect_ratio=c->sample_aspect_ratio;
+ pic->reordered_opaque= c->reordered_opaque;
+ //pic->age=INT_MAX;
+ pic->data[0]=(uint8_t*)pix_trans;
+ pix_trans->transcode_id=AV_TRANSCODE_ID;
+ pix_trans->packet.data=(uint8_t*)our_buf->pBuffer;
+ pix_trans->packet.size=our_buf->nAllocLen;
+ //that is all folks
+
+ return 0;
+
+ }
+
+ OMX_BUFFERHEADERTYPE* VideoVPEOGL::GetFreeOMXBufferforlibav(transcode_pix_fmt **pix_trans)
+ {
+ OMX_BUFFERHEADERTYPE* returned_buf=NULL;
+ *pix_trans=NULL;
+ int time_out=0;
+
+ while (returned_buf==NULL && time_out<100){
+ input_bufs_omx_mutex.Lock();
+ if (input_bufs_omx_free.size()!=0) {
+ returned_buf=input_bufs_omx_free.front();
+ returned_buf->nFilledLen=0;
+ returned_buf->nOffset=0;
+ returned_buf->nTimeStamp=0;
+ input_bufs_omx_free.pop_front();
+ input_bufs_omx_in_libav.push_back(returned_buf);
+ input_bufs_omx_mutex.Unlock();
+ returned_buf->nFlags=OMX_BUFFERFLAG_TIME_UNKNOWN;
+ break;
+ }
+ input_bufs_omx_mutex.Unlock();
+ Log::getInstance()->log("Video", Log::DEBUG, "GetFreeOMXBuffer_libav no free sample block");
+ time_out++;
+ MILLISLEEP(20);
+ }
+ if (returned_buf) {
+ *pix_trans=pix_fmt_omx_free.front();
+ pix_fmt_omx_free.pop_front(); // we assume that there is always a twin
+ }
+ return returned_buf;
+ }
+
+ void VideoVPEOGL::release_buffer_transcode(struct AVCodecContext *c, AVFrame *pic)
+ {
+ VideoVPEOGL *video=(VideoVPEOGL *)getInstance();
+ video->ReturnEmptyOMXBuffer_libav((OMX_BUFFERHEADERTYPE*) pic->base[0],(transcode_pix_fmt *)pic->data[0]);
+ pic->data[0]=NULL; //without doing this avcodec is going to cry
+ }
+
+ void VideoVPEOGL::ReturnEmptyOMXBuffer_libav(OMX_BUFFERHEADERTYPE* buffer,transcode_pix_fmt *pix_fmt){
+ input_bufs_omx_mutex.Lock();
+ // We only return it, if it was not passed to OMX!
+ Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer_libav %d",input_bufs_omx_free.size());
+ pix_fmt_omx_free.push_back(pix_fmt);
+ list<OMX_BUFFERHEADERTYPE*>::iterator itty=input_bufs_omx_in_libav.begin();
+ while (itty!=input_bufs_omx_in_libav.end()) {
+ if (*itty==buffer) {
+ input_bufs_omx_in_libav.remove(buffer);
+ input_bufs_omx_free.push_back(buffer);
+ Log::getInstance()->log("Video", Log::NOTICE, "ReturnEmptyOMXBuffer_libav %d",input_bufs_omx_free.size());
+ break;
+ }
+ itty++; // advance the iterator, otherwise the loop never terminates when the buffer is not found
+ }
+ input_bufs_omx_mutex.Unlock();
+ }
+
+
+#endif
+
#endif
int VideoVPEOGL::shutdown()
// close(fdVideo);
return 1;
}
-
+#ifdef VPE_LIBAV_SUPPORT
int VideoVPEOGL::AllocateYUV400OglTexture(VPEOGLFrame* outframe,int width,int height,int stride)
{
Log::getInstance()->log("Video", Log::NOTICE, "Allocate ogl texture 400");
}
+#endif
void VideoVPEOGL::threadMethod()
{
long long run=0;
while (1) {
bool sleep=true;
- bool upload=false;
- bool view=false;
+
run++;
#ifdef VPE_LIBAV_SUPPORT
+ bool upload=false;
+ bool view=false;
dec_frame_libav_mutex.Lock();
AVFrame* dec_frame_libav_uploading_int=NULL;
}
if (doomx) {
if (AllocateCodecsOMX()) {
- decoding_backend=VPE_DECODER_OMX;
+ //decoding_backend=VPE_DECODER_OMX;
return 1;
// Otherwise fall back to libav
} else {
}
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+
+int VideoVPEOGL::InitTranscoderLibAV() {
+ transcodecodec_context_libav = avcodec_alloc_context3(transcodecodec_libav);
+ if (!transcodecodec_context_libav) {
+ Log::getInstance()->log("Video", Log::DEBUG, "Alloc avcodec context failed!");
+ return 0;
+ }
+
+ transcodecodec_context_libav->slice_flags = SLICE_FLAG_CODED_ORDER;
+
+ transcodecodec_context_libav->pix_fmt = PIX_FMT_TRANSCODE;
+ transcodecodec_context_libav->get_format = get_format_transcode;
+ transcodecodec_context_libav->get_buffer = get_buffer_transcode;
+ transcodecodec_context_libav->reget_buffer = reget_buffer_transcode;
+ transcodecodec_context_libav->release_buffer = release_buffer_transcode;
+ //transcodecodec_context_libav->flags |= CODEC_FLAG_TRUNCATED;
+ //transcodecodec_context_libav->time_base.den=9000; //pts values 90 KHz Clock /10;
+
+ int avc_ret = avcodec_open2(transcodecodec_context_libav, transcodecodec_libav, NULL);
+ if (avc_ret < 0) {
+ Log::getInstance()->log("Video", Log::DEBUG, "Opening libav codec failed \n");
+ return 0;
+ }
+
+ memset(&incom_packet_libav, 0, sizeof(incom_packet_libav));
+ incom_packet_libav_size = 200000;
+ incom_packet_libav.data = (uint8_t*) av_malloc(incom_packet_libav_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ return 1;
+}
+
+
+
+int VideoVPEOGL::DeInitTranscoderLibAV() {
+
+ av_free(incom_packet_libav.data);
+ incom_packet_libav.data=NULL;
+ incom_packet_libav_size=0;
+
+ if (transcodecodec_context_libav) {
+ avcodec_close(transcodecodec_context_libav);
+ av_free(transcodecodec_context_libav);
+ transcodecodec_context_libav=NULL;
+
+ }
+
+ return 1;
+}
+
+#endif
+
#ifdef VPE_OMX_SUPPORT
int VideoVPEOGL::AllocateCodecsOMX()
{
if (h264) {
ft_type.eCompressionFormat=OMX_VIDEO_CodingAVC;
} else {
- ft_type.eCompressionFormat=OMX_VIDEO_CodingMPEG2;
+ //ft_type.eCompressionFormat=OMX_VIDEO_CodingMPEG2;
+ ft_type.eCompressionFormat=OMX_VIDEO_CodingMPEG4;
+ decoding_backend=VPE_DECODER_OMX_libav_TRANSCODE;
+ InitTranscoderLibAV();
}
Demuxer* demux=Demuxer::getInstance();
return 0;
}
+ if (decoding_backend!=VPE_DECODER_OMX_libav_TRANSCODE) decoding_backend=VPE_DECODER_OMX;
omx_running=true;
return 1;
port_def_type.bBuffersContiguous,port_def_type.nBufferAlignment);*/
port_def_type.nBufferCountActual=60;
+ port_def_type.nBufferSize=max(port_def_type.nBufferSize,200000); // important for the transcoder
error=OMX_SetParameter(omx_vid_dec,OMX_IndexParamPortDefinition, &port_def_type);
}
input_bufs_omx_all.push_back(buf_head);
input_bufs_omx_free.push_back(buf_head);
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ transcode_pix_fmt* new_pix=NULL;
+ new_pix=(transcode_pix_fmt*)malloc(sizeof(transcode_pix_fmt));
+ pix_fmt_omx_all.push_back(new_pix);
+ pix_fmt_omx_free.push_back(new_pix);
+#endif
}
omx_first_frame=true;
input_bufs_omx_mutex.Unlock();
return 0;
}
+
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ free(pix_fmt_omx_all[i]);
+#endif
}
input_bufs_omx_all.clear();
input_bufs_omx_free.clear();
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ input_bufs_omx_in_libav.clear();
+ pix_fmt_omx_all.clear();
+ pix_fmt_omx_free.clear();
+#endif
input_bufs_omx_mutex.Unlock();
}
{
OMX_ERRORTYPE error;
omx_running=false;
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ if (decoding_backend==VPE_DECODER_OMX_libav_TRANSCODE)
+ DeInitTranscoderLibAV();
+#endif
+
if (omx_vid_dec) {
// first flush all buffers
 default: case 0: return 0; // no backend running
#ifdef VPE_OMX_SUPPORT
case VPE_DECODER_OMX: return DeliverMediaPacketOMX(packet,buffer,samplepos);
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+ case VPE_DECODER_OMX_libav_TRANSCODE: return DeliverMediaPacketOMXTranscode(packet,buffer,samplepos);
+#endif
#endif
#ifdef VPE_LIBAV_SUPPORT
case VPE_DECODER_libav: return DeliverMediaPacketlibav(packet,buffer,samplepos);
#endif
+#ifdef VPE_LIBAV_MPEG2_TRANSCODING
+
+
+
+int VideoVPEOGL::DecodePacketOMXTranscode()
+{
+ unsigned int haveToCopy=incom_packet_libav.size;
+ if (incom_packet_libav.size==0) return 1; // we are already empty
+
+
+ while (haveToCopy>0) {
+ int dec_bytes=0;
+ int frame_ready=0;
+
+ // Log::getInstance()->log("Video", Log::DEBUG, "Push data to decoder");
+
+#ifdef BENCHMARK_FPS
+ int cur_time=getTimeMS();
+#endif
+ dec_bytes=avcodec_decode_video2(transcodecodec_context_libav, &transcode_frame_libav,
+ &frame_ready, &incom_packet_libav);
+#ifdef BENCHMARK_FPS
+ time_in_decoder+=getTimeMS()-cur_time;
+ if (frame_ready) num_frames++;
+ if ((num_frames%100)==0) {
+ float fps=1000./(float)(time_in_decoder);
+ fps*=((float)num_frames);
+ Log::getInstance()->log("Video", Log::NOTICE, "Current Pure Decoding FPS %g", fps);
+ }
+#endif
+ if (dec_bytes<0) {
+ Log::getInstance()->log("Video", Log::DEBUG, "Decoding frame failed %x", dec_bytes);
+ return 0;
+ }
+ haveToCopy-=dec_bytes;
+ if (frame_ready) {
+ struct transcode_pix_fmt *transcode=(struct transcode_pix_fmt *)transcode_frame_libav.data[0];
+ //if (!benchmark) fwrite(transcode->packet.data,transcode->packet.size,1,output_file);
+
+
+ //add omx magic, this should be an omx packet
+ OMX_BUFFERHEADERTYPE* omx_packet=(OMX_BUFFERHEADERTYPE*)transcode_frame_libav.base[0];
+ omx_packet->nFilledLen=transcode->packet.size;
+
+ input_bufs_omx_mutex.Lock();
+ input_bufs_omx_in_libav.remove(omx_packet); // say that it is passed down to the decoder
+ input_bufs_omx_mutex.Unlock();
+
+ OMX_ERRORTYPE error=OMX_EmptyThisBuffer(omx_vid_dec,omx_packet);
+ if (error!=OMX_ErrorNone){
+ Log::getInstance()->log("Video", Log::DEBUG, "OMX_EmptyThisBuffer failed %x", error);
+ }
+
+
+
+ }
+
+ }
+ incom_packet_libav.size=0;
+ return 1;
+
+}
+
+
+UINT VideoVPEOGL::DeliverMediaPacketOMXTranscode(MediaPacket packet,
+ const UCHAR* buffer,
+ UINT *samplepos)
+{
+ //Later: add fallback code for libav
+ /*if (!videoon) {
+ *samplepos+=packet.length;
+ return packet.length;
+ }*/
+
+ if (!omx_running) return 0; // if we are not runnig do not do this
+
+
+ if (iframemode) {
+ //samplepos=0;
+ MILLISLEEP(10);
+ return 0; //Not in iframe mode!
+ }
+
+ UINT headerstrip=0;
+ if (packet.disconti) {
+ firstsynched=false;
+ if (!DecodePacketOMXTranscode()) return 0;
+ }
+
+ /*Inspect PES-Header */
+ if (*samplepos==0) {//stripheader
+ headerstrip=buffer[packet.pos_buffer+8]+9;/*9 fixed PES header bytes plus PES_header_data_length at offset 8*/
+ *samplepos+=headerstrip;
+ if ( packet.synched ) {
+
+ if (!DecodePacketOMXTranscode()) return 0; // WriteOut old Data
+
+
+ incom_packet_libav.pts=packet.pts/3600;
+ incom_packet_libav.dts=packet.dts/3600;
+ // reftime1=packet.presentation_time;
+ // reftime2=reftime1+1;
+ firstsynched=true;
+ } else {
+ incom_packet_libav.pts=0;
+ incom_packet_libav.dts=0;
+ if (!firstsynched) {//
+ *samplepos=packet.length;//if we have not processed at least one
+ return packet.length;//synched packet ignore it!
+ }
+ }
+ }
+
+
+
+
+
+
+ /*if (cur_input_buf_omx->nFilledLen==0) {//will only be changed on first packet
+ /*if (packet.disconti) {
+ ms->SetDiscontinuity(TRUE);
+ } else {
+ ms->SetDiscontinuity(FALSE);
+ }*
+ //if (packet.synched) {
+
+ //lastreftimePTS=packet.pts;
+ if (omx_first_frame) { // TODO time
+ cur_input_buf_omx->nFlags=OMX_BUFFERFLAG_STARTTIME;
+ omx_first_frame=false;
+ } else
+
+ //}
+ //else
+ //{
+ cur_input_buf_omx->nFlags=OMX_BUFFERFLAG_TIME_UNKNOWN;
+
+
+ // ms->SetSyncPoint(TRUE);
+ //}
+
+ }*/
+ unsigned int haveToCopy=packet.length-*samplepos;
+
+ if ((incom_packet_libav_size-incom_packet_libav.size)< haveToCopy) {
+ // if the buffer is to small reallocate
+ incom_packet_libav_size+=haveToCopy;
+ incom_packet_libav.data=(uint8_t*)av_realloc(incom_packet_libav.data,incom_packet_libav_size+FF_INPUT_BUFFER_PADDING_SIZE);
+ Log::getInstance()->log("Video", Log::DEBUG, "Reallocate avpacket buffer to %d", incom_packet_libav_size);
+ }
+ memcpy(incom_packet_libav.data+incom_packet_libav.size,buffer+packet.pos_buffer+*samplepos,haveToCopy);
+ incom_packet_libav.size+=haveToCopy;
+
+ *samplepos+=haveToCopy;
+
+
+ return *samplepos;
+
+
+}
+
+#endif
+
#ifdef VPE_LIBAV_SUPPORT
int VideoVPEOGL::DecodePacketlibav()