Diffstat (limited to 'contrib/ffmpeg/vhook')
-rw-r--r--  contrib/ffmpeg/vhook/Makefile    |  51
-rw-r--r--  contrib/ffmpeg/vhook/drawtext.c  | 531
-rw-r--r--  contrib/ffmpeg/vhook/fish.c      | 380
-rw-r--r--  contrib/ffmpeg/vhook/imlib2.c    | 450
-rw-r--r--  contrib/ffmpeg/vhook/null.c      | 116
-rw-r--r--  contrib/ffmpeg/vhook/ppm.c       | 367
-rw-r--r--  contrib/ffmpeg/vhook/watermark.c | 661
7 files changed, 2556 insertions(+), 0 deletions(-)
diff --git a/contrib/ffmpeg/vhook/Makefile b/contrib/ffmpeg/vhook/Makefile
new file mode 100644
index 000000000..06b48935e
--- /dev/null
+++ b/contrib/ffmpeg/vhook/Makefile
@@ -0,0 +1,51 @@
+include ../config.mak
+
+VPATH=$(SRC_PATH_BARE)/vhook
+
+CFLAGS=-I$(BUILD_ROOT) -I$(SRC_PATH) -I$(SRC_PATH)/libavutil -I$(SRC_PATH)/libavcodec \
+ -I$(SRC_PATH)/libavformat -I$(SRC_PATH)/libswscale $(VHOOKCFLAGS) -DHAVE_AV_CONFIG_H
+LDFLAGS+= -g
+
+HOOKS=null$(SLIBSUF) fish$(SLIBSUF) ppm$(SLIBSUF) watermark$(SLIBSUF)
+ALLHOOKS=$(HOOKS) imlib2$(SLIBSUF) drawtext$(SLIBSUF)
+
+ifeq ($(HAVE_IMLIB2),yes)
+ HOOKS += imlib2$(SLIBSUF)
+ LIBS_imlib2$(SLIBSUF) = -lImlib2
+endif
+
+ifeq ($(HAVE_FREETYPE2),yes)
+ HOOKS += drawtext$(SLIBSUF)
+ CFLAGS += `freetype-config --cflags`
+ LIBS_drawtext$(SLIBSUF) = `freetype-config --libs`
+endif
+
+SRCS := $(HOOKS:$(SLIBSUF)=.c)
+
+all: $(HOOKS)
+
+depend dep: $(SRCS)
+ $(CC) -MM $(CFLAGS) $^ 1>.depend
+
+install: $(HOOKS)
+ install -d "$(shlibdir)/vhook"
+ install -m 755 $(HOOKS) "$(shlibdir)/vhook"
+
+uninstall:
+ rm -f $(addprefix $(shlibdir)/vhook/,$(ALLHOOKS))
+ -rmdir "$(shlibdir)/vhook/"
+
+%$(SLIBSUF): %.o
+ $(CC) $(LDFLAGS) -o $@ $(VHOOKSHFLAGS) $< $(VHOOKLIBS) $(LIBS_$@)
+
+clean:
+ rm -f *.o *.d *~ *.a *.lib *.so *.dylib *.dll
+
+distclean: clean
+ rm -f .depend
+
+.PHONY: all depend dep clean distclean install* uninstall*
+
+ifneq ($(wildcard .depend),)
+include .depend
+endif
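
Each target built by the pattern rule above is a standalone shared library that ffmpeg loads at run time through its -vhook option. Every hook in this directory exports the same three framehook entry points, so, as a rough sketch (essentially null.c further below minus its colourspace handling; the frame_count field is purely illustrative):

    /* Hedged sketch only -- not part of this commit.  The signatures match
     * the ones every hook below uses (see framehook.h / null.c). */
    #include "framehook.h"

    typedef struct {
        int frame_count;                /* illustrative per-instance state */
    } ContextInfo;

    int Configure(void **ctxp, int argc, char *argv[])
    {
        *ctxp = av_mallocz(sizeof(ContextInfo));    /* per-hook context */
        return *ctxp ? 0 : -1;                      /* 0 on success */
    }

    void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt,
                 int width, int height, int64_t pts)
    {
        ContextInfo *ci = (ContextInfo *) ctx;
        ci->frame_count++;              /* inspect or modify 'picture' here */
    }

    void Release(void *ctx)
    {
        if (ctx)
            av_free(ctx);
    }
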
diff --git a/contrib/ffmpeg/vhook/drawtext.c b/contrib/ffmpeg/vhook/drawtext.c
new file mode 100644
index 000000000..081847620
--- /dev/null
+++ b/contrib/ffmpeg/vhook/drawtext.c
@@ -0,0 +1,531 @@
+/*
+ * drawtext.c: draw text on top of the video frames
+ ******************************************************************************
+ * Options:
+ * -f <filename> font filename (MANDATORY!!!)
+ * -s <pixel_size> font size in pixels [default 16]
+ * -b print background
+ * -o outline glyphs (use the bg color)
+ * -x <pos> x position ( >= 0) [default 0]
+ * -y <pos> y position ( >= 0) [default 0]
+ * -t <text> text to print (it is passed through strftime())
+ *           MANDATORY: used even when -T is given; in that case
+ *           -t is the fallback if the file cannot be read
+ * -T <filename> file with the text (re-read every frame)
+ * -c <#RRGGBB> foreground color ('internet' way) [default #ffffff]
+ * -C <#RRGGBB> background color ('internet' way) [default #000000]
+ *
+ ******************************************************************************
+ * Features:
+ * - True Type, Type1 and others via FreeType2 library
+ * - Font kerning (better output)
+ * - Line Wrap (if the text doesn't fit, the next char goes to the next line)
+ * - Background box
+ * - Outline
+ ******************************************************************************
+ * Author: Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define MAXSIZE_TEXT 1024
+
+#include "framehook.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#undef time
+#include <sys/time.h>
+#include <time.h>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_GLYPH_H
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define RGB_TO_YUV(rgb_color, yuv_color) do { \
+ yuv_color[0] = (FIX(0.29900) * rgb_color[0] + FIX(0.58700) * rgb_color[1] + FIX(0.11400) * rgb_color[2] + ONE_HALF) >> SCALEBITS; \
+ yuv_color[2] = ((FIX(0.50000) * rgb_color[0] - FIX(0.41869) * rgb_color[1] - FIX(0.08131) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
+ yuv_color[1] = ((- FIX(0.16874) * rgb_color[0] - FIX(0.33126) * rgb_color[1] + FIX(0.50000) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
+} while (0)
+
+#define COPY_3(dst,src) { \
+ dst[0]=src[0]; \
+ dst[1]=src[1]; \
+ dst[2]=src[2]; \
+}
+
+
+
+#define SET_PIXEL(picture, yuv_color, x, y) { \
+ picture->data[0][ (x) + (y)*picture->linesize[0] ] = yuv_color[0]; \
+ picture->data[1][ ((x/2) + (y/2)*picture->linesize[1]) ] = yuv_color[1]; \
+ picture->data[2][ ((x/2) + (y/2)*picture->linesize[2]) ] = yuv_color[2]; \
+}
+
+#define GET_PIXEL(picture, yuv_color, x, y) { \
+ yuv_color[0] = picture->data[0][ (x) + (y)*picture->linesize[0] ]; \
+ yuv_color[1] = picture->data[1][ (x/2) + (y/2)*picture->linesize[1] ]; \
+ yuv_color[2] = picture->data[2][ (x/2) + (y/2)*picture->linesize[2] ]; \
+}
+
+
+typedef struct {
+ unsigned char *text;
+ char *file;
+ unsigned int x;
+ unsigned int y;
+ int bg;
+ int outline;
+ unsigned char bgcolor[3]; /* YUV */
+ unsigned char fgcolor[3]; /* YUV */
+ FT_Library library;
+ FT_Face face;
+ FT_Glyph glyphs[ 255 ];
+ FT_Bitmap bitmaps[ 255 ];
+ int advance[ 255 ];
+ int bitmap_left[ 255 ];
+ int bitmap_top[ 255 ];
+ unsigned int glyphs_index[ 255 ];
+ int text_height;
+ int baseline;
+ int use_kerning;
+} ContextInfo;
+
+
+void Release(void *ctx)
+{
+ if (ctx)
+ av_free(ctx);
+}
+
+
+static int ParseColor(char *text, unsigned char yuv_color[3])
+{
+ char tmp[3];
+ unsigned char rgb_color[3];
+ int i;
+
+ tmp[2] = '\0';
+
+ if ((!text) || (strlen(text) != 7) || (text[0] != '#') )
+ return -1;
+
+ for (i=0; i < 3; i++)
+ {
+ tmp[0] = text[i*2+1];
+ tmp[1] = text[i*2+2];
+
+ rgb_color[i] = strtol(tmp, NULL, 16);
+ }
+
+ RGB_TO_YUV(rgb_color, yuv_color);
+
+ return 0;
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ int c;
+ int error;
+ ContextInfo *ci=NULL;
+ char *font=NULL;
+ unsigned int size=16;
+ FT_BBox bbox;
+ int yMax, yMin;
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ /* configure Context Info */
+ ci->text = NULL;
+ ci->file = NULL;
+ ci->x = ci->y = 0;
+ ci->fgcolor[0]=255;
+ ci->fgcolor[1]=128;
+ ci->fgcolor[2]=128;
+ ci->bgcolor[0]=0;
+ ci->bgcolor[1]=128;
+ ci->bgcolor[2]=128;
+ ci->bg = 0;
+ ci->outline = 0;
+ ci->text_height = 0;
+
+ optind = 0;
+ while ((c = getopt(argc, argv, "f:t:T:x:y:s:c:C:bo")) > 0) {
+ switch (c) {
+ case 'f':
+ font = optarg;
+ break;
+ case 't':
+ ci->text = av_strdup(optarg);
+ break;
+ case 'T':
+ ci->file = av_strdup(optarg);
+ break;
+ case 'x':
+ ci->x = (unsigned int) atoi(optarg);
+ break;
+ case 'y':
+ ci->y = (unsigned int) atoi(optarg);
+ break;
+ case 's':
+ size = (unsigned int) atoi(optarg);
+ break;
+ case 'c':
+ if (ParseColor(optarg, ci->fgcolor) == -1)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Invalid foreground color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -c #ffffff (for white foreground)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'C':
+ if (ParseColor(optarg, ci->bgcolor) == -1)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Invalid foreground color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -c #ffffff (for white foreground)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'b':
+ ci->bg=1;
+ break;
+ case 'o':
+ ci->outline=1;
+ break;
+ case '?':
+ av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ if (!ci->text)
+ {
+ av_log(NULL, AV_LOG_ERROR, "No text provided (-t text)\n");
+ return -1;
+ }
+
+ if (ci->file)
+ {
+ FILE *fp;
+ if ((fp=fopen(ci->file, "r")) == NULL)
+ {
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s", strerror(errno));
+ }
+ else
+ {
+ fclose(fp);
+ }
+ }
+
+ if (!font)
+ {
+ av_log(NULL, AV_LOG_ERROR, "No font file provided! (-f filename)\n");
+ return -1;
+ }
+
+ if ((error = FT_Init_FreeType(&(ci->library))) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not load FreeType (error# %d).\n", error);
+ return -1;
+ }
+
+ if ((error = FT_New_Face( ci->library, font, 0, &(ci->face) )) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not load face: %s (error# %d).\n", font, error);
+ return -1;
+ }
+
+ if ((error = FT_Set_Pixel_Sizes( ci->face, 0, size)) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not set font size to %d pixels (error# %d).\n", size, error);
+ return -1;
+ }
+
+ ci->use_kerning = FT_HAS_KERNING(ci->face);
+
+ /* load and cache glyphs */
+ yMax = -32000;
+ yMin = 32000;
+ for (c=0; c < 256; c++)
+ {
+ /* Load char */
+ error = FT_Load_Char( ci->face, (unsigned char) c, FT_LOAD_RENDER | FT_LOAD_MONOCHROME );
+ if (error) continue; /* ignore errors */
+
+ /* Save bitmap */
+ ci->bitmaps[c] = ci->face->glyph->bitmap;
+ /* Save bitmap left */
+ ci->bitmap_left[c] = ci->face->glyph->bitmap_left;
+ /* Save bitmap top */
+ ci->bitmap_top[c] = ci->face->glyph->bitmap_top;
+
+ /* Save advance */
+ ci->advance[c] = ci->face->glyph->advance.x >> 6;
+
+ /* Save glyph */
+ error = FT_Get_Glyph( ci->face->glyph, &(ci->glyphs[c]) );
+ /* Save glyph index */
+ ci->glyphs_index[c] = FT_Get_Char_Index( ci->face, (unsigned char) c );
+
+ /* Measure text height to calculate text_height (or the maximum text height) */
+ FT_Glyph_Get_CBox( ci->glyphs[ c ], ft_glyph_bbox_pixels, &bbox );
+ if (bbox.yMax > yMax)
+ yMax = bbox.yMax;
+ if (bbox.yMin < yMin)
+ yMin = bbox.yMin;
+
+ }
+
+ ci->text_height = yMax - yMin;
+ ci->baseline = yMax;
+
+ return 0;
+}
+
+
+
+
+static inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_fgcolor[3], unsigned char yuv_bgcolor[3], int outline)
+{
+ int r, c;
+ int spixel, dpixel[3], in_glyph=0;
+
+ if (bitmap->pixel_mode == ft_pixel_mode_mono)
+ {
+ in_glyph = 0;
+ for (r=0; (r < bitmap->rows) && (r+y < height); r++)
+ {
+ for (c=0; (c < bitmap->width) && (c+x < width); c++)
+ {
+ /* pixel in the picture (destination) */
+ GET_PIXEL(picture, dpixel, (c+x), (y+r));
+
+ /* pixel in the glyph bitmap (source) */
+ spixel = bitmap->buffer[r*bitmap->pitch +c/8] & (0x80>>(c%8));
+
+ if (spixel)
+ COPY_3(dpixel, yuv_fgcolor);
+
+ if (outline)
+ {
+ /* border detection: */
+ if ( (!in_glyph) && (spixel) )
+ /* left border detected */
+ {
+ in_glyph = 1;
+ /* draw left pixel border */
+ if (c-1 >= 0)
+ SET_PIXEL(picture, yuv_bgcolor, (c+x-1), (y+r));
+ }
+ else if ( (in_glyph) && (!spixel) )
+ /* right border detected */
+ {
+ in_glyph = 0;
+ /* 'draw' right pixel border */
+ COPY_3(dpixel, yuv_bgcolor);
+ }
+
+ if (in_glyph)
+ /* see if we have a top/bottom border */
+ {
+ /* top */
+ if ( (r-1 >= 0) && !(bitmap->buffer[(r-1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
+ /* we have a top border */
+ SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r-1));
+
+ /* bottom */
+ if ( (r+1 < height) && !(bitmap->buffer[(r+1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
+ /* we have a bottom border */
+ SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r+1));
+
+ }
+ }
+
+ SET_PIXEL(picture, dpixel, (c+x), (y+r));
+ }
+ }
+ }
+}
+
+
+static inline void draw_box(AVPicture *picture, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_color[3])
+{
+ int i, j;
+
+ for (j = 0; (j < height); j++)
+ for (i = 0; (i < width); i++)
+ {
+ SET_PIXEL(picture, yuv_color, (i+x), (y+j));
+ }
+
+}
+
+
+
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ FT_Face face = ci->face;
+ FT_GlyphSlot slot = face->glyph;
+ unsigned char *text = ci->text;
+ unsigned char c;
+ int x = 0, y = 0, i=0, size=0;
+ unsigned char buff[MAXSIZE_TEXT];
+ unsigned char tbuff[MAXSIZE_TEXT];
+ time_t now = time(0);
+ int str_w, str_w_max;
+ FT_Vector pos[MAXSIZE_TEXT];
+ FT_Vector delta;
+
+ if (ci->file)
+ {
+ int fd = open(ci->file, O_RDONLY);
+
+ if (fd < 0)
+ {
+ text = ci->text;
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s", strerror(errno));
+ }
+ else
+ {
+ int l = read(fd, tbuff, sizeof(tbuff) - 1);
+
+ if (l >= 0)
+ {
+ tbuff[l] = 0;
+ text = tbuff;
+ }
+ else
+ {
+ text = ci->text;
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be read. Using text provided with -t switch: %s", strerror(errno));
+ }
+ close(fd);
+ }
+ }
+ else
+ {
+ text = ci->text;
+ }
+
+ strftime(buff, sizeof(buff), text, localtime(&now));
+
+ text = buff;
+
+ size = strlen(text);
+
+
+
+
+ /* measure string size and save glyphs position*/
+ str_w = str_w_max = 0;
+ x = ci->x;
+ y = ci->y;
+ for (i=0; i < size; i++)
+ {
+ c = text[i];
+
+ /* kerning */
+ if ( (ci->use_kerning) && (i > 0) && (ci->glyphs_index[c]) )
+ {
+ FT_Get_Kerning( ci->face,
+ ci->glyphs_index[ text[i-1] ],
+ ci->glyphs_index[c],
+ ft_kerning_default,
+ &delta );
+
+ x += delta.x >> 6;
+ }
+
+ if (( (x + ci->advance[ c ]) >= width ) || ( c == '\n' ))
+ {
+ str_w = width - ci->x - 1;
+
+ y += ci->text_height;
+ x = ci->x;
+ }
+
+
+ /* save position */
+ pos[i].x = x + ci->bitmap_left[c];
+ pos[i].y = y - ci->bitmap_top[c] + ci->baseline;
+
+
+ x += ci->advance[c];
+
+
+ if (str_w > str_w_max)
+ str_w_max = str_w;
+
+ }
+
+
+
+
+ if (ci->bg)
+ {
+ /* Check if it doesn't pass the limits */
+ if ( str_w_max + ci->x >= width )
+ str_w_max = width - ci->x - 1;
+ if ( y >= height )
+ y = height - 1 - 2*ci->y;
+
+ /* Draw Background */
+ draw_box( picture, ci->x, ci->y, str_w_max, y - ci->y, ci->bgcolor );
+ }
+
+
+
+ /* Draw Glyphs */
+ for (i=0; i < size; i++)
+ {
+ c = text[i];
+
+ if (
+ ( (c == '_') && (text == ci->text) ) || /* skip '_' (treat it as a space)
+ IF the text was specified on the cmd line
+ (which doesn't like nested quotes) */
+ ( c == '\n' ) /* Skip new line char, just go to new line */
+ )
+ continue;
+
+ /* now, draw to our target surface */
+ draw_glyph( picture,
+ &(ci->bitmaps[ c ]),
+ pos[i].x,
+ pos[i].y,
+ width,
+ height,
+ ci->fgcolor,
+ ci->bgcolor,
+ ci->outline );
+
+ /* increment pen position */
+ x += slot->advance.x >> 6;
+ }
+
+
+}
+
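
As a sanity check on the colour handling above: the defaults that Configure() writes into fgcolor and bgcolor are exactly what the documented defaults produce through RGB_TO_YUV, i.e. #ffffff maps to Y=255, U=V=128 and #000000 maps to Y=0, U=V=128. The standalone harness below is only an illustrative sketch; parse_color is a copy of ParseColor() plus the macro, not code taken from the hook:

    /* Hypothetical test harness: reproduces what -c/-C do in drawtext.c. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SCALEBITS 10
    #define ONE_HALF  (1 << (SCALEBITS - 1))
    #define FIX(x)    ((int) ((x) * (1 << SCALEBITS) + 0.5))

    static int parse_color(const char *text, unsigned char yuv[3])
    {
        unsigned char rgb[3];
        char tmp[3] = { 0 };
        int i;

        if (!text || strlen(text) != 7 || text[0] != '#')
            return -1;
        for (i = 0; i < 3; i++) {
            tmp[0] = text[i * 2 + 1];
            tmp[1] = text[i * 2 + 2];
            rgb[i] = strtol(tmp, NULL, 16);
        }
        yuv[0] = (FIX(0.29900) * rgb[0] + FIX(0.58700) * rgb[1] + FIX(0.11400) * rgb[2] + ONE_HALF) >> SCALEBITS;
        yuv[2] = ((FIX(0.50000) * rgb[0] - FIX(0.41869) * rgb[1] - FIX(0.08131) * rgb[2] + ONE_HALF - 1) >> SCALEBITS) + 128;
        yuv[1] = ((-FIX(0.16874) * rgb[0] - FIX(0.33126) * rgb[1] + FIX(0.50000) * rgb[2] + ONE_HALF - 1) >> SCALEBITS) + 128;
        return 0;
    }

    int main(void)
    {
        unsigned char yuv[3];

        parse_color("#ffffff", yuv);    /* prints 255 128 128: the fgcolor default */
        printf("%d %d %d\n", yuv[0], yuv[1], yuv[2]);
        parse_color("#000000", yuv);    /* prints 0 128 128: the bgcolor default */
        printf("%d %d %d\n", yuv[0], yuv[1], yuv[2]);
        return 0;
    }
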
diff --git a/contrib/ffmpeg/vhook/fish.c b/contrib/ffmpeg/vhook/fish.c
new file mode 100644
index 000000000..2a30d2847
--- /dev/null
+++ b/contrib/ffmpeg/vhook/fish.c
@@ -0,0 +1,380 @@
+/*
+ * Fish Detector Hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This file implements a fish detector. It is used to see when a
+ * goldfish passes in front of the camera. It does this by counting
+ * the number of input pixels that fall within a particular HSV
+ * range.
+ *
+ * It takes a multitude of arguments:
+ *
+ * -h <num>-<num> the range of H values that are fish
+ * -s <num>-<num> the range of S values that are fish
+ * -v <num>-<num> the range of V values that are fish
+ * -z zap all non-fish values to black
+ * -l <num> limit the number of saved files to <num>
+ * -i <num> only check frames every <num> seconds
+ * -t <num> the threshold for the amount of fish pixels (range 0-1)
+ * -d turn debugging on
+ * -D <directory> where to put the fish images
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <string.h>
+#include <time.h>
+#include <stdio.h>
+#include <dirent.h>
+
+#include "framehook.h"
+#include "dsputil.h"
+#include "avformat.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define YUV_TO_RGB1_CCIR(cb1, cr1)\
+{\
+ cb = (cb1) - 128;\
+ cr = (cr1) - 128;\
+ r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
+ g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
+ ONE_HALF;\
+ b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
+{\
+ yt = ((y1) - 16) * FIX(255.0/219.0);\
+ r = cm[(yt + r_add) >> SCALEBITS];\
+ g = cm[(yt + g_add) >> SCALEBITS];\
+ b = cm[(yt + b_add) >> SCALEBITS];\
+}
+
+
+
+
+typedef struct {
+ int h; /* 0 .. 360 */
+ int s; /* 0 .. 255 */
+ int v; /* 0 .. 255 */
+} HSV;
+
+typedef struct {
+ int zapping;
+ int threshold;
+ HSV dark, bright;
+ char *dir;
+ int file_limit;
+ int debug;
+ int min_interval;
+ int64_t next_pts;
+ int inset;
+ int min_width;
+ struct SwsContext *toRGB_convert_ctx;
+} ContextInfo;
+
+static void dorange(const char *s, int *first, int *second, int maxval)
+{
+ sscanf(s, "%d-%d", first, second);
+ if (*first > maxval)
+ *first = maxval;
+ if (*second > maxval)
+ *second = maxval;
+}
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx) {
+ sws_freeContext(ci->toRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ ContextInfo *ci;
+ int c;
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ optind = 0;
+
+ ci->dir = "/tmp";
+ ci->threshold = 100;
+ ci->file_limit = 100;
+ ci->min_interval = 1000000;
+ ci->inset = 10; /* Percent */
+
+ while ((c = getopt(argc, argv, "w:i:dh:s:v:zl:t:D:")) > 0) {
+ switch (c) {
+ case 'h':
+ dorange(optarg, &ci->dark.h, &ci->bright.h, 360);
+ break;
+ case 's':
+ dorange(optarg, &ci->dark.s, &ci->bright.s, 255);
+ break;
+ case 'v':
+ dorange(optarg, &ci->dark.v, &ci->bright.v, 255);
+ break;
+ case 'z':
+ ci->zapping = 1;
+ break;
+ case 'l':
+ ci->file_limit = atoi(optarg);
+ break;
+ case 'i':
+ ci->min_interval = 1000000 * atof(optarg);
+ break;
+ case 't':
+ ci->threshold = atof(optarg) * 1000;
+ if (ci->threshold > 1000 || ci->threshold < 0) {
+ fprintf(stderr, "Invalid threshold value '%s' (range is 0-1)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'w':
+ ci->min_width = atoi(optarg);
+ break;
+ case 'd':
+ ci->debug++;
+ break;
+ case 'D':
+ ci->dir = av_strdup(optarg);
+ break;
+ default:
+ fprintf(stderr, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "Fish detector configured:\n");
+ fprintf(stderr, " HSV range: %d,%d,%d - %d,%d,%d\n",
+ ci->dark.h,
+ ci->dark.s,
+ ci->dark.v,
+ ci->bright.h,
+ ci->bright.s,
+ ci->bright.v);
+ fprintf(stderr, " Threshold is %d%% pixels\n", ci->threshold / 10);
+
+
+ return 0;
+}
+
+static void get_hsv(HSV *hsv, int r, int g, int b)
+{
+ int i, v, x, f;
+
+ x = (r < g) ? r : g;
+ if (b < x)
+ x = b;
+ v = (r > g) ? r : g;
+ if (b > v)
+ v = b;
+
+ if (v == x) {
+ hsv->h = 0;
+ hsv->s = 0;
+ hsv->v = v;
+ return;
+ }
+
+ if (r == v) {
+ f = g - b;
+ i = 0;
+ } else if (g == v) {
+ f = b - r;
+ i = 2 * 60;
+ } else {
+ f = r - g;
+ i = 4 * 60;
+ }
+
+ hsv->h = i + (60 * f) / (v - x);
+ if (hsv->h < 0)
+ hsv->h += 360;
+
+ hsv->s = (255 * (v - x)) / v;
+ hsv->v = v;
+
+ return;
+}
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int rowsize = picture->linesize[0];
+
+#if 0
+ printf("pix_fmt = %d, width = %d, pts = %lld, ci->next_pts = %lld\n",
+ pix_fmt, width, pts, ci->next_pts);
+#endif
+
+ if (pts < ci->next_pts)
+ return;
+
+ if (width < ci->min_width)
+ return;
+
+ ci->next_pts = pts + 1000000;
+
+ if (pix_fmt == PIX_FMT_YUV420P) {
+ uint8_t *y, *u, *v;
+ int width2 = width >> 1;
+ int inrange = 0;
+ int pixcnt;
+ int h;
+ int h_start, h_end;
+ int w_start, w_end;
+
+ h_end = 2 * ((ci->inset * height) / 200);
+ h_start = height - h_end;
+
+ w_end = (ci->inset * width2) / 100;
+ w_start = width2 - w_end;
+
+ pixcnt = ((h_start - h_end) >> 1) * (w_start - w_end);
+
+ y = picture->data[0] + h_end * picture->linesize[0] + w_end * 2;
+ u = picture->data[1] + h_end * picture->linesize[1] / 2 + w_end;
+ v = picture->data[2] + h_end * picture->linesize[2] / 2 + w_end;
+
+ for (h = h_start; h > h_end; h -= 2) {
+ int w;
+
+ for (w = w_start; w > w_end; w--) {
+ unsigned int r,g,b;
+ HSV hsv;
+ int cb, cr, yt, r_add, g_add, b_add;
+
+ YUV_TO_RGB1_CCIR(u[0], v[0]);
+ YUV_TO_RGB2_CCIR(r, g, b, y[0]);
+
+ get_hsv(&hsv, r, g, b);
+
+ if (ci->debug > 1)
+ fprintf(stderr, "(%d,%d,%d) -> (%d,%d,%d)\n",
+ r,g,b,hsv.h,hsv.s,hsv.v);
+
+
+ if (hsv.h >= ci->dark.h && hsv.h <= ci->bright.h &&
+ hsv.s >= ci->dark.s && hsv.s <= ci->bright.s &&
+ hsv.v >= ci->dark.v && hsv.v <= ci->bright.v) {
+ inrange++;
+ } else if (ci->zapping) {
+ y[0] = y[1] = y[rowsize] = y[rowsize + 1] = 16;
+ u[0] = 128;
+ v[0] = 128;
+ }
+
+ y+= 2;
+ u++;
+ v++;
+ }
+
+ y += picture->linesize[0] * 2 - (w_start - w_end) * 2;
+ u += picture->linesize[1] - (w_start - w_end);
+ v += picture->linesize[2] - (w_start - w_end);
+ }
+
+ if (ci->debug)
+ fprintf(stderr, "Fish: Inrange=%d of %d = %d threshold\n", inrange, pixcnt, 1000 * inrange / pixcnt);
+
+ if (inrange * 1000 / pixcnt >= ci->threshold) {
+ /* Save to file */
+ int size;
+ char *buf;
+ AVPicture picture1;
+ static int frame_counter;
+ static int foundfile;
+
+ if ((frame_counter++ % 20) == 0) {
+ /* Check how many files we have */
+ DIR *d;
+
+ foundfile = 0;
+
+ d = opendir(ci->dir);
+ if (d) {
+ struct dirent *dent;
+
+ while ((dent = readdir(d))) {
+ if (strncmp("fishimg", dent->d_name, 7) == 0) {
+ if (strcmp(".ppm", dent->d_name + strlen(dent->d_name) - 4) == 0) {
+ foundfile++;
+ }
+ }
+ }
+ closedir(d);
+ }
+ }
+
+ if (foundfile < ci->file_limit) {
+ FILE *f;
+ char fname[256];
+
+ size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
+
+ // if we already got a SWS context, let's realloc it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+ // img_convert parameters are 2 first destination, then 4 source
+ // sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ /* Write out the PPM file */
+ snprintf(fname, sizeof(fname), "%s/fishimg%ld_%"PRId64".ppm", ci->dir, (long)(av_gettime() / 1000000), pts);
+ f = fopen(fname, "w");
+ if (f) {
+ fprintf(f, "P6 %d %d 255\n", width, height);
+ fwrite(buf, width * height * 3, 1, f);
+ fclose(f);
+ }
+
+ av_free(buf);
+ ci->next_pts = pts + ci->min_interval;
+ }
+ }
+ }
+}
+
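
To make the -h/-s/-v ranges above concrete, here is a standalone copy of the get_hsv() mapping together with one worked sample; the orange value is made up purely for illustration. H is reported in 0..360 and S, V in 0..255, matching the option ranges:

    /* Hypothetical test harness around fish.c's get_hsv(). */
    #include <stdio.h>

    typedef struct { int h, s, v; } HSV;

    static void get_hsv(HSV *hsv, int r, int g, int b)
    {
        int i, v, x, f;

        x = r < g ? r : g;              /* x = min(r, g, b) */
        if (b < x) x = b;
        v = r > g ? r : g;              /* v = max(r, g, b) */
        if (b > v) v = b;

        if (v == x) {                   /* grey: hue is undefined, report 0 */
            hsv->h = hsv->s = 0;
            hsv->v = v;
            return;
        }
        if (r == v)      { f = g - b; i = 0; }
        else if (g == v) { f = b - r; i = 2 * 60; }
        else             { f = r - g; i = 4 * 60; }

        hsv->h = i + (60 * f) / (v - x);
        if (hsv->h < 0) hsv->h += 360;
        hsv->s = (255 * (v - x)) / v;
        hsv->v = v;
    }

    int main(void)
    {
        HSV hsv;

        get_hsv(&hsv, 255, 128, 0);     /* a goldfish-like orange */
        printf("h=%d s=%d v=%d\n", hsv.h, hsv.s, hsv.v);   /* h=30 s=255 v=255 */
        return 0;
    }

So '-h 20-40 -s 200-255 -v 200-255' would count that pixel as fish, and with '-t 0.1' a frame is saved once at least 10% of the sampled pixels fall in range.
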
diff --git a/contrib/ffmpeg/vhook/imlib2.c b/contrib/ffmpeg/vhook/imlib2.c
new file mode 100644
index 000000000..87c54cf0b
--- /dev/null
+++ b/contrib/ffmpeg/vhook/imlib2.c
@@ -0,0 +1,450 @@
+/*
+ * imlib2 based hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This module implements a text overlay for a video image. Currently it
+ * supports a fixed overlay or reading the text from a file. The string
+ * is passed through strftime so that it is easy to imprint the date and
+ * time onto the image.
+ *
+ * You may also overlay an image (even semi-transparent) like TV stations do.
+ * You may move either the text or the image around your video to create
+ * scrolling credits, for example.
+ *
+ * Fonts are looked up in the directories listed in FONTPATH
+ *
+ * Options:
+ *
+ * -c <color> The color of the text
+ * -F <fontname> The font face and size
+ * -t <text> The text
+ * -f <filename> The filename to read text from
+ * -x <expression> X coordinate of text or image
+ * -y <expression> Y coordinate of text or image
+ * -i <filename> The filename to read an image from
+ *
+ * Expressions are functions of:
+ * N // frame number (starting at zero)
+ * H // frame height
+ * W // frame width
+ * h // image height
+ * w // image width
+ * X // previous x
+ * Y // previous y
+ *
+
+ Examples:
+
+ FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
+ FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
+ FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
+ export FONTPATH
+
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png' \
+ -acodec copy -sameq output.avi
+
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello' \
+ -acodec copy -sameq output.avi
+
+ * This module is very much intended as an example of what could be done.
+ *
+ * One caution is that this is an expensive process -- in particular the
+ * conversion of the image into RGB and back is time consuming. For some
+ * special cases -- e.g. painting black text -- it would be faster to paint
+ * the text into a bitmap and then combine it directly into the YUV
+ * image. However, this code is fast enough to handle 10 fps of 320x240 on a
+ * 900MHz Duron in maybe 15% of the CPU.
+
+ * Further statistics, measured on a 3GHz Pentium 4 with FFmpeg SVN-r6798:
+ * the input movie is 20.2 seconds of PAL DV in an AVI container,
+ * the output movie is a DVD-compliant VOB.
+ *
+ ffmpeg -i input.avi -target pal-dvd out.vob
+ # 13.516s just transcode
+ ffmpeg -i input.avi -vhook /usr/local/bin/vhook/null.dll -target pal-dvd out.vob
+ # 23.546s transcode and img_convert
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera/20 -x 150-0.5*N -y 70+0.25*N -t Hello_person' \
+ -target pal-dvd out.vob
+ # 21.454s transcode, img_convert and move text around
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x 150-0.5*N -y 70+0.25*N -i /usr/share/imlib2/data/images/bulb.png' \
+ -target pal-dvd out.vob
+ # 20.828s transcode, img_convert and move image around
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "framehook.h"
+#include "swscale.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#undef time
+#include <sys/time.h>
+#include <time.h>
+#include <X11/Xlib.h>
+#include <Imlib2.h>
+#include "eval.h"
+
+const char *const_names[]={
+ "PI",
+ "E",
+ "N", // frame number (starting at zero)
+ "H", // frame height
+ "W", // frame width
+ "h", // image height
+ "w", // image width
+ "X", // previous x
+ "Y", // previous y
+ NULL
+};
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ int dummy;
+ Imlib_Font fn;
+ char *text;
+ char *file;
+ int r, g, b;
+ double x, y;
+ char *fileImage;
+ struct _CachedImage *cache;
+ Imlib_Image imageOverlaid;
+ AVEvalExpr *eval_x, *eval_y;
+ char *expr_x, *expr_y;
+ int frame_number;
+ int imageOverlaid_width, imageOverlaid_height;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... and then converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+} ContextInfo;
+
+typedef struct _CachedImage {
+ struct _CachedImage *next;
+ Imlib_Image image;
+ int width;
+ int height;
+} CachedImage;
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ci->cache) {
+ imlib_context_set_image(ci->cache->image);
+ imlib_free_image();
+ av_free(ci->cache);
+ }
+ if (ctx) {
+ if (ci->imageOverlaid) {
+ imlib_context_set_image(ci->imageOverlaid);
+ imlib_free_image();
+ }
+ ff_eval_free(ci->eval_x);
+ ff_eval_free(ci->eval_y);
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ int c;
+ ContextInfo *ci;
+ char *font = "LucidaSansDemiBold/16";
+ char *fp = getenv("FONTPATH");
+ char *color = 0;
+ FILE *f;
+ char *p;
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ ci->x = 0.0;
+ ci->y = 0.0;
+ ci->expr_x = "0.0";
+ ci->expr_y = "0.0";
+
+ optind = 0;
+
+ /* Use ':' to split FONTPATH */
+ if (fp)
+ while (p = strchr(fp, ':')) {
+ *p = 0;
+ imlib_add_path_to_font_path(fp);
+ fp = p + 1;
+ }
+ if ((fp) && (*fp))
+ imlib_add_path_to_font_path(fp);
+
+
+ while ((c = getopt(argc, argv, "c:f:F:t:x:y:i:")) > 0) {
+ switch (c) {
+ case 'c':
+ color = optarg;
+ break;
+ case 'F':
+ font = optarg;
+ break;
+ case 't':
+ ci->text = av_strdup(optarg);
+ break;
+ case 'f':
+ ci->file = av_strdup(optarg);
+ break;
+ case 'x':
+ ci->expr_x = av_strdup(optarg);
+ break;
+ case 'y':
+ ci->expr_y = av_strdup(optarg);
+ break;
+ case 'i':
+ ci->fileImage = av_strdup(optarg);
+ break;
+ case '?':
+ fprintf(stderr, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ if (ci->text || ci->file) {
+ ci->fn = imlib_load_font(font);
+ if (!ci->fn) {
+ fprintf(stderr, "Failed to load font '%s'\n", font);
+ return -1;
+ }
+ imlib_context_set_font(ci->fn);
+ imlib_context_set_direction(IMLIB_TEXT_TO_RIGHT);
+ }
+
+ if (color) {
+ char buff[256];
+ int done = 0;
+
+ f = fopen("/usr/share/X11/rgb.txt", "r");
+ if (!f)
+ f = fopen("/usr/lib/X11/rgb.txt", "r");
+ if (!f) {
+ fprintf(stderr, "Failed to find rgb.txt\n");
+ return -1;
+ }
+ while (fgets(buff, sizeof(buff), f)) {
+ int r, g, b;
+ char colname[80];
+
+ if (sscanf(buff, "%d %d %d %64s", &r, &g, &b, colname) == 4 &&
+ strcasecmp(colname, color) == 0) {
+ ci->r = r;
+ ci->g = g;
+ ci->b = b;
+ /* fprintf(stderr, "%s -> %d,%d,%d\n", colname, r, g, b); */
+ done = 1;
+ break;
+ }
+ }
+ fclose(f);
+ if (!done) {
+ fprintf(stderr, "Unable to find color '%s' in rgb.txt\n", color);
+ return -1;
+ }
+ }
+ imlib_context_set_color(ci->r, ci->g, ci->b, 255);
+
+ /* load the image (for example, credits for a movie) */
+ if (ci->fileImage) {
+ ci->imageOverlaid = imlib_load_image_immediately(ci->fileImage);
+ if (!(ci->imageOverlaid)){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't load image '%s'\n", ci->fileImage);
+ return -1;
+ }
+ imlib_context_set_image(ci->imageOverlaid);
+ ci->imageOverlaid_width = imlib_image_get_width();
+ ci->imageOverlaid_height = imlib_image_get_height();
+ }
+
+ if (!(ci->eval_x = ff_parse(ci->expr_x, const_names, NULL, NULL, NULL, NULL, NULL))){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't parse x expression '%s'\n", ci->expr_x);
+ return -1;
+ }
+
+ if (!(ci->eval_y = ff_parse(ci->expr_y, const_names, NULL, NULL, NULL, NULL, NULL))){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't parse y expression '%s'\n", ci->expr_y);
+ return -1;
+ }
+
+ return 0;
+}
+
+static Imlib_Image get_cached_image(ContextInfo *ci, int width, int height)
+{
+ CachedImage *cache;
+
+ for (cache = ci->cache; cache; cache = cache->next) {
+ if (width == cache->width && height == cache->height)
+ return cache->image;
+ }
+
+ return NULL;
+}
+
+static void put_cached_image(ContextInfo *ci, Imlib_Image image, int width, int height)
+{
+ CachedImage *cache = av_mallocz(sizeof(*cache));
+
+ cache->image = image;
+ cache->width = width;
+ cache->height = height;
+ cache->next = ci->cache;
+ ci->cache = cache;
+}
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ AVPicture picture1;
+ Imlib_Image image;
+ DATA32 *data;
+
+ image = get_cached_image(ci, width, height);
+
+ if (!image) {
+ image = imlib_create_image(width, height);
+ put_cached_image(ci, image, width, height);
+ }
+
+ imlib_context_set_image(image);
+ data = imlib_image_get_data();
+
+ avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGBA32, width, height);
+
+ // if we already got a SWS context, let's realloc it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ imlib_image_set_has_alpha(0);
+
+ {
+ int wid, hig, h_a, v_a;
+ char buff[1000];
+ char tbuff[1000];
+ char *tbp = ci->text;
+ time_t now = time(0);
+ char *p, *q;
+ int y;
+
+ double const_values[]={
+ M_PI,
+ M_E,
+ ci->frame_number, // frame number (starting at zero)
+ height, // frame height
+ width, // frame width
+ ci->imageOverlaid_height, // image height
+ ci->imageOverlaid_width, // image width
+ ci->x, // previous x
+ ci->y, // previous y
+ 0
+ };
+
+ if (ci->file) {
+ int fd = open(ci->file, O_RDONLY);
+
+ if (fd < 0) {
+ tbp = "[File not found]";
+ } else {
+ int l = read(fd, tbuff, sizeof(tbuff) - 1);
+
+ if (l >= 0) {
+ tbuff[l] = 0;
+ tbp = tbuff;
+ } else {
+ tbp = "[I/O Error]";
+ }
+ close(fd);
+ }
+ }
+
+ if (tbp)
+ strftime(buff, sizeof(buff), tbp, localtime(&now));
+ else if (!(ci->imageOverlaid))
+ strftime(buff, sizeof(buff), "[No data]", localtime(&now));
+
+ ci->x = ff_parse_eval(ci->eval_x, const_values, ci);
+ ci->y = ff_parse_eval(ci->eval_y, const_values, ci);
+ y = ci->y;
+
+ if (!(ci->imageOverlaid))
+ for (p = buff; p; p = q) {
+ q = strchr(p, '\n');
+ if (q)
+ *q++ = 0;
+
+ imlib_text_draw_with_return_metrics(ci->x, y, p, &wid, &hig, &h_a, &v_a);
+ y += v_a;
+ }
+
+ if (ci->imageOverlaid) {
+ imlib_context_set_image(image);
+ imlib_blend_image_onto_image(ci->imageOverlaid, 0,
+ 0, 0, ci->imageOverlaid_width, ci->imageOverlaid_height,
+ ci->x, ci->y, ci->imageOverlaid_width, ci->imageOverlaid_height);
+ }
+
+ }
+
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ width, height, PIX_FMT_RGBA32,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, height,
+ picture->data, picture->linesize);
+
+ ci->frame_number++;
+}
+
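
The -x/-y expressions are compiled once in Configure() with ff_parse() and then re-evaluated for every frame against a const_values array whose order has to match const_names (PI, E, N, H, W, h, w, X, Y). The sketch below only restates that per-frame step; it reuses the eval.h calls already present above, and the eval_position helper and its parameter list are made up for illustration:

    /* Hedged sketch of the per-frame evaluation done in Process() above. */
    #include <math.h>
    #include "eval.h"

    static double eval_position(AVEvalExpr *e, double frame_number,
                                double frame_w, double frame_h,
                                double img_w, double img_h,
                                double prev_x, double prev_y)
    {
        /* order must match const_names[]: PI, E, N, H, W, h, w, X, Y */
        double const_values[] = {
            M_PI, M_E,
            frame_number,
            frame_h, frame_w,
            img_h, img_w,
            prev_x, prev_y,
            0
        };

        return ff_parse_eval(e, const_values, NULL);
    }

For instance, the header's '-x W*(0.5+0.25*sin(N/47*PI))-w/2' evaluates to 720*0.5 - 32 = 328 on the first frame (N=0) of a 720-pixel-wide movie with a 64-pixel-wide overlay.
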
diff --git a/contrib/ffmpeg/vhook/null.c b/contrib/ffmpeg/vhook/null.c
new file mode 100644
index 000000000..041e5abda
--- /dev/null
+++ b/contrib/ffmpeg/vhook/null.c
@@ -0,0 +1,116 @@
+/*
+ * Null Video Hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stdio.h>
+
+#include "framehook.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ int dummy;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+
+ // ... and later converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+
+} ContextInfo;
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx) {
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ fprintf(stderr, "Called with argc=%d\n", argc);
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ return 0;
+}
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ (void) ci;
+
+ if (pix_fmt != PIX_FMT_RGB24) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
+
+ // if we already got a SWS context, let's realloc it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */
+
+ if (pix_fmt != PIX_FMT_RGB24) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ width, height, PIX_FMT_RGB24,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
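
The '/* Insert filter code here */' marker above is the intended extension point; when control reaches it, pict always refers to an RGB24 view of the frame (either the converted copy or the frame itself). As an illustrative sketch only, assuming the same framehook.h include as null.c, a helper one could call at that marker (the invert_rgb24 name is made up):

    /* Hypothetical example filter body for null.c: invert the RGB24 frame
     * in place.  Call as  invert_rgb24(pict, width, height);  at the
     * "Insert filter code here" marker. */
    static void invert_rgb24(AVPicture *pict, int width, int height)
    {
        int x, y;
        uint8_t *row = pict->data[0];

        for (y = 0; y < height; y++) {
            for (x = 0; x < width * 3; x++)
                row[x] = 255 - row[x];          /* invert each R, G, B sample */
            row += pict->linesize[0];           /* step to the next row */
        }
    }
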
diff --git a/contrib/ffmpeg/vhook/ppm.c b/contrib/ffmpeg/vhook/ppm.c
new file mode 100644
index 000000000..51badd58d
--- /dev/null
+++ b/contrib/ffmpeg/vhook/ppm.c
@@ -0,0 +1,367 @@
+/*
+ * PPM Video Hook
+ * Copyright (c) 2003 Charles Yates
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <ctype.h>
+#include "framehook.h"
+#include "avformat.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+/** Bi-directional pipe structure.
+*/
+
+typedef struct rwpipe
+{
+ int pid;
+ FILE *reader;
+ FILE *writer;
+}
+rwpipe;
+
+/** Create a bidirectional pipe for the given command.
+*/
+
+static rwpipe *rwpipe_open( int argc, char *argv[] )
+{
+ rwpipe *this = av_mallocz( sizeof( rwpipe ) );
+
+ if ( this != NULL )
+ {
+ int input[ 2 ];
+ int output[ 2 ];
+
+ pipe( input );
+ pipe( output );
+
+ this->pid = fork();
+
+ if ( this->pid == 0 )
+ {
+#define COMMAND_SIZE 10240
+ char *command = av_mallocz( COMMAND_SIZE );
+ int i;
+
+ strcpy( command, "" );
+ for ( i = 0; i < argc; i ++ )
+ {
+ pstrcat( command, COMMAND_SIZE, argv[ i ] );
+ pstrcat( command, COMMAND_SIZE, " " );
+ }
+
+ dup2( output[ 0 ], STDIN_FILENO );
+ dup2( input[ 1 ], STDOUT_FILENO );
+
+ close( input[ 0 ] );
+ close( input[ 1 ] );
+ close( output[ 0 ] );
+ close( output[ 1 ] );
+
+ execl("/bin/sh", "sh", "-c", command, (char*)NULL );
+ exit( 255 );
+ }
+ else
+ {
+ close( input[ 1 ] );
+ close( output[ 0 ] );
+
+ this->reader = fdopen( input[ 0 ], "r" );
+ this->writer = fdopen( output[ 1 ], "w" );
+ }
+ }
+
+ return this;
+}
+
+/** Read data from the pipe.
+*/
+
+static FILE *rwpipe_reader( rwpipe *this )
+{
+ if ( this != NULL )
+ return this->reader;
+ else
+ return NULL;
+}
+
+/** Write data to the pipe.
+*/
+
+static FILE *rwpipe_writer( rwpipe *this )
+{
+ if ( this != NULL )
+ return this->writer;
+ else
+ return NULL;
+}
+
+/* Read a number from the pipe - assumes PNM style headers.
+*/
+
+static int rwpipe_read_number( rwpipe *rw )
+{
+ int value = 0;
+ int c = 0;
+ FILE *in = rwpipe_reader( rw );
+
+ do
+ {
+ c = fgetc( in );
+
+ while( c != EOF && !isdigit( c ) && c != '#' )
+ c = fgetc( in );
+
+ if ( c == '#' )
+ while( c != EOF && c != '\n' )
+ c = fgetc( in );
+ }
+ while ( c != EOF && !isdigit( c ) );
+
+ while( c != EOF && isdigit( c ) )
+ {
+ value = value * 10 + ( c - '0' );
+ c = fgetc( in );
+ }
+
+ return value;
+}
+
+/** Read a PPM P6 header.
+*/
+
+static int rwpipe_read_ppm_header( rwpipe *rw, int *width, int *height )
+{
+ char line[ 3 ];
+ FILE *in = rwpipe_reader( rw );
+ int max;
+
+ fgets( line, 3, in );
+ if ( !strncmp( line, "P6", 2 ) )
+ {
+ *width = rwpipe_read_number( rw );
+ *height = rwpipe_read_number( rw );
+ max = rwpipe_read_number( rw );
+ return max != 255 || *width <= 0 || *height <= 0;
+ }
+ return 1;
+}
+
+/** Close the pipe and process.
+*/
+
+static void rwpipe_close( rwpipe *this )
+{
+ if ( this != NULL )
+ {
+ fclose( this->reader );
+ fclose( this->writer );
+ waitpid( this->pid, NULL, 0 );
+ av_free( this );
+ }
+}
+
+/** Context info for this vhook - stores the pipe and image buffers.
+*/
+
+typedef struct
+{
+ rwpipe *rw;
+ int size1;
+ char *buf1;
+ int size2;
+ char *buf2;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... then processes it via a PPM command pipe ...
+ // ... and finally converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+}
+ContextInfo;
+
+/** Initialise the context info for this vhook.
+*/
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ if ( argc > 1 )
+ {
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ if ( *ctxp != NULL )
+ {
+ ContextInfo *info = (ContextInfo *)*ctxp;
+ info->rw = rwpipe_open( argc - 1, &argv[ 1 ] );
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/** Process a frame.
+*/
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ int err = 0;
+ ContextInfo *ci = (ContextInfo *) ctx;
+ AVPicture picture1;
+ AVPicture picture2;
+ AVPicture *pict = picture;
+ int out_width;
+ int out_height;
+ int i;
+ uint8_t *ptr = NULL;
+ FILE *in = rwpipe_reader( ci->rw );
+ FILE *out = rwpipe_writer( ci->rw );
+
+ /* Check that we have a pipe to talk to. */
+ if ( in == NULL || out == NULL )
+ err = 1;
+
+ /* Convert to RGB24 if necessary */
+ if ( !err && pix_fmt != PIX_FMT_RGB24 )
+ {
+ int size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+
+ if ( size != ci->size1 )
+ {
+ av_free( ci->buf1 );
+ ci->buf1 = av_malloc(size);
+ ci->size1 = size;
+ err = ci->buf1 == NULL;
+ }
+
+ if ( !err )
+ {
+ avpicture_fill(&picture1, ci->buf1, PIX_FMT_RGB24, width, height);
+
+ // if we already got a SWS context, let's realloc it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+ }
+
+ /* Write out the PPM */
+ if ( !err )
+ {
+ ptr = pict->data[ 0 ];
+ fprintf( out, "P6\n%d %d\n255\n", width, height );
+ for ( i = 0; !err && i < height; i ++ )
+ {
+ err = !fwrite( ptr, width * 3, 1, out );
+ ptr += pict->linesize[ 0 ];
+ }
+ if ( !err )
+ err = fflush( out );
+ }
+
+ /* Read the PPM returned. */
+ if ( !err && !rwpipe_read_ppm_header( ci->rw, &out_width, &out_height ) )
+ {
+ int size = avpicture_get_size(PIX_FMT_RGB24, out_width, out_height);
+
+ if ( size != ci->size2 )
+ {
+ av_free( ci->buf2 );
+ ci->buf2 = av_malloc(size);
+ ci->size2 = size;
+ err = ci->buf2 == NULL;
+ }
+
+ if ( !err )
+ {
+ avpicture_fill(&picture2, ci->buf2, PIX_FMT_RGB24, out_width, out_height);
+ ptr = picture2.data[ 0 ];
+ for ( i = 0; !err && i < out_height; i ++ )
+ {
+ err = !fread( ptr, out_width * 3, 1, in );
+ ptr += picture2.linesize[ 0 ];
+ }
+ }
+ }
+
+ /* Convert the returned PPM back to the input format */
+ if ( !err )
+ {
+ /* The out_width/out_height returned from the PPM
+ * filter won't necessarily be the same as width and height
+ * but it will be scaled anyway to width/height.
+ */
+ av_log(NULL, AV_LOG_DEBUG,
+ "PPM vhook: Input dimensions: %d x %d Output dimensions: %d x %d\n",
+ width, height, out_width, out_height);
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ out_width, out_height, PIX_FMT_RGB24,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture2.data, picture2.linesize, 0, out_height,
+ picture->data, picture->linesize);
+ }
+}
+
+/** Clean up the effect.
+*/
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx)
+ {
+ rwpipe_close( ci->rw );
+ av_free( ci->buf1 );
+ av_free( ci->buf2 );
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
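
ppm.c drives an arbitrary external command through a bidirectional pipe: it writes one binary P6 image per video frame to the command's stdin and reads a processed P6 image back from its stdout. As an illustrative sketch of the other end of that pipe, here is a made-up companion program that simply inverts each frame; compiled separately it would be passed as the hook argument, e.g. -vhook 'vhook/ppm.so ./ppm_invert':

    /* Hypothetical stdin/stdout PPM filter matching ppm.c's protocol. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int width, height, maxval;

        /* one P6 image per video frame, back to back on stdin */
        while (scanf("P6 %d %d %d", &width, &height, &maxval) == 3) {
            long n = (long) width * height * 3;
            unsigned char *buf = malloc(n);
            long i;

            getchar();                          /* the single whitespace after the header */
            if (!buf || fread(buf, 1, n, stdin) != (size_t) n)
                return 1;
            for (i = 0; i < n; i++)
                buf[i] = 255 - buf[i];          /* invert every sample */
            printf("P6\n%d %d\n%d\n", width, height, maxval);
            fwrite(buf, 1, n, stdout);
            fflush(stdout);                     /* ppm.c blocks until the full frame arrives */
            free(buf);
        }
        return 0;
    }
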
diff --git a/contrib/ffmpeg/vhook/watermark.c b/contrib/ffmpeg/vhook/watermark.c
new file mode 100644
index 000000000..4d2acd2aa
--- /dev/null
+++ b/contrib/ffmpeg/vhook/watermark.c
@@ -0,0 +1,661 @@
+/*
+ * Watermark Hook
+ * Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
+ *
+ * parameters for watermark:
+ * -m nbr = nbr is 0..1. 0 is the default mode, see below.
+ * -t nbr = nbr is six digit hex. Threshold.
+ * -f file = file is the watermark image filename. You must specify this!
+ *
+ * MODE 0:
+ * The watermark picture works like this (assuming color intensities 0..0xff):
+ * Per color do this:
+ * If the mask color is 0x80, the original frame is unchanged.
+ * If the mask color is < 0x80, the absolute difference is subtracted from
+ * the frame; if the result is < 0, it is clamped to 0.
+ * If the mask color is > 0x80, the absolute difference is added to the
+ * frame; if the result is > 0xff, it is clamped to 0xff.
+ * (Example: with the default threshold 0x80, a frame value of 0x60 and a
+ * mask value of 0xa0 give 0x60 + 0x20 = 0x80.)
+ *
+ * You can override the 0x80 level with the -t flag. E.g. if threshold is
+ * 000000 the color value of watermark is added to the destination.
+ *
+ * This way a mask can be made that is visible in both light and dark
+ * pictures (e.g. one generated with Gimp and its bump map tool).
+ *
+ * An example watermark file is at
+ * http://engene.se/ffmpeg_watermark.gif
+ *
+ * MODE 1:
+ * Per pixel: if any color channel of the watermark pixel exceeds the
+ * corresponding threshold channel, the whole watermark pixel is used.
+ *
+ * Example usage:
+ * ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov
+ * ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov
+ *
+ * Note that the entire vhook argument is encapsulated in ''. This
+ * way, arguments to the vhook won't be mixed up with those for ffmpeg.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+//#include <fcntl.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include "common.h"
+#include "avformat.h"
+
+#include "framehook.h"
+#include "cmdutils.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ char filename[2000];
+ int x_size;
+ int y_size;
+
+ /* get_watermark_picture() variables */
+ AVFormatContext *pFormatCtx;
+ const char *p_ext;
+ int videoStream;
+ int frameFinished;
+ AVCodecContext *pCodecCtx;
+ AVCodec *pCodec;
+ AVFrame *pFrame;
+ AVPacket packet;
+ int numBytes;
+ uint8_t *buffer;
+ int i;
+ AVInputFormat *file_iformat;
+ AVStream *st;
+ int is_done;
+ AVFrame *pFrameRGB;
+ int thrR;
+ int thrG;
+ int thrB;
+ int mode;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... then converts a watermark and applies it to the RGB frame ...
+ struct SwsContext *watermark_convert_ctx;
+ // ... and finally converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+} ContextInfo;
+
+int get_watermark_picture(ContextInfo *ci, int cleanup);
+
+
+/****************************************************************************
+ *
+ ****************************************************************************/
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ci) {
+ get_watermark_picture(ci, 1);
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->watermark_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ }
+ av_free(ctx);
+}
+
+
+/****************************************************************************
+ *
+ ****************************************************************************/
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ ContextInfo *ci;
+ int c;
+ int tmp = 0;
+
+ if (0 == (*ctxp = av_mallocz(sizeof(ContextInfo)))) return -1;
+ ci = (ContextInfo *) *ctxp;
+
+ optind = 1;
+
+ // Struct is mallocz:ed so no need to reset.
+ ci->thrR = 0x80;
+ ci->thrG = 0x80;
+ ci->thrB = 0x80;
+
+ while ((c = getopt(argc, argv, "f:m:t:")) > 0) {
+ switch (c) {
+ case 'f':
+ strncpy(ci->filename, optarg, 1999);
+ ci->filename[1999] = 0;
+ break;
+ case 'm':
+ ci->mode = atoi(optarg);
+ break;
+ case 't':
+ if (1 != sscanf(optarg, "%x", &tmp)) {
+ av_log(NULL, AV_LOG_ERROR, "Watermark: argument to -t must be a 6 digit hex number\n");
+ return -1;
+ }
+ ci->thrR = (tmp >> 16) & 0xff;
+ ci->thrG = (tmp >> 8) & 0xff;
+ ci->thrB = (tmp >> 0) & 0xff;
+ break;
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Watermark: Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ //
+ if (0 == ci->filename[0]) {
+ av_log(NULL, AV_LOG_ERROR, "Watermark: There is no filename specified.\n");
+ return -1;
+ }
+
+ av_register_all();
+ return get_watermark_picture(ci, 0);
+}
+
+
+/****************************************************************************
+ * For mode 0 (the original one)
+ ****************************************************************************/
+static void Process0(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ AVFrame *pFrameRGB;
+ int xm_size;
+ int ym_size;
+
+ int x;
+ int y;
+ int offs, offsm;
+ int mpoffs;
+ uint32_t *p_pixel = 0;
+ uint32_t pixel_meck;
+ uint32_t pixel;
+ uint32_t pixelm;
+ int tmp;
+ int thrR = ci->thrR;
+ int thrG = ci->thrG;
+ int thrB = ci->thrB;
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
+
+ // if we already got a SWS context, let's realloc it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */ /* ok */
+
+ // Get the next watermark frame
+ if (0 > get_watermark_picture(ci, 0)) {
+ av_free(buf); // don't leak the RGBA32 buffer on early return
+ return;
+ }
+ // These are the three original static variables in the ffmpeg hack.
+ pFrameRGB = ci->pFrameRGB;
+ xm_size = ci->x_size;
+ ym_size = ci->y_size;
+
+ // I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
+ // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
+ for (y=0; y<src_height; y++) {
+ offs = y * (src_width * 4);
+ offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
+ for (x=0; x<src_width; x++) {
+ mpoffs = offsm + (((x * xm_size) / src_width) * 4);
+ p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
+ pixelm = *p_pixel;
+ p_pixel = (uint32_t *)&((pict->data[0])[offs]);
+ pixel = *p_pixel;
+// pixelm = *((uint32_t *)&(pFrameRGB->data[mpoffs]));
+ pixel_meck = pixel & 0xff000000;
+
+ // R
+ tmp = (int)((pixel >> 16) & 0xff) + (int)((pixelm >> 16) & 0xff) - thrR;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 16) & 0xff0000;
+ // G
+ tmp = (int)((pixel >> 8) & 0xff) + (int)((pixelm >> 8) & 0xff) - thrG;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 8) & 0xff00;
+ // B
+ tmp = (int)((pixel >> 0) & 0xff) + (int)((pixelm >> 0) & 0xff) - thrB;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 0) & 0xff;
+
+
+ // test:
+ //pixel_meck = pixel & 0xff000000;
+ //pixel_meck |= (pixelm & 0x00ffffff);
+
+ *p_pixel = pixel_meck;
+
+ offs += 4;
+ } // foreach X
+ } // foreach Y
+
+
+
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGBA32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert took the destination first (2 args), then the source (4 args);
+// sws_scale takes the context, then the source (4 args), then the destination (2 args).
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
+
+/****************************************************************************
+ * For mode 1 (copy watermark pixels that exceed the threshold)
+ ****************************************************************************/
+static void Process1(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ AVFrame *pFrameRGB;
+ int xm_size;
+ int ym_size;
+
+ int x;
+ int y;
+ int offs, offsm;
+ int mpoffs;
+ uint32_t *p_pixel = 0;
+ uint32_t pixel;
+ uint32_t pixelm;
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
+
+ // If we already have a SWS context, reallocate it only if it cannot be reused.
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert took the destination first (2 args), then the source (4 args);
+// sws_scale takes the context, then the source (4 args), then the destination (2 args).
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */ /* ok */
+
+ // Get me next frame
+ if (0 > get_watermark_picture(ci, 0)) {
+ return;
+ }
+ // These are the three original static variables in the ffmpeg hack.
+ pFrameRGB = ci->pFrameRGB;
+ xm_size = ci->x_size;
+ ym_size = ci->y_size;
+
+ // The *4 could be rewritten as <<2 later, but most compilers do that optimization anyway.
+ // According to avcodec.h, PIX_FMT_RGBA32 is handled in an endian-specific manner.
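+ // Mode 1 keying: if any channel of the watermark pixel exceeds its threshold,
+ // the watermark pixel replaces the source pixel; otherwise the source pixel is kept.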
+ for (y=0; y<src_height; y++) {
+ offs = y * (src_width * 4);
+ offsm = (((y * ym_size) / src_height) * 4) * xm_size; // byte offset of the first pixel in the corresponding watermark line
+ for (x=0; x<src_width; x++) {
+ mpoffs = offsm + (((x * xm_size) / src_width) * 4);
+ p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
+ pixelm = *p_pixel; /* watermark pixel */
+ p_pixel = (uint32_t *)&((pict->data[0])[offs]);
+ pixel = *p_pixel;
+
+ if (((pixelm >> 16) & 0xff) > ci->thrR ||
+ ((pixelm >> 8) & 0xff) > ci->thrG ||
+ ((pixelm >> 0) & 0xff) > ci->thrB)
+ {
+ *p_pixel = pixelm;
+ } else {
+ *p_pixel = pixel;
+ }
+ offs += 4;
+ } // foreach X
+ } // foreach Y
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGBA32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert took the destination first (2 args), then the source (4 args);
+// sws_scale takes the context, then the source (4 args), then the destination (2 args).
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
+
+/****************************************************************************
+ * This is the function that ffmpeg.c calls back into.
+ ****************************************************************************/
+void Process(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ if (1 == ci->mode) {
+ return Process1(ctx, picture, pix_fmt, src_width, src_height, pts);
+ } else {
+ return Process0(ctx, picture, pix_fmt, src_width, src_height, pts);
+ }
+}
+
+
+/****************************************************************************
+ * When cleanup == 0, we try to get the next frame. If there is no next frame,
+ * nothing is done.
+ *
+ * This code follows the example on
+ * http://www.inb.uni-luebeck.de/~boehme/using_libavcodec.html
+ *
+ * 0 = ok, -1 = error
+ ****************************************************************************/
+int get_watermark_picture(ContextInfo *ci, int cleanup)
+{
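+ // Once the watermark stream has been fully read (is_done), keep re-using the
+ // last decoded frame left in ci->pFrameRGB instead of trying to read further.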
+ if (1 == ci->is_done && 0 == cleanup) return 0;
+
+ // ci->pFrameRGB must be NULL on the first call, otherwise this initialization block is skipped.
+ // This block is only executed the first time we enter this function.
+ if (0 == ci->pFrameRGB &&
+ 0 == cleanup)
+ {
+
+ /*
+ * The last three parameters specify the file format, buffer size and format
+ * parameters; by simply specifying NULL or 0 we ask libavformat to auto-detect
+ * the format and use a default buffer size. (Didn't work!)
+ */
+ if (av_open_input_file(&ci->pFormatCtx, ci->filename, NULL, 0, NULL) != 0) {
+
+ // Martin says this should not be necessary, but passing NULL instead of
+ // file_iformat to av_open_input_file() failed for me.
+ ci->i = strlen(ci->filename);
+ if (0 == ci->i) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() No filename to watermark vhook\n");
+ return -1;
+ }
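+ // Scan backwards for the last '.' so that p_ext points at the filename
+ // extension, which is then used to guess the input format.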
+ while (ci->i > 0) {
+ if (ci->filename[ci->i] == '.') {
+ ci->i++;
+ break;
+ }
+ ci->i--;
+ }
+ ci->p_ext = &(ci->filename[ci->i]);
+ ci->file_iformat = av_find_input_format (ci->p_ext);
+ if (0 == ci->file_iformat) {
+ av_log(NULL, AV_LOG_INFO, "get_watermark_picture() attempt to use image2 for [%s]\n", ci->p_ext);
+ ci->file_iformat = av_find_input_format ("image2");
+ }
+ if (0 == ci->file_iformat) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Really failed to find iformat [%s]\n", ci->p_ext);
+ return -1;
+ }
+ // Now continue with the Martin template.
+
+ if (av_open_input_file(&ci->pFormatCtx, ci->filename, ci->file_iformat, 0, NULL)!=0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open input file [%s]\n", ci->filename);
+ return -1;
+ }
+ }
+
+ /*
+ * This fills the streams field of the AVFormatContext with valid information.
+ */
+ if(av_find_stream_info(ci->pFormatCtx)<0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find stream info\n");
+ return -1;
+ }
+
+ /*
+ * As mentioned in the introduction, we'll handle only video streams, not audio
+ * streams. To make things nice and easy, we simply use the first video stream we
+ * find.
+ */
+ ci->videoStream=-1;
+ for(ci->i = 0; ci->i < ci->pFormatCtx->nb_streams; ci->i++)
+ if(ci->pFormatCtx->streams[ci->i]->codec->codec_type==CODEC_TYPE_VIDEO)
+ {
+ ci->videoStream = ci->i;
+ break;
+ }
+ if(ci->videoStream == -1) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any video stream\n");
+ return -1;
+ }
+
+ ci->st = ci->pFormatCtx->streams[ci->videoStream];
+ ci->x_size = ci->st->codec->width;
+ ci->y_size = ci->st->codec->height;
+
+ // Get a pointer to the codec context for the video stream
+ ci->pCodecCtx = ci->pFormatCtx->streams[ci->videoStream]->codec;
+
+
+ /*
+ * OK, so now we've got a pointer to the so-called codec context for our video
+ * stream, but we still have to find the actual codec and open it.
+ */
+ // Find the decoder for the video stream
+ ci->pCodec = avcodec_find_decoder(ci->pCodecCtx->codec_id);
+ if(ci->pCodec == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any codec\n");
+ return -1;
+ }
+
+ // Inform the codec that we can handle truncated bitstreams -- i.e.,
+ // bitstreams where frame boundaries can fall in the middle of packets
+ if (ci->pCodec->capabilities & CODEC_CAP_TRUNCATED)
+ ci->pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
+
+ // Open codec
+ if(avcodec_open(ci->pCodecCtx, ci->pCodec)<0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open codec\n");
+ return -1;
+ }
+
+ // Hack to correct wrong frame rates that seem to be generated by some
+ // codecs
+ if (ci->pCodecCtx->time_base.den>1000 && ci->pCodecCtx->time_base.num==1)
+ ci->pCodecCtx->time_base.num=1000;
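+ // e.g. a reported time base of 1/25000 becomes 1000/25000 = 1/25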
+
+ /*
+ * Allocate a video frame to store the decoded images in.
+ */
+ ci->pFrame = avcodec_alloc_frame();
+
+
+ /*
+ * The RGB image pFrameRGB (of type AVFrame *) is allocated like this:
+ */
+ // Allocate an AVFrame structure
+ ci->pFrameRGB=avcodec_alloc_frame();
+ if(ci->pFrameRGB==NULL) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to alloc pFrameRGB\n");
+ return -1;
+ }
+
+ // Determine required buffer size and allocate buffer
+ ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width,
+ ci->pCodecCtx->height);
+ ci->buffer = av_malloc(ci->numBytes);
+
+ // Assign appropriate parts of buffer to image planes in pFrameRGB
+ avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32,
+ ci->pCodecCtx->width, ci->pCodecCtx->height);
+ }
+ // TODO loop, pingpong etc?
+ if (0 == cleanup)
+ {
+// av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Get a frame\n");
+ while(av_read_frame(ci->pFormatCtx, &ci->packet)>=0)
+ {
+ // Is this a packet from the video stream?
+ if(ci->packet.stream_index == ci->videoStream)
+ {
+ // Decode video frame
+ avcodec_decode_video(ci->pCodecCtx, ci->pFrame, &ci->frameFinished,
+ ci->packet.data, ci->packet.size);
+
+ // Did we get a video frame?
+ if(ci->frameFinished)
+ {
+ // Convert the image from its native format to RGBA32
+ ci->watermark_convert_ctx =
+ sws_getCachedContext(ci->watermark_convert_ctx,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, ci->pCodecCtx->pix_fmt,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->watermark_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the watermark conversion context\n");
+ exit(1);
+ }
+// img_convert took the destination first (2 args), then the source (4 args);
+// sws_scale takes the context, then the source (4 args), then the destination (2 args).
+ sws_scale(ci->watermark_convert_ctx,
+ ci->pFrame->data, ci->pFrame->linesize, 0, ci->pCodecCtx->height,
+ ci->pFrameRGB->data, ci->pFrameRGB->linesize);
+
+ // Process the video frame (save to disk etc.)
+ //fprintf(stderr,"banan() New frame!\n");
+ //DoSomethingWithTheImage(ci->pFrameRGB);
+ return 0;
+ }
+ }
+
+ // Free the packet that was allocated by av_read_frame
+ av_free_packet(&ci->packet);
+ }
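+ // av_read_frame() returned EOF or an error: mark the watermark stream as exhausted.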
+ ci->is_done = 1;
+ return 0;
+ } // if (0 == cleanup)
+
+ if (0 != cleanup)
+ {
+ // Free the RGB image
+ av_freep(&ci->buffer);
+ av_freep(&ci->pFrameRGB);
+
+ // Close the codec
+ if (0 != ci->pCodecCtx) {
+ avcodec_close(ci->pCodecCtx);
+ ci->pCodecCtx = 0;
+ }
+
+ // Close the video file
+ if (0 != ci->pFormatCtx) {
+ av_close_input_file(ci->pFormatCtx);
+ ci->pFormatCtx = 0;
+ }
+
+ ci->is_done = 0;
+ }
+ return 0;
+}
+
+
+void parse_arg_file(const char *filename)
+{
+}