
YUV support in dumpmovie and makemovie.

git-svn-id: file:///srv/caca.zoy.org/var/lib/svn/libpipi/trunk@4704 92316355-f0b4-4df1-b90c-862c8a59935f
master
sam, 14 years ago
revision af83f814e8
3 changed files with 69 additions and 29 deletions
  1. +35 -21  examples/dumpmovie.c
  2. +1  -0   pipi/pipi_internals.h
  3. +33 -8   pipi/sequence.c

+35 -21  examples/dumpmovie.c

@@ -37,11 +37,17 @@ int main(int argc, char *argv[])
     pipi_pixels_t *p;
     uint8_t *dst[4], *data = NULL;
     char *parser;
-    int stream, pitch[4], i, k = 0;
+    int stream, pitch[4], i, k = 0, nframes = -1;
+    double skip_seconds = 0.0;
 
-    if(argc < 2)
+    if (argc < 2)
         return EXIT_FAILURE;
 
+    if (argc > 2)
+        skip_seconds = atof(argv[2]);
+    if (argc > 3)
+        nframes = atoi(argv[3]);
+
     /* Ensure our linear YUV values do not get gamma-corrected */
     pipi_set_gamma(1.0);

@@ -72,9 +78,14 @@ int main(int argc, char *argv[])
     if(avcodec_open(ctx, codec) < 0)
         return EXIT_FAILURE;
 
+    skip_seconds /= av_q2d(fmt->streams[stream]->time_base);
+    av_seek_frame(fmt, stream, (int)(skip_seconds + 0.5), SEEK_SET);
+    //avformat_seek_file(fmt, stream, skip_bytes, skip_bytes,
+    //                   skip_bytes, AVSEEK_FLAG_BYTE);
+
     frame = avcodec_alloc_frame();
 
-    for(;;)
+    for (k = 0; k < nframes || nframes == -1; /* k incremented below */)
     {
         int finished, ret, x, y;
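
The seek added above works in stream time-base units: dividing skip_seconds by av_q2d() of the stream's time_base turns wall-clock seconds into the integer timestamp the demuxer expects. A minimal standalone sketch of that conversion, assuming a made-up 1/90000 time base and a hypothetical rational struct standing in for FFmpeg's num/den pair:

    #include <stdio.h>

    /* Hypothetical stand-in for FFmpeg's num/den rational type. */
    struct rational { int num, den; };

    /* Same arithmetic as av_q2d(): rational -> double. */
    static double q2d(struct rational q)
    {
        return (double)q.num / q.den;
    }

    int main(void)
    {
        struct rational time_base = { 1, 90000 }; /* assumed time base */
        double skip_seconds = 12.5;               /* assumed seek target */

        /* Seconds divided by the time base give stream timestamp units. */
        long long ts = (long long)(skip_seconds / q2d(time_base) + 0.5);
        printf("%g s -> timestamp %lld\n", skip_seconds, ts);
        return 0;
    }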

@@ -112,34 +123,37 @@ int main(int argc, char *argv[])
             image = pipi_new(ctx->width, ctx->height);
         }
 
-        sws_scale(sws, (uint8_t const **)frame->data, frame->linesize, 0,
-                  ctx->height, dst, pitch);
+        {
+            char buf[1024];
+            sprintf(buf, fmtstr, k);
+            printf("saving in %s\n", buf);
 
-        p = pipi_get_pixels(image, PIPI_PIXELS_RGBA_U8);
-        data = (uint8_t *)p->pixels;
+            sws_scale(sws, (uint8_t const **)frame->data, frame->linesize, 0,
+                      ctx->height, dst, pitch);
 
-        for (y = 0; y < ctx->height; y++)
-        {
-            int off = y * ctx->width;
+            p = pipi_get_pixels(image, PIPI_PIXELS_RGBA_U8);
+            data = (uint8_t *)p->pixels;
 
-            for (x = 0; x < ctx->width; x++, off++)
+            for (y = 0; y < ctx->height; y++)
             {
-                /* Reorder components to store YUVA */
-                data[4 * off] = dst[0][off];
-                data[4 * off + 1] = dst[2][off];
-                data[4 * off + 2] = dst[1][off];
-                data[4 * off + 3] = 0xff;
+                int off = y * ctx->width;
+
+                for (x = 0; x < ctx->width; x++, off++)
+                {
+                    /* Reorder components to store YUVA */
+                    data[4 * off] = dst[0][off];
+                    data[4 * off + 1] = dst[2][off];
+                    data[4 * off + 2] = dst[1][off];
+                    data[4 * off + 3] = 0xff;
+                }
             }
-        }
 
-        {
-            char buf[1024];
-            sprintf(buf, fmtstr, k++);
-            printf("saving in %s\n", buf);
             pipi_save(image, buf);
         }
 
         av_free_packet(&packet);
+
+        k++;
     }
 
     return EXIT_SUCCESS;
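
For reference, the loop above takes the three full-resolution planes that sws_scale wrote into dst[0], dst[1] and dst[2] (presumably Y, U and V) and packs them into a 4-byte-per-pixel buffer in the order plane 0, plane 2, plane 1, 0xff. A self-contained sketch of the same packing, using a hypothetical pack_planes_to_yuva() helper and a tiny 2x2 frame of made-up values instead of real decoder output:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the dumpmovie loop: interleave three
     * planes into 4-byte pixels as plane 0, plane 2, plane 1, opaque alpha. */
    static void pack_planes_to_yuva(uint8_t *out, uint8_t *planes[3],
                                    int width, int height)
    {
        int off;

        for (off = 0; off < width * height; off++)
        {
            out[4 * off]     = planes[0][off];
            out[4 * off + 1] = planes[2][off];
            out[4 * off + 2] = planes[1][off];
            out[4 * off + 3] = 0xff;
        }
    }

    int main(void)
    {
        uint8_t y[4] = { 16, 235, 81, 145 }; /* made-up plane values */
        uint8_t u[4] = { 128, 128, 90, 54 };
        uint8_t v[4] = { 128, 128, 240, 34 };
        uint8_t *planes[3] = { y, u, v };
        uint8_t out[16];
        int i;

        pack_planes_to_yuva(out, planes, 2, 2);

        for (i = 0; i < 4; i++)
            printf("pixel %d: %3d %3d %3d %3d\n", i, out[4 * i],
                   out[4 * i + 1], out[4 * i + 2], out[4 * i + 3]);
        return 0;
    }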


+1 -0  pipi/pipi_internals.h

@@ -85,6 +85,7 @@ struct pipi_image
 struct pipi_sequence
 {
     int w, h, fps;
+    uint8_t *convert_buf;
 
     void *codec_priv;
 };


+33 -8  pipi/sequence.c

@@ -69,6 +69,7 @@ pipi_sequence_t *pipi_open_sequence(char const *file,
     seq->w = width;
     seq->h = height;
     seq->fps = fps;
+    seq->convert_buf = NULL;
 
     ff = malloc(sizeof(ffmpeg_codec_t));
     memset(ff, 0, sizeof(*ff));
@@ -200,8 +201,10 @@ int pipi_feed_sequence(pipi_sequence_t *seq, uint8_t const *buffer,
 {
 #if defined USE_FFMPEG
     AVPacket packet;
+    uint8_t const *buflist[3];
+    int pitchlist[3];
     size_t bytes;
-    int pitch;
+    int n;
 
     ffmpeg_codec_t *ff = (ffmpeg_codec_t *)seq->codec_priv;

@@ -215,16 +218,38 @@ int pipi_feed_sequence(pipi_sequence_t *seq, uint8_t const *buffer,
     }
 
     if (!ff->sws_ctx)
-        ff->sws_ctx = sws_getContext(width, height, PIX_FMT_RGB32,
-                                     ff->cod_ctx->width,
-                                     ff->cod_ctx->height,
-                                     ff->cod_ctx->pix_fmt, SWS_BICUBIC,
-                                     NULL, NULL, NULL);
+    {
+        ff->sws_ctx = sws_getContext(width, height, PIX_FMT_YUV444P,
+                                     ff->cod_ctx->width,
+                                     ff->cod_ctx->height,
+                                     ff->cod_ctx->pix_fmt, SWS_BICUBIC,
+                                     NULL, NULL, NULL);
+        if (seq->convert_buf)
+        {
+            free(seq->convert_buf);
+            seq->convert_buf = NULL;
+        }
+    }
     if (!ff->sws_ctx)
         return -1;
 
-    pitch = width * 4;
-    sws_scale(ff->sws_ctx, &buffer, &pitch, 0, height,
+    /* Convert interleaved YUV to planar YUV */
+    if (!seq->convert_buf)
+        seq->convert_buf = malloc(width * height * 3);
+
+    for (n = 0; n < width * height; n++)
+    {
+        seq->convert_buf[n] = buffer[4 * n];
+        seq->convert_buf[n + width * height] = buffer[4 * n + 1];
+        seq->convert_buf[n + 2 * width * height] = buffer[4 * n + 2];
+    }
+
+    /* Feed the buffers to FFmpeg */
+    buflist[0] = seq->convert_buf;
+    buflist[1] = seq->convert_buf + 2 * width * height;
+    buflist[2] = seq->convert_buf + width * height;
+    pitchlist[0] = pitchlist[1] = pitchlist[2] = width;
+    sws_scale(ff->sws_ctx, buflist, pitchlist, 0, height,
               ff->frame->data, ff->frame->linesize);
 
     bytes = avcodec_encode_video(ff->cod_ctx, ff->buf,
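
The makemovie side does the opposite conversion: pipi_feed_sequence() receives an interleaved 4-bytes-per-pixel buffer, and the new code splits it into three contiguous width * height planes before handing them to sws_scale. A minimal standalone sketch of that interleaved-to-planar split, with a hypothetical split_interleaved() helper and a local scratch buffer in place of seq->convert_buf:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical helper mirroring the loop in pipi_feed_sequence(): copy
     * the first three bytes of every 4-byte pixel into three consecutive
     * planes of width * height bytes each; the fourth byte is ignored. */
    static void split_interleaved(uint8_t *planar, uint8_t const *interleaved,
                                  int width, int height)
    {
        int n, count = width * height;

        for (n = 0; n < count; n++)
        {
            planar[n]             = interleaved[4 * n];
            planar[n + count]     = interleaved[4 * n + 1];
            planar[n + 2 * count] = interleaved[4 * n + 2];
        }
    }

    int main(void)
    {
        /* One 2x1 frame of made-up values: two pixels, 4 bytes each. */
        uint8_t const frame[8] = { 10, 20, 30, 255, 40, 50, 60, 255 };
        uint8_t planar[6];
        int i;

        split_interleaved(planar, frame, 2, 1);

        for (i = 0; i < 6; i++)
            printf("%d ", planar[i]); /* prints: 10 40 20 50 30 60 */
        printf("\n");

        return 0;
    }

Note how the commit then feeds the planes to sws_scale with the second and third swapped (buflist[1] points at the third plane), which suggests the incoming buffer stores its second and third components in the opposite order from what the PIX_FMT_YUV444P input context expects, consistent with the plane 0, plane 2, plane 1 packing used in dumpmovie above.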

