00001 /*
00002 * ffplay : Simple Media Player based on the FFmpeg libraries
00003 * Copyright (c) 2003 Fabrice Bellard
00004 *
00005 * This file is part of FFmpeg.
00006 *
00007 * FFmpeg is free software; you can redistribute it and/or
00008 * modify it under the terms of the GNU Lesser General Public
00009 * License as published by the Free Software Foundation; either
00010 * version 2.1 of the License, or (at your option) any later version.
00011 *
00012 * FFmpeg is distributed in the hope that it will be useful,
00013 * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00015 * Lesser General Public License for more details.
00016 *
00017 * You should have received a copy of the GNU Lesser General Public
00018 * License along with FFmpeg; if not, write to the Free Software
00019 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00020 */
00021
00022 #include "config.h"
00023 #include <inttypes.h>
00024 #include <math.h>
00025 #include <limits.h>
00026 #include "libavutil/avstring.h"
00027 #include "libavutil/colorspace.h"
00028 #include "libavutil/pixdesc.h"
00029 #include "libavutil/imgutils.h"
00030 #include "libavutil/dict.h"
00031 #include "libavutil/parseutils.h"
00032 #include "libavutil/samplefmt.h"
00033 #include "libavutil/avassert.h"
00034 #include "libavformat/avformat.h"
00035 #include "libavdevice/avdevice.h"
00036 #include "libswscale/swscale.h"
00037 #include "libavcodec/audioconvert.h"
00038 #include "libavutil/opt.h"
00039 #include "libavcodec/avfft.h"
00040
00041 #if CONFIG_AVFILTER
00042 # include "libavfilter/avcodec.h"
00043 # include "libavfilter/avfilter.h"
00044 # include "libavfilter/avfiltergraph.h"
00045 # include "libavfilter/vsink_buffer.h"
00046 #endif
00047
00048 #include <SDL.h>
00049 #include <SDL_thread.h>
00050
00051 #include "cmdutils.h"
00052
00053 #include <unistd.h>
00054 #include <assert.h>
00055
00056 const char program_name[] = "ffplay";
00057 const int program_birth_year = 2003;
00058
00059 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
00060 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
00061 #define MIN_FRAMES 5
00062
00063 /* SDL audio buffer size, in samples. Should be small to have precise
00064 A/V sync, as SDL does not report hardware buffer fullness */
00065 #define SDL_AUDIO_BUFFER_SIZE 1024
00066
00067 /* no AV sync correction is done if the error is below this threshold */
00068 #define AV_SYNC_THRESHOLD 0.01
00069 /* no AV correction is done if the error is too big */
00070 #define AV_NOSYNC_THRESHOLD 10.0
00071
00072 #define FRAME_SKIP_FACTOR 0.05
00073
00074 /* maximum audio speed change to get correct sync */
00075 #define SAMPLE_CORRECTION_PERCENT_MAX 10
00076
00077 /* we use about AUDIO_DIFF_AVG_NB audio measures to make the average used for the A-V difference */
00078 #define AUDIO_DIFF_AVG_NB 20
00079
00080 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
00081 #define SAMPLE_ARRAY_SIZE (2*65536)
00082
00083 static int sws_flags = SWS_BICUBIC;
00084
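/* FIFO of demuxed packets, shared between the read thread (producer)
 * and the audio/video/subtitle decoder threads (consumers). Access is
 * serialized with an SDL mutex; the condition variable wakes a blocked
 * consumer when a packet arrives or an abort is requested. */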
00085 typedef struct PacketQueue {
00086 AVPacketList *first_pkt, *last_pkt;
00087 int nb_packets;
00088 int size;
00089 int abort_request;
00090 SDL_mutex *mutex;
00091 SDL_cond *cond;
00092 } PacketQueue;
00093
00094 #define VIDEO_PICTURE_QUEUE_SIZE 2
00095 #define SUBPICTURE_QUEUE_SIZE 4
00096
00097 typedef struct VideoPicture {
00098 double pts;
00099 double target_clock;
00100 int64_t pos;
00101 SDL_Overlay *bmp;
00102 int width, height;
00103 int allocated;
00104 enum PixelFormat pix_fmt;
00105
00106 #if CONFIG_AVFILTER
00107 AVFilterBufferRef *picref;
00108 #endif
00109 } VideoPicture;
00110
00111 typedef struct SubPicture {
00112 double pts;
00113 AVSubtitle sub;
00114 } SubPicture;
00115
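/* which clock the other streams are slaved to: by default the video
 * follows the audio clock; the external clock drives both. */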
00116 enum {
00117 AV_SYNC_AUDIO_MASTER,
00118 AV_SYNC_VIDEO_MASTER,
00119 AV_SYNC_EXTERNAL_CLOCK,
00120 };
00121
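/* complete state of the player for one input: demuxer context,
 * per-stream packet queues, decoded picture/subtitle queues, the
 * clocks used for A/V synchronization and the SDL display geometry. */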
00122 typedef struct VideoState {
00123 SDL_Thread *read_tid;
00124 SDL_Thread *video_tid;
00125 SDL_Thread *refresh_tid;
00126 AVInputFormat *iformat;
00127 int no_background;
00128 int abort_request;
00129 int paused;
00130 int last_paused;
00131 int seek_req;
00132 int seek_flags;
00133 int64_t seek_pos;
00134 int64_t seek_rel;
00135 int read_pause_return;
00136 AVFormatContext *ic;
00137
00138 int audio_stream;
00139
00140 int av_sync_type;
00141 double external_clock;
00142 int64_t external_clock_time;
00143
00144 double audio_clock;
00145 double audio_diff_cum;
00146 double audio_diff_avg_coef;
00147 double audio_diff_threshold;
00148 int audio_diff_avg_count;
00149 AVStream *audio_st;
00150 PacketQueue audioq;
00151 int audio_hw_buf_size;
00152 /* samples output by the codec; we reserve more space for
00153 A/V sync compensation */
00154 DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00155 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00156 uint8_t *audio_buf;
00157 unsigned int audio_buf_size;
00158 int audio_buf_index;
00159 AVPacket audio_pkt_temp;
00160 AVPacket audio_pkt;
00161 enum AVSampleFormat audio_src_fmt;
00162 AVAudioConvert *reformat_ctx;
00163
00164 enum ShowMode {
00165 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
00166 } show_mode;
00167 int16_t sample_array[SAMPLE_ARRAY_SIZE];
00168 int sample_array_index;
00169 int last_i_start;
00170 RDFTContext *rdft;
00171 int rdft_bits;
00172 FFTSample *rdft_data;
00173 int xpos;
00174
00175 SDL_Thread *subtitle_tid;
00176 int subtitle_stream;
00177 int subtitle_stream_changed;
00178 AVStream *subtitle_st;
00179 PacketQueue subtitleq;
00180 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
00181 int subpq_size, subpq_rindex, subpq_windex;
00182 SDL_mutex *subpq_mutex;
00183 SDL_cond *subpq_cond;
00184
00185 double frame_timer;
00186 double frame_last_pts;
00187 double frame_last_delay;
00188 double video_clock;
00189 int video_stream;
00190 AVStream *video_st;
00191 PacketQueue videoq;
00192 double video_current_pts;
00193 double video_current_pts_drift;
00194 int64_t video_current_pos;
00195 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
00196 int pictq_size, pictq_rindex, pictq_windex;
00197 SDL_mutex *pictq_mutex;
00198 SDL_cond *pictq_cond;
00199 #if !CONFIG_AVFILTER
00200 struct SwsContext *img_convert_ctx;
00201 #endif
00202
00203 char filename[1024];
00204 int width, height, xleft, ytop;
00205
00206 #if CONFIG_AVFILTER
00207 AVFilterContext *out_video_filter;
00208 #endif
00209
00210 float skip_frames;
00211 float skip_frames_index;
00212 int refresh;
00213 } VideoState;
00214
00215 static int opt_help(const char *opt, const char *arg);
00216
00217 /* options specified by the user */
00218 static AVInputFormat *file_iformat;
00219 static const char *input_filename;
00220 static const char *window_title;
00221 static int fs_screen_width;
00222 static int fs_screen_height;
00223 static int screen_width = 0;
00224 static int screen_height = 0;
00225 static int frame_width = 0;
00226 static int frame_height = 0;
00227 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
00228 static int audio_disable;
00229 static int video_disable;
00230 static int wanted_stream[AVMEDIA_TYPE_NB]={
00231 [AVMEDIA_TYPE_AUDIO]=-1,
00232 [AVMEDIA_TYPE_VIDEO]=-1,
00233 [AVMEDIA_TYPE_SUBTITLE]=-1,
00234 };
00235 static int seek_by_bytes=-1;
00236 static int display_disable;
00237 static int show_status = 1;
00238 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
00239 static int64_t start_time = AV_NOPTS_VALUE;
00240 static int64_t duration = AV_NOPTS_VALUE;
00241 static int step = 0;
00242 static int thread_count = 1;
00243 static int workaround_bugs = 1;
00244 static int fast = 0;
00245 static int genpts = 0;
00246 static int lowres = 0;
00247 static int idct = FF_IDCT_AUTO;
00248 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
00249 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
00250 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
00251 static int error_recognition = FF_ER_CAREFUL;
00252 static int error_concealment = 3;
00253 static int decoder_reorder_pts= -1;
00254 static int autoexit;
00255 static int exit_on_keydown;
00256 static int exit_on_mousedown;
00257 static int loop=1;
00258 static int framedrop=-1;
00259 static enum ShowMode show_mode = SHOW_MODE_NONE;
00260
00261 static int rdftspeed=20;
00262 #if CONFIG_AVFILTER
00263 static char *vfilters = NULL;
00264 #endif
00265
00266 /* current playback context */
00267 static int is_full_screen;
00268 static VideoState *cur_stream;
00269 static int64_t audio_callback_time;
00270
00271 static AVPacket flush_pkt;
00272
00273 #define FF_ALLOC_EVENT (SDL_USEREVENT)
00274 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
00275 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
00276
00277 static SDL_Surface *screen;
00278
00279 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00280 {
00281 AVPacketList *pkt1;
00282
00283 /* duplicate the packet */
00284 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00285 return -1;
00286
00287 pkt1 = av_malloc(sizeof(AVPacketList));
00288 if (!pkt1)
00289 return -1;
00290 pkt1->pkt = *pkt;
00291 pkt1->next = NULL;
00292
00293
00294 SDL_LockMutex(q->mutex);
00295
00296 if (!q->last_pkt)
00297
00298 q->first_pkt = pkt1;
00299 else
00300 q->last_pkt->next = pkt1;
00301 q->last_pkt = pkt1;
00302 q->nb_packets++;
00303 q->size += pkt1->pkt.size + sizeof(*pkt1);
00304
00305 SDL_CondSignal(q->cond);
00306
00307 SDL_UnlockMutex(q->mutex);
00308 return 0;
00309 }
00310
00311 /* packet queue handling */
00312 static void packet_queue_init(PacketQueue *q)
00313 {
00314 memset(q, 0, sizeof(PacketQueue));
00315 q->mutex = SDL_CreateMutex();
00316 q->cond = SDL_CreateCond();
00317 packet_queue_put(q, &flush_pkt);
00318 }
00319
00320 static void packet_queue_flush(PacketQueue *q)
00321 {
00322 AVPacketList *pkt, *pkt1;
00323
00324 SDL_LockMutex(q->mutex);
00325 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00326 pkt1 = pkt->next;
00327 av_free_packet(&pkt->pkt);
00328 av_freep(&pkt);
00329 }
00330 q->last_pkt = NULL;
00331 q->first_pkt = NULL;
00332 q->nb_packets = 0;
00333 q->size = 0;
00334 SDL_UnlockMutex(q->mutex);
00335 }
00336
00337 static void packet_queue_end(PacketQueue *q)
00338 {
00339 packet_queue_flush(q);
00340 SDL_DestroyMutex(q->mutex);
00341 SDL_DestroyCond(q->cond);
00342 }
00343
00344 static void packet_queue_abort(PacketQueue *q)
00345 {
00346 SDL_LockMutex(q->mutex);
00347
00348 q->abort_request = 1;
00349
00350 SDL_CondSignal(q->cond);
00351
00352 SDL_UnlockMutex(q->mutex);
00353 }
00354
00355 /* return < 0 if aborted, 0 if no packet and > 0 if a packet was returned */
00356 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00357 {
00358 AVPacketList *pkt1;
00359 int ret;
00360
00361 SDL_LockMutex(q->mutex);
00362
00363 for(;;) {
00364 if (q->abort_request) {
00365 ret = -1;
00366 break;
00367 }
00368
00369 pkt1 = q->first_pkt;
00370 if (pkt1) {
00371 q->first_pkt = pkt1->next;
00372 if (!q->first_pkt)
00373 q->last_pkt = NULL;
00374 q->nb_packets--;
00375 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00376 *pkt = pkt1->pkt;
00377 av_free(pkt1);
00378 ret = 1;
00379 break;
00380 } else if (!block) {
00381 ret = 0;
00382 break;
00383 } else {
00384 SDL_CondWait(q->cond, q->mutex);
00385 }
00386 }
00387 SDL_UnlockMutex(q->mutex);
00388 return ret;
00389 }
00390
00391 static inline void fill_rectangle(SDL_Surface *screen,
00392 int x, int y, int w, int h, int color)
00393 {
00394 SDL_Rect rect;
00395 rect.x = x;
00396 rect.y = y;
00397 rect.w = w;
00398 rect.h = h;
00399 SDL_FillRect(screen, &rect, color);
00400 }
00401
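/* helpers for blending subtitle rectangles: ALPHA_BLEND mixes an old
 * and a new component according to an 8-bit alpha, RGBA_IN unpacks a
 * packed 32-bit RGBA value, YUVA_IN looks up a paletted pixel and
 * unpacks its YUVA components, and YUVA_OUT repacks them. */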
00402 #define ALPHA_BLEND(a, oldp, newp, s)\
00403 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
00404
00405 #define RGBA_IN(r, g, b, a, s)\
00406 {\
00407 unsigned int v = ((const uint32_t *)(s))[0];\
00408 a = (v >> 24) & 0xff;\
00409 r = (v >> 16) & 0xff;\
00410 g = (v >> 8) & 0xff;\
00411 b = v & 0xff;\
00412 }
00413
00414 #define YUVA_IN(y, u, v, a, s, pal)\
00415 {\
00416 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
00417 a = (val >> 24) & 0xff;\
00418 y = (val >> 16) & 0xff;\
00419 u = (val >> 8) & 0xff;\
00420 v = val & 0xff;\
00421 }
00422
00423 #define YUVA_OUT(d, y, u, v, a)\
00424 {\
00425 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
00426 }
00427
00428
00429 #define BPP 1
00430
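/* blend one paletted subtitle rectangle onto the YUV420P destination
 * picture in place. Luma is blended per pixel; chroma is blended on the
 * 2x2 subsampled grid, which is why odd start positions and odd
 * widths/heights are handled as special cases. */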
00431 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00432 {
00433 int wrap, wrap3, width2, skip2;
00434 int y, u, v, a, u1, v1, a1, w, h;
00435 uint8_t *lum, *cb, *cr;
00436 const uint8_t *p;
00437 const uint32_t *pal;
00438 int dstx, dsty, dstw, dsth;
00439
00440 dstw = av_clip(rect->w, 0, imgw);
00441 dsth = av_clip(rect->h, 0, imgh);
00442 dstx = av_clip(rect->x, 0, imgw - dstw);
00443 dsty = av_clip(rect->y, 0, imgh - dsth);
00444 lum = dst->data[0] + dsty * dst->linesize[0];
00445 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00446 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00447
00448 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00449 skip2 = dstx >> 1;
00450 wrap = dst->linesize[0];
00451 wrap3 = rect->pict.linesize[0];
00452 p = rect->pict.data[0];
00453 pal = (const uint32_t *)rect->pict.data[1];
00454
00455 if (dsty & 1) {
00456 lum += dstx;
00457 cb += skip2;
00458 cr += skip2;
00459
00460 if (dstx & 1) {
00461 YUVA_IN(y, u, v, a, p, pal);
00462 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00463 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00464 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00465 cb++;
00466 cr++;
00467 lum++;
00468 p += BPP;
00469 }
00470 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00471 YUVA_IN(y, u, v, a, p, pal);
00472 u1 = u;
00473 v1 = v;
00474 a1 = a;
00475 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00476
00477 YUVA_IN(y, u, v, a, p + BPP, pal);
00478 u1 += u;
00479 v1 += v;
00480 a1 += a;
00481 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00482 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00483 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00484 cb++;
00485 cr++;
00486 p += 2 * BPP;
00487 lum += 2;
00488 }
00489 if (w) {
00490 YUVA_IN(y, u, v, a, p, pal);
00491 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00492 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00493 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00494 p++;
00495 lum++;
00496 }
00497 p += wrap3 - dstw * BPP;
00498 lum += wrap - dstw - dstx;
00499 cb += dst->linesize[1] - width2 - skip2;
00500 cr += dst->linesize[2] - width2 - skip2;
00501 }
00502 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00503 lum += dstx;
00504 cb += skip2;
00505 cr += skip2;
00506
00507 if (dstx & 1) {
00508 YUVA_IN(y, u, v, a, p, pal);
00509 u1 = u;
00510 v1 = v;
00511 a1 = a;
00512 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00513 p += wrap3;
00514 lum += wrap;
00515 YUVA_IN(y, u, v, a, p, pal);
00516 u1 += u;
00517 v1 += v;
00518 a1 += a;
00519 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00520 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00521 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00522 cb++;
00523 cr++;
00524 p += -wrap3 + BPP;
00525 lum += -wrap + 1;
00526 }
00527 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00528 YUVA_IN(y, u, v, a, p, pal);
00529 u1 = u;
00530 v1 = v;
00531 a1 = a;
00532 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00533
00534 YUVA_IN(y, u, v, a, p + BPP, pal);
00535 u1 += u;
00536 v1 += v;
00537 a1 += a;
00538 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00539 p += wrap3;
00540 lum += wrap;
00541
00542 YUVA_IN(y, u, v, a, p, pal);
00543 u1 += u;
00544 v1 += v;
00545 a1 += a;
00546 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00547
00548 YUVA_IN(y, u, v, a, p + BPP, pal);
00549 u1 += u;
00550 v1 += v;
00551 a1 += a;
00552 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00553
00554 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00555 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00556
00557 cb++;
00558 cr++;
00559 p += -wrap3 + 2 * BPP;
00560 lum += -wrap + 2;
00561 }
00562 if (w) {
00563 YUVA_IN(y, u, v, a, p, pal);
00564 u1 = u;
00565 v1 = v;
00566 a1 = a;
00567 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00568 p += wrap3;
00569 lum += wrap;
00570 YUVA_IN(y, u, v, a, p, pal);
00571 u1 += u;
00572 v1 += v;
00573 a1 += a;
00574 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00575 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00576 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00577 cb++;
00578 cr++;
00579 p += -wrap3 + BPP;
00580 lum += -wrap + 1;
00581 }
00582 p += wrap3 + (wrap3 - dstw * BPP);
00583 lum += wrap + (wrap - dstw - dstx);
00584 cb += dst->linesize[1] - width2 - skip2;
00585 cr += dst->linesize[2] - width2 - skip2;
00586 }
00587
00588 if (h) {
00589 lum += dstx;
00590 cb += skip2;
00591 cr += skip2;
00592
00593 if (dstx & 1) {
00594 YUVA_IN(y, u, v, a, p, pal);
00595 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00596 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00597 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00598 cb++;
00599 cr++;
00600 lum++;
00601 p += BPP;
00602 }
00603 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00604 YUVA_IN(y, u, v, a, p, pal);
00605 u1 = u;
00606 v1 = v;
00607 a1 = a;
00608 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00609
00610 YUVA_IN(y, u, v, a, p + BPP, pal);
00611 u1 += u;
00612 v1 += v;
00613 a1 += a;
00614 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00615 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00616 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00617 cb++;
00618 cr++;
00619 p += 2 * BPP;
00620 lum += 2;
00621 }
00622 if (w) {
00623 YUVA_IN(y, u, v, a, p, pal);
00624 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00625 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00626 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00627 }
00628 }
00629 }
00630
00631 static void free_subpicture(SubPicture *sp)
00632 {
00633 avsubtitle_free(&sp->sub);
00634 }
00635
00636 static void video_image_display(VideoState *is)
00637 {
00638 VideoPicture *vp;
00639 SubPicture *sp;
00640 AVPicture pict;
00641 float aspect_ratio;
00642 int width, height, x, y;
00643 SDL_Rect rect;
00644 int i;
00645
00646 vp = &is->pictq[is->pictq_rindex];
00647 if (vp->bmp) {
00648 #if CONFIG_AVFILTER
00649 if (vp->picref->video->sample_aspect_ratio.num == 0)
00650 aspect_ratio = 0;
00651 else
00652 aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
00653 #else
00654
00655
00656 if (is->video_st->sample_aspect_ratio.num)
00657 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
00658 else if (is->video_st->codec->sample_aspect_ratio.num)
00659 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
00660 else
00661 aspect_ratio = 0;
00662 #endif
00663 if (aspect_ratio <= 0.0)
00664 aspect_ratio = 1.0;
00665 aspect_ratio *= (float)vp->width / (float)vp->height;
00666
00667 if (is->subtitle_st) {
00668 if (is->subpq_size > 0) {
00669 sp = &is->subpq[is->subpq_rindex];
00670
00671 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
00672 SDL_LockYUVOverlay (vp->bmp);
00673
00674 pict.data[0] = vp->bmp->pixels[0];
00675 pict.data[1] = vp->bmp->pixels[2];
00676 pict.data[2] = vp->bmp->pixels[1];
00677
00678 pict.linesize[0] = vp->bmp->pitches[0];
00679 pict.linesize[1] = vp->bmp->pitches[2];
00680 pict.linesize[2] = vp->bmp->pitches[1];
00681
00682 for (i = 0; i < sp->sub.num_rects; i++)
00683 blend_subrect(&pict, sp->sub.rects[i],
00684 vp->bmp->w, vp->bmp->h);
00685
00686 SDL_UnlockYUVOverlay (vp->bmp);
00687 }
00688 }
00689 }
00690
00691
00692
00693 height = is->height;
00694 width = ((int)rint(height * aspect_ratio)) & ~1;
00695 if (width > is->width) {
00696 width = is->width;
00697 height = ((int)rint(width / aspect_ratio)) & ~1;
00698 }
00699 x = (is->width - width) / 2;
00700 y = (is->height - height) / 2;
00701 is->no_background = 0;
00702 rect.x = is->xleft + x;
00703 rect.y = is->ytop + y;
00704 rect.w = FFMAX(width, 1);
00705 rect.h = FFMAX(height, 1);
00706 SDL_DisplayYUVOverlay(vp->bmp, &rect);
00707 }
00708 }
00709
00710 /* return the amount of decoded audio data not yet handed to SDL,
00711 in bytes */
00712 static int audio_write_get_buf_size(VideoState *is)
00713 {
00714 return is->audio_buf_size - is->audio_buf_index;
00715 }
00716
00717 static inline int compute_mod(int a, int b)
00718 {
00719 return a < 0 ? a%b + b : a%b;
00720 }
00721
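/* draw the audio-only visualization: either the waveform of the most
 * recently played samples or an RDFT-based spectrum column that scrolls
 * across the window. The start index is chosen so that the display
 * roughly matches what the sound card is currently playing. */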
00722 static void video_audio_display(VideoState *s)
00723 {
00724 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00725 int ch, channels, h, h2, bgcolor, fgcolor;
00726 int64_t time_diff;
00727 int rdft_bits, nb_freq;
00728
00729 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
00730 ;
00731 nb_freq= 1<<(rdft_bits-1);
00732
00733 /* compute display index: center on the currently output samples */
00734 channels = s->audio_st->codec->channels;
00735 nb_display_channels = channels;
00736 if (!s->paused) {
00737 int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
00738 n = 2 * channels;
00739 delay = audio_write_get_buf_size(s);
00740 delay /= n;
00741
00742 /* to be more precise, we take into account the time spent since
00743 the last buffer computation */
00744 if (audio_callback_time) {
00745 time_diff = av_gettime() - audio_callback_time;
00746 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00747 }
00748
00749 delay += 2*data_used;
00750 if (delay < data_used)
00751 delay = data_used;
00752
00753 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00754 if (s->show_mode == SHOW_MODE_WAVES) {
00755 h= INT_MIN;
00756 for(i=0; i<1000; i+=channels){
00757 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00758 int a= s->sample_array[idx];
00759 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00760 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00761 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00762 int score= a-d;
00763 if(h<score && (b^c)<0){
00764 h= score;
00765 i_start= idx;
00766 }
00767 }
00768 }
00769
00770 s->last_i_start = i_start;
00771 } else {
00772 i_start = s->last_i_start;
00773 }
00774
00775 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00776 if (s->show_mode == SHOW_MODE_WAVES) {
00777 fill_rectangle(screen,
00778 s->xleft, s->ytop, s->width, s->height,
00779 bgcolor);
00780
00781 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00782
00783 /* total height for one channel */
00784 h = s->height / nb_display_channels;
00785 /* graph height / 2 */
00786 h2 = (h * 9) / 20;
00787 for(ch = 0;ch < nb_display_channels; ch++) {
00788 i = i_start + ch;
00789 y1 = s->ytop + ch * h + (h / 2);
00790 for(x = 0; x < s->width; x++) {
00791 y = (s->sample_array[i] * h2) >> 15;
00792 if (y < 0) {
00793 y = -y;
00794 ys = y1 - y;
00795 } else {
00796 ys = y1;
00797 }
00798 fill_rectangle(screen,
00799 s->xleft + x, ys, 1, y,
00800 fgcolor);
00801 i += channels;
00802 if (i >= SAMPLE_ARRAY_SIZE)
00803 i -= SAMPLE_ARRAY_SIZE;
00804 }
00805 }
00806
00807 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00808
00809 for(ch = 1;ch < nb_display_channels; ch++) {
00810 y = s->ytop + ch * h;
00811 fill_rectangle(screen,
00812 s->xleft, y, s->width, 1,
00813 fgcolor);
00814 }
00815 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00816 }else{
00817 nb_display_channels= FFMIN(nb_display_channels, 2);
00818 if(rdft_bits != s->rdft_bits){
00819 av_rdft_end(s->rdft);
00820 av_free(s->rdft_data);
00821 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00822 s->rdft_bits= rdft_bits;
00823 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
00824 }
00825 {
00826 FFTSample *data[2];
00827 for(ch = 0;ch < nb_display_channels; ch++) {
00828 data[ch] = s->rdft_data + 2*nb_freq*ch;
00829 i = i_start + ch;
00830 for(x = 0; x < 2*nb_freq; x++) {
00831 double w= (x-nb_freq)*(1.0/nb_freq);
00832 data[ch][x]= s->sample_array[i]*(1.0-w*w);
00833 i += channels;
00834 if (i >= SAMPLE_ARRAY_SIZE)
00835 i -= SAMPLE_ARRAY_SIZE;
00836 }
00837 av_rdft_calc(s->rdft, data[ch]);
00838 }
00839
00840 for(y=0; y<s->height; y++){
00841 double w= 1/sqrt(nb_freq);
00842 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
00843 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
00844 + data[1][2*y+1]*data[1][2*y+1])) : a;
00845 a= FFMIN(a,255);
00846 b= FFMIN(b,255);
00847 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
00848
00849 fill_rectangle(screen,
00850 s->xpos, s->height-y, 1, 1,
00851 fgcolor);
00852 }
00853 }
00854 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00855 s->xpos++;
00856 if(s->xpos >= s->width)
00857 s->xpos= s->xleft;
00858 }
00859 }
00860
00861 static void stream_close(VideoState *is)
00862 {
00863 VideoPicture *vp;
00864 int i;
00865
00866 is->abort_request = 1;
00867 SDL_WaitThread(is->read_tid, NULL);
00868 SDL_WaitThread(is->refresh_tid, NULL);
00869
00870 /* free all pictures */
00871 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
00872 vp = &is->pictq[i];
00873 #if CONFIG_AVFILTER
00874 if (vp->picref) {
00875 avfilter_unref_buffer(vp->picref);
00876 vp->picref = NULL;
00877 }
00878 #endif
00879 if (vp->bmp) {
00880 SDL_FreeYUVOverlay(vp->bmp);
00881 vp->bmp = NULL;
00882 }
00883 }
00884 SDL_DestroyMutex(is->pictq_mutex);
00885 SDL_DestroyCond(is->pictq_cond);
00886 SDL_DestroyMutex(is->subpq_mutex);
00887 SDL_DestroyCond(is->subpq_cond);
00888 #if !CONFIG_AVFILTER
00889 if (is->img_convert_ctx)
00890 sws_freeContext(is->img_convert_ctx);
00891 #endif
00892 av_free(is);
00893 }
00894
00895 static void do_exit(void)
00896 {
00897 if (cur_stream) {
00898 stream_close(cur_stream);
00899 cur_stream = NULL;
00900 }
00901 uninit_opts();
00902 #if CONFIG_AVFILTER
00903 avfilter_uninit();
00904 #endif
00905 if (show_status)
00906 printf("\n");
00907 SDL_Quit();
00908 av_log(NULL, AV_LOG_QUIET, "%s", "");
00909 exit(0);
00910 }
00911
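/* create or resize the SDL output surface, using the fullscreen size,
 * the user-requested size, or the size of the decoded (and possibly
 * filtered) video, falling back to 640x480. */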
00912 static int video_open(VideoState *is){
00913 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
00914 int w,h;
00915
00916 if(is_full_screen) flags |= SDL_FULLSCREEN;
00917 else flags |= SDL_RESIZABLE;
00918
00919 if (is_full_screen && fs_screen_width) {
00920 w = fs_screen_width;
00921 h = fs_screen_height;
00922 } else if(!is_full_screen && screen_width){
00923 w = screen_width;
00924 h = screen_height;
00925 #if CONFIG_AVFILTER
00926 }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
00927 w = is->out_video_filter->inputs[0]->w;
00928 h = is->out_video_filter->inputs[0]->h;
00929 #else
00930 }else if (is->video_st && is->video_st->codec->width){
00931 w = is->video_st->codec->width;
00932 h = is->video_st->codec->height;
00933 #endif
00934 } else {
00935 w = 640;
00936 h = 480;
00937 }
00938 if(screen && is->width == screen->w && screen->w == w
00939 && is->height== screen->h && screen->h == h)
00940 return 0;
00941
00942 #ifndef __APPLE__
00943 screen = SDL_SetVideoMode(w, h, 0, flags);
00944 #else
00945 /* setting bits_per_pixel = 0 or 32 causes blank rendering on OS X */
00946 screen = SDL_SetVideoMode(w, h, 24, flags);
00947 #endif
00948 if (!screen) {
00949 fprintf(stderr, "SDL: could not set video mode - exiting\n");
00950 do_exit();
00951 }
00952 if (!window_title)
00953 window_title = input_filename;
00954 SDL_WM_SetCaption(window_title, window_title);
00955
00956 is->width = screen->w;
00957 is->height = screen->h;
00958
00959 return 0;
00960 }
00961
00962
00963 static void video_display(VideoState *is)
00964 {
00965 if(!screen)
00966 video_open(cur_stream);
00967 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
00968 video_audio_display(is);
00969 else if (is->video_st)
00970 video_image_display(is);
00971 }
00972
00973 static int refresh_thread(void *opaque)
00974 {
00975 VideoState *is= opaque;
00976 while(!is->abort_request){
00977 SDL_Event event;
00978 event.type = FF_REFRESH_EVENT;
00979 event.user.data1 = opaque;
00980 if(!is->refresh){
00981 is->refresh=1;
00982 SDL_PushEvent(&event);
00983 }
00984
00985 usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
00986 }
00987 return 0;
00988 }
00989
00990
00991 static double get_audio_clock(VideoState *is)
00992 {
00993 double pts;
00994 int hw_buf_size, bytes_per_sec;
00995 pts = is->audio_clock;
00996 hw_buf_size = audio_write_get_buf_size(is);
00997 bytes_per_sec = 0;
00998 if (is->audio_st) {
00999 bytes_per_sec = is->audio_st->codec->sample_rate *
01000 2 * is->audio_st->codec->channels;
01001 }
01002 if (bytes_per_sec)
01003 pts -= (double)hw_buf_size / bytes_per_sec;
01004 return pts;
01005 }
01006
01007
01008 static double get_video_clock(VideoState *is)
01009 {
01010 if (is->paused) {
01011 return is->video_current_pts;
01012 } else {
01013 return is->video_current_pts_drift + av_gettime() / 1000000.0;
01014 }
01015 }
01016
01017
01018 static double get_external_clock(VideoState *is)
01019 {
01020 int64_t ti;
01021 ti = av_gettime();
01022 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
01023 }
01024
01025
01026 static double get_master_clock(VideoState *is)
01027 {
01028 double val;
01029
01030 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
01031 if (is->video_st)
01032 val = get_video_clock(is);
01033 else
01034 val = get_audio_clock(is);
01035 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01036 if (is->audio_st)
01037 val = get_audio_clock(is);
01038 else
01039 val = get_video_clock(is);
01040 } else {
01041 val = get_external_clock(is);
01042 }
01043 return val;
01044 }
01045
01046
01047 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01048 {
01049 if (!is->seek_req) {
01050 is->seek_pos = pos;
01051 is->seek_rel = rel;
01052 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01053 if (seek_by_bytes)
01054 is->seek_flags |= AVSEEK_FLAG_BYTE;
01055 is->seek_req = 1;
01056 }
01057 }
01058
01059
01060 static void stream_toggle_pause(VideoState *is)
01061 {
01062 if (is->paused) {
01063 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
01064 if(is->read_pause_return != AVERROR(ENOSYS)){
01065 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
01066 }
01067 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
01068 }
01069 is->paused = !is->paused;
01070 }
01071
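/* compute the time at which the given frame should be displayed:
 * start from the nominal inter-frame delay and, when video is not the
 * master clock, shrink or stretch it so the video drifts back towards
 * the master clock. */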
01072 static double compute_target_time(double frame_current_pts, VideoState *is)
01073 {
01074 double delay, sync_threshold, diff = 0;
01075
01076 /* compute nominal delay */
01077 delay = frame_current_pts - is->frame_last_pts;
01078 if (delay <= 0 || delay >= 10.0) {
01079 /* if incorrect delay, use the previous one */
01080 delay = is->frame_last_delay;
01081 } else {
01082 is->frame_last_delay = delay;
01083 }
01084 is->frame_last_pts = frame_current_pts;
01085
01086 /* update delay to follow the master synchronisation source */
01087 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01088 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01089 /* if video is slave, we try to correct big delays by
01090 duplicating or deleting a frame */
01091 diff = get_video_clock(is) - get_master_clock(is);
01092
01093
01094 /* skip or repeat frame: the frame delay is taken into
01095 account when computing the threshold */
01096 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01097 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01098 if (diff <= -sync_threshold)
01099 delay = 0;
01100 else if (diff >= sync_threshold)
01101 delay = 2 * delay;
01102 }
01103 }
01104 is->frame_timer += delay;
01105
01106 av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
01107 delay, frame_current_pts, -diff);
01108
01109 return is->frame_timer;
01110 }
01111
01112 /* called to display each frame */
01113 static void video_refresh(void *opaque)
01114 {
01115 VideoState *is = opaque;
01116 VideoPicture *vp;
01117
01118 SubPicture *sp, *sp2;
01119
01120 if (is->video_st) {
01121 retry:
01122 if (is->pictq_size == 0) {
01123 /* nothing to do, no picture to display in the queue */
01124 } else {
01125 double time= av_gettime()/1000000.0;
01126 double next_target;
01127
01128 vp = &is->pictq[is->pictq_rindex];
01129
01130 if(time < vp->target_clock)
01131 return;
01132
01133 is->video_current_pts = vp->pts;
01134 is->video_current_pts_drift = is->video_current_pts - time;
01135 is->video_current_pos = vp->pos;
01136 if(is->pictq_size > 1){
01137 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
01138 assert(nextvp->target_clock >= vp->target_clock);
01139 next_target= nextvp->target_clock;
01140 }else{
01141 next_target= vp->target_clock + is->video_clock - vp->pts;
01142 }
01143 if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
01144 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
01145 if(is->pictq_size > 1 || time > next_target + 0.5){
01146 /* drop the current picture: advance the read index without displaying */
01147 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01148 is->pictq_rindex = 0;
01149
01150 SDL_LockMutex(is->pictq_mutex);
01151 is->pictq_size--;
01152 SDL_CondSignal(is->pictq_cond);
01153 SDL_UnlockMutex(is->pictq_mutex);
01154 goto retry;
01155 }
01156 }
01157
01158 if(is->subtitle_st) {
01159 if (is->subtitle_stream_changed) {
01160 SDL_LockMutex(is->subpq_mutex);
01161
01162 while (is->subpq_size) {
01163 free_subpicture(&is->subpq[is->subpq_rindex]);
01164
01165
01166 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01167 is->subpq_rindex = 0;
01168
01169 is->subpq_size--;
01170 }
01171 is->subtitle_stream_changed = 0;
01172
01173 SDL_CondSignal(is->subpq_cond);
01174 SDL_UnlockMutex(is->subpq_mutex);
01175 } else {
01176 if (is->subpq_size > 0) {
01177 sp = &is->subpq[is->subpq_rindex];
01178
01179 if (is->subpq_size > 1)
01180 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
01181 else
01182 sp2 = NULL;
01183
01184 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
01185 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
01186 {
01187 free_subpicture(sp);
01188
01189
01190 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01191 is->subpq_rindex = 0;
01192
01193 SDL_LockMutex(is->subpq_mutex);
01194 is->subpq_size--;
01195 SDL_CondSignal(is->subpq_cond);
01196 SDL_UnlockMutex(is->subpq_mutex);
01197 }
01198 }
01199 }
01200 }
01201
01202
01203 if (!display_disable)
01204 video_display(is);
01205
01206
01207 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01208 is->pictq_rindex = 0;
01209
01210 SDL_LockMutex(is->pictq_mutex);
01211 is->pictq_size--;
01212 SDL_CondSignal(is->pictq_cond);
01213 SDL_UnlockMutex(is->pictq_mutex);
01214 }
01215 } else if (is->audio_st) {
01216
01217
01218 /* if there is only an audio stream, display the audio bars
01219 (better than nothing) */
01220
01221
01222 if (!display_disable)
01223 video_display(is);
01224 }
01225 if (show_status) {
01226 static int64_t last_time;
01227 int64_t cur_time;
01228 int aqsize, vqsize, sqsize;
01229 double av_diff;
01230
01231 cur_time = av_gettime();
01232 if (!last_time || (cur_time - last_time) >= 30000) {
01233 aqsize = 0;
01234 vqsize = 0;
01235 sqsize = 0;
01236 if (is->audio_st)
01237 aqsize = is->audioq.size;
01238 if (is->video_st)
01239 vqsize = is->videoq.size;
01240 if (is->subtitle_st)
01241 sqsize = is->subtitleq.size;
01242 av_diff = 0;
01243 if (is->audio_st && is->video_st)
01244 av_diff = get_audio_clock(is) - get_video_clock(is);
01245 printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
01246 get_master_clock(is),
01247 av_diff,
01248 FFMAX(is->skip_frames-1, 0),
01249 aqsize / 1024,
01250 vqsize / 1024,
01251 sqsize,
01252 is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
01253 is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
01254 fflush(stdout);
01255 last_time = cur_time;
01256 }
01257 }
01258 }
01259
01260 /* allocate a picture (this must be done in the main thread to avoid
01261 potential locking problems) */
01262 static void alloc_picture(void *opaque)
01263 {
01264 VideoState *is = opaque;
01265 VideoPicture *vp;
01266
01267 vp = &is->pictq[is->pictq_windex];
01268
01269 if (vp->bmp)
01270 SDL_FreeYUVOverlay(vp->bmp);
01271
01272 #if CONFIG_AVFILTER
01273 if (vp->picref)
01274 avfilter_unref_buffer(vp->picref);
01275 vp->picref = NULL;
01276
01277 vp->width = is->out_video_filter->inputs[0]->w;
01278 vp->height = is->out_video_filter->inputs[0]->h;
01279 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01280 #else
01281 vp->width = is->video_st->codec->width;
01282 vp->height = is->video_st->codec->height;
01283 vp->pix_fmt = is->video_st->codec->pix_fmt;
01284 #endif
01285
01286 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01287 SDL_YV12_OVERLAY,
01288 screen);
01289 if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
01290 /* SDL allocates a buffer smaller than requested if the video
01291 overlay hardware is unable to support the requested size */
01292 fprintf(stderr, "Error: the video system does not support an image\n"
01293 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
01294 "to reduce the image size.\n", vp->width, vp->height );
01295 do_exit();
01296 }
01297
01298 SDL_LockMutex(is->pictq_mutex);
01299 vp->allocated = 1;
01300 SDL_CondSignal(is->pictq_cond);
01301 SDL_UnlockMutex(is->pictq_mutex);
01302 }
01303
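/* convert the decoded frame to the display format, copy it into the
 * next free SDL YUV overlay and append it to the picture queue,
 * blocking while the queue is full. */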
01304 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01305 {
01306 VideoPicture *vp;
01307 double frame_delay, pts = pts1;
01308
01309 /* compute the exact PTS for the picture if it is omitted in the stream;
01310 pts1 is the dts of the packet / pts of the frame */
01311 if (pts != 0) {
01312 /* update video clock with pts, if present */
01313 is->video_clock = pts;
01314 } else {
01315 pts = is->video_clock;
01316 }
01317 /* update video clock for the next frame */
01318 frame_delay = av_q2d(is->video_st->codec->time_base);
01319 /* for MPEG-2, the frame can be repeated, so we update the
01320 clock accordingly */
01321 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01322 is->video_clock += frame_delay;
01323
01324 #if defined(DEBUG_SYNC) && 0
01325 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
01326 av_get_picture_type_char(src_frame->pict_type), pts, pts1);
01327 #endif
01328
01329 /* wait until we have space to put a new picture */
01330 SDL_LockMutex(is->pictq_mutex);
01331
01332 if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
01333 is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
01334
01335 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
01336 !is->videoq.abort_request) {
01337 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01338 }
01339 SDL_UnlockMutex(is->pictq_mutex);
01340
01341 if (is->videoq.abort_request)
01342 return -1;
01343
01344 vp = &is->pictq[is->pictq_windex];
01345
01346 /* alloc or resize the hardware picture buffer */
01347 if (!vp->bmp ||
01348 #if CONFIG_AVFILTER
01349 vp->width != is->out_video_filter->inputs[0]->w ||
01350 vp->height != is->out_video_filter->inputs[0]->h) {
01351 #else
01352 vp->width != is->video_st->codec->width ||
01353 vp->height != is->video_st->codec->height) {
01354 #endif
01355 SDL_Event event;
01356
01357 vp->allocated = 0;
01358
01359 /* the allocation must be done in the main thread to avoid
01360 locking problems */
01361 event.type = FF_ALLOC_EVENT;
01362 event.user.data1 = is;
01363 SDL_PushEvent(&event);
01364
01365 /* wait until the picture is allocated */
01366 SDL_LockMutex(is->pictq_mutex);
01367 while (!vp->allocated && !is->videoq.abort_request) {
01368 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01369 }
01370 SDL_UnlockMutex(is->pictq_mutex);
01371
01372 if (is->videoq.abort_request)
01373 return -1;
01374 }
01375
01376 /* if the frame is not skipped, then display it */
01377 if (vp->bmp) {
01378 AVPicture pict;
01379 #if CONFIG_AVFILTER
01380 if(vp->picref)
01381 avfilter_unref_buffer(vp->picref);
01382 vp->picref = src_frame->opaque;
01383 #endif
01384
01385
01386 SDL_LockYUVOverlay (vp->bmp);
01387
01388 memset(&pict,0,sizeof(AVPicture));
01389 pict.data[0] = vp->bmp->pixels[0];
01390 pict.data[1] = vp->bmp->pixels[2];
01391 pict.data[2] = vp->bmp->pixels[1];
01392
01393 pict.linesize[0] = vp->bmp->pitches[0];
01394 pict.linesize[1] = vp->bmp->pitches[2];
01395 pict.linesize[2] = vp->bmp->pitches[1];
01396
01397 #if CONFIG_AVFILTER
01398
01399 av_picture_copy(&pict, (AVPicture *)src_frame,
01400 vp->pix_fmt, vp->width, vp->height);
01401 #else
01402 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
01403 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
01404 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
01405 PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
01406 if (is->img_convert_ctx == NULL) {
01407 fprintf(stderr, "Cannot initialize the conversion context\n");
01408 exit(1);
01409 }
01410 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
01411 0, vp->height, pict.data, pict.linesize);
01412 #endif
01413
01414 SDL_UnlockYUVOverlay(vp->bmp);
01415
01416 vp->pts = pts;
01417 vp->pos = pos;
01418
01419
01420 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
01421 is->pictq_windex = 0;
01422 SDL_LockMutex(is->pictq_mutex);
01423 vp->target_clock= compute_target_time(vp->pts, is);
01424
01425 is->pictq_size++;
01426 SDL_UnlockMutex(is->pictq_mutex);
01427 }
01428 return 0;
01429 }
01430 /* decode one video frame; return 1 when a frame is ready to be queued, 0 otherwise, < 0 on abort */
01431 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01432 {
01433 int len1 av_unused, got_picture, i;
01434
01435 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01436 return -1;
01437
01438 if (pkt->data == flush_pkt.data) {
01439 avcodec_flush_buffers(is->video_st->codec);
01440
01441 SDL_LockMutex(is->pictq_mutex);
01442
01443 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01444 is->pictq[i].target_clock= 0;
01445 }
01446 while (is->pictq_size && !is->videoq.abort_request) {
01447 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01448 }
01449 is->video_current_pos = -1;
01450 SDL_UnlockMutex(is->pictq_mutex);
01451
01452 is->frame_last_pts = AV_NOPTS_VALUE;
01453 is->frame_last_delay = 0;
01454 is->frame_timer = (double)av_gettime() / 1000000.0;
01455 is->skip_frames = 1;
01456 is->skip_frames_index = 0;
01457 return 0;
01458 }
01459
01460 len1 = avcodec_decode_video2(is->video_st->codec,
01461 frame, &got_picture,
01462 pkt);
01463
01464 if (got_picture) {
01465 if (decoder_reorder_pts == -1) {
01466 *pts = frame->best_effort_timestamp;
01467 } else if (decoder_reorder_pts) {
01468 *pts = frame->pkt_pts;
01469 } else {
01470 *pts = frame->pkt_dts;
01471 }
01472
01473 if (*pts == AV_NOPTS_VALUE) {
01474 *pts = 0;
01475 }
01476
01477 is->skip_frames_index += 1;
01478 if(is->skip_frames_index >= is->skip_frames){
01479 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
01480 return 1;
01481 }
01482
01483 }
01484 return 0;
01485 }
01486
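/* "ffplay_input": a private libavfilter source filter that wraps the
 * video decoder so decoded frames can be pushed into a user filter
 * graph. When the decoder supports direct rendering, get/release/reget
 * buffer callbacks are overridden so it decodes straight into
 * filter-owned buffers. */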
01487 #if CONFIG_AVFILTER
01488 typedef struct {
01489 VideoState *is;
01490 AVFrame *frame;
01491 int use_dr1;
01492 } FilterPriv;
01493
01494 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
01495 {
01496 AVFilterContext *ctx = codec->opaque;
01497 AVFilterBufferRef *ref;
01498 int perms = AV_PERM_WRITE;
01499 int i, w, h, stride[4];
01500 unsigned edge;
01501 int pixel_size;
01502
01503 av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
01504
01505 if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
01506 perms |= AV_PERM_NEG_LINESIZES;
01507
01508 if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
01509 if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
01510 if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
01511 if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
01512 }
01513 if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
01514
01515 w = codec->width;
01516 h = codec->height;
01517
01518 if(av_image_check_size(w, h, 0, codec))
01519 return -1;
01520
01521 avcodec_align_dimensions2(codec, &w, &h, stride);
01522 edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
01523 w += edge << 1;
01524 h += edge << 1;
01525
01526 if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
01527 return -1;
01528
01529 pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
01530 ref->video->w = codec->width;
01531 ref->video->h = codec->height;
01532 for(i = 0; i < 4; i ++) {
01533 unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
01534 unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
01535
01536 if (ref->data[i]) {
01537 ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
01538 }
01539 pic->data[i] = ref->data[i];
01540 pic->linesize[i] = ref->linesize[i];
01541 }
01542 pic->opaque = ref;
01543 pic->age = INT_MAX;
01544 pic->type = FF_BUFFER_TYPE_USER;
01545 pic->reordered_opaque = codec->reordered_opaque;
01546 if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
01547 else pic->pkt_pts = AV_NOPTS_VALUE;
01548 return 0;
01549 }
01550
01551 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01552 {
01553 memset(pic->data, 0, sizeof(pic->data));
01554 avfilter_unref_buffer(pic->opaque);
01555 }
01556
01557 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
01558 {
01559 AVFilterBufferRef *ref = pic->opaque;
01560
01561 if (pic->data[0] == NULL) {
01562 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
01563 return codec->get_buffer(codec, pic);
01564 }
01565
01566 if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
01567 (codec->pix_fmt != ref->format)) {
01568 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
01569 return -1;
01570 }
01571
01572 pic->reordered_opaque = codec->reordered_opaque;
01573 if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
01574 else pic->pkt_pts = AV_NOPTS_VALUE;
01575 return 0;
01576 }
01577
01578 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01579 {
01580 FilterPriv *priv = ctx->priv;
01581 AVCodecContext *codec;
01582 if(!opaque) return -1;
01583
01584 priv->is = opaque;
01585 codec = priv->is->video_st->codec;
01586 codec->opaque = ctx;
01587 if((codec->codec->capabilities & CODEC_CAP_DR1)
01588 ) {
01589 av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
01590 priv->use_dr1 = 1;
01591 codec->get_buffer = input_get_buffer;
01592 codec->release_buffer = input_release_buffer;
01593 codec->reget_buffer = input_reget_buffer;
01594 codec->thread_safe_callbacks = 1;
01595 }
01596
01597 priv->frame = avcodec_alloc_frame();
01598
01599 return 0;
01600 }
01601
01602 static void input_uninit(AVFilterContext *ctx)
01603 {
01604 FilterPriv *priv = ctx->priv;
01605 av_free(priv->frame);
01606 }
01607
01608 static int input_request_frame(AVFilterLink *link)
01609 {
01610 FilterPriv *priv = link->src->priv;
01611 AVFilterBufferRef *picref;
01612 int64_t pts = 0;
01613 AVPacket pkt;
01614 int ret;
01615
01616 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
01617 av_free_packet(&pkt);
01618 if (ret < 0)
01619 return -1;
01620
01621 if(priv->use_dr1 && priv->frame->opaque) {
01622 picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
01623 } else {
01624 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
01625 av_image_copy(picref->data, picref->linesize,
01626 priv->frame->data, priv->frame->linesize,
01627 picref->format, link->w, link->h);
01628 }
01629 av_free_packet(&pkt);
01630
01631 avfilter_copy_frame_props(picref, priv->frame);
01632 picref->pts = pts;
01633
01634 avfilter_start_frame(link, picref);
01635 avfilter_draw_slice(link, 0, link->h, 1);
01636 avfilter_end_frame(link);
01637
01638 return 0;
01639 }
01640
01641 static int input_query_formats(AVFilterContext *ctx)
01642 {
01643 FilterPriv *priv = ctx->priv;
01644 enum PixelFormat pix_fmts[] = {
01645 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01646 };
01647
01648 avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
01649 return 0;
01650 }
01651
01652 static int input_config_props(AVFilterLink *link)
01653 {
01654 FilterPriv *priv = link->src->priv;
01655 AVCodecContext *c = priv->is->video_st->codec;
01656
01657 link->w = c->width;
01658 link->h = c->height;
01659 link->time_base = priv->is->video_st->time_base;
01660
01661 return 0;
01662 }
01663
01664 static AVFilter input_filter =
01665 {
01666 .name = "ffplay_input",
01667
01668 .priv_size = sizeof(FilterPriv),
01669
01670 .init = input_init,
01671 .uninit = input_uninit,
01672
01673 .query_formats = input_query_formats,
01674
01675 .inputs = (AVFilterPad[]) {{ .name = NULL }},
01676 .outputs = (AVFilterPad[]) {{ .name = "default",
01677 .type = AVMEDIA_TYPE_VIDEO,
01678 .request_frame = input_request_frame,
01679 .config_props = input_config_props, },
01680 { .name = NULL }},
01681 };
01682
01683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
01684 {
01685 char sws_flags_str[128];
01686 int ret;
01687 enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
01688 AVFilterContext *filt_src = NULL, *filt_out = NULL;
01689 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
01690 graph->scale_sws_opts = av_strdup(sws_flags_str);
01691
01692 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
01693 NULL, is, graph)) < 0)
01694 goto the_end;
01695 if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
01696 NULL, pix_fmts, graph)) < 0)
01697 goto the_end;
01698
01699 if(vfilters) {
01700 AVFilterInOut *outputs = avfilter_inout_alloc();
01701 AVFilterInOut *inputs = avfilter_inout_alloc();
01702
01703 outputs->name = av_strdup("in");
01704 outputs->filter_ctx = filt_src;
01705 outputs->pad_idx = 0;
01706 outputs->next = NULL;
01707
01708 inputs->name = av_strdup("out");
01709 inputs->filter_ctx = filt_out;
01710 inputs->pad_idx = 0;
01711 inputs->next = NULL;
01712
01713 if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
01714 goto the_end;
01715 av_freep(&vfilters);
01716 } else {
01717 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
01718 goto the_end;
01719 }
01720
01721 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
01722 goto the_end;
01723
01724 is->out_video_filter = filt_out;
01725 the_end:
01726 return ret;
01727 }
01728
01729 #endif
01730
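/* video decoding thread: pulls frames from the video packet queue
 * (optionally through the filter graph), rescales their timestamps to
 * seconds and hands them to queue_picture(). */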
01731 static int video_thread(void *arg)
01732 {
01733 VideoState *is = arg;
01734 AVFrame *frame= avcodec_alloc_frame();
01735 int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
01736 double pts;
01737 int ret;
01738
01739 #if CONFIG_AVFILTER
01740 AVFilterGraph *graph = avfilter_graph_alloc();
01741 AVFilterContext *filt_out = NULL;
01742
01743 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01744 goto the_end;
01745 filt_out = is->out_video_filter;
01746 #endif
01747
01748 for(;;) {
01749 #if !CONFIG_AVFILTER
01750 AVPacket pkt;
01751 #else
01752 AVFilterBufferRef *picref;
01753 AVRational tb = filt_out->inputs[0]->time_base;
01754 #endif
01755 while (is->paused && !is->videoq.abort_request)
01756 SDL_Delay(10);
01757 #if CONFIG_AVFILTER
01758 ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
01759 if (picref) {
01760 avfilter_fill_frame_from_video_buffer_ref(frame, picref);
01761 pts_int = picref->pts;
01762 pos = picref->pos;
01763 frame->opaque = picref;
01764 }
01765
01766 if (av_cmp_q(tb, is->video_st->time_base)) {
01767 av_unused int64_t pts1 = pts_int;
01768 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
01769 av_dlog(NULL, "video_thread(): "
01770 "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
01771 tb.num, tb.den, pts1,
01772 is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
01773 }
01774 #else
01775 ret = get_video_frame(is, frame, &pts_int, &pkt);
01776 pos = pkt.pos;
01777 av_free_packet(&pkt);
01778 #endif
01779
01780 if (ret < 0) goto the_end;
01781
01782 #if CONFIG_AVFILTER
01783 if (!picref)
01784 continue;
01785 #endif
01786
01787 pts = pts_int*av_q2d(is->video_st->time_base);
01788
01789 ret = queue_picture(is, frame, pts, pos);
01790
01791 if (ret < 0)
01792 goto the_end;
01793
01794 if (step)
01795 if (cur_stream)
01796 stream_toggle_pause(cur_stream);
01797 }
01798 the_end:
01799 #if CONFIG_AVFILTER
01800 avfilter_graph_free(&graph);
01801 #endif
01802 av_free(frame);
01803 return 0;
01804 }
01805
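/* subtitle decoding thread: decodes bitmap subtitles, converts their
 * palettes from RGBA to YUVA and stores them in the subtitle queue
 * until the video refresh code blends and frees them. */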
01806 static int subtitle_thread(void *arg)
01807 {
01808 VideoState *is = arg;
01809 SubPicture *sp;
01810 AVPacket pkt1, *pkt = &pkt1;
01811 int len1 av_unused, got_subtitle;
01812 double pts;
01813 int i, j;
01814 int r, g, b, y, u, v, a;
01815
01816 for(;;) {
01817 while (is->paused && !is->subtitleq.abort_request) {
01818 SDL_Delay(10);
01819 }
01820 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01821 break;
01822
01823 if(pkt->data == flush_pkt.data){
01824 avcodec_flush_buffers(is->subtitle_st->codec);
01825 continue;
01826 }
01827 SDL_LockMutex(is->subpq_mutex);
01828 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01829 !is->subtitleq.abort_request) {
01830 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01831 }
01832 SDL_UnlockMutex(is->subpq_mutex);
01833
01834 if (is->subtitleq.abort_request)
01835 goto the_end;
01836
01837 sp = &is->subpq[is->subpq_windex];
01838
01839
01840
01841 pts = 0;
01842 if (pkt->pts != AV_NOPTS_VALUE)
01843 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
01844
01845 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
01846 &sp->sub, &got_subtitle,
01847 pkt);
01848 if (got_subtitle && sp->sub.format == 0) {
01849 sp->pts = pts;
01850
01851 for (i = 0; i < sp->sub.num_rects; i++)
01852 {
01853 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01854 {
01855 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01856 y = RGB_TO_Y_CCIR(r, g, b);
01857 u = RGB_TO_U_CCIR(r, g, b, 0);
01858 v = RGB_TO_V_CCIR(r, g, b, 0);
01859 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01860 }
01861 }
01862
01863
01864 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01865 is->subpq_windex = 0;
01866 SDL_LockMutex(is->subpq_mutex);
01867 is->subpq_size++;
01868 SDL_UnlockMutex(is->subpq_mutex);
01869 }
01870 av_free_packet(pkt);
01871 }
01872 the_end:
01873 return 0;
01874 }
01875
01876 /* copy samples for the audio visualization */
01877 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01878 {
01879 int size, len;
01880
01881 size = samples_size / sizeof(short);
01882 while (size > 0) {
01883 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01884 if (len > size)
01885 len = size;
01886 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01887 samples += len;
01888 is->sample_array_index += len;
01889 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01890 is->sample_array_index = 0;
01891 size -= len;
01892 }
01893 }
01894
01895 /* return the new audio buffer size (samples can be added or deleted
01896 to get better sync if video or the external clock is master) */
01897 static int synchronize_audio(VideoState *is, short *samples,
01898 int samples_size1, double pts)
01899 {
01900 int n, samples_size;
01901 double ref_clock;
01902
01903 n = 2 * is->audio_st->codec->channels;
01904 samples_size = samples_size1;
01905
01906 /* if audio is not master, we try to remove or add samples to correct the clock */
01907 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01908 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01909 double diff, avg_diff;
01910 int wanted_size, min_size, max_size, nb_samples;
01911
01912 ref_clock = get_master_clock(is);
01913 diff = get_audio_clock(is) - ref_clock;
01914
01915 if (diff < AV_NOSYNC_THRESHOLD) {
01916 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01917 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01918 /* not enough measures to have a correct estimate */
01919 is->audio_diff_avg_count++;
01920 } else {
01921 /* estimate the A-V difference */
01922 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01923
01924 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01925 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01926 nb_samples = samples_size / n;
01927
01928 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01929 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01930 if (wanted_size < min_size)
01931 wanted_size = min_size;
01932 else if (wanted_size > max_size)
01933 wanted_size = max_size;
01934
01935 /* add or remove samples to correct the sync */
01936 if (wanted_size < samples_size) {
01937 /* remove samples */
01938 samples_size = wanted_size;
01939 } else if (wanted_size > samples_size) {
01940 uint8_t *samples_end, *q;
01941 int nb;
01942
01943 /* add samples by copying the final sample */
01944 nb = (wanted_size - samples_size);
01945 samples_end = (uint8_t *)samples + samples_size - n;
01946 q = samples_end + n;
01947 while (nb > 0) {
01948 memcpy(q, samples_end, n);
01949 q += n;
01950 nb -= n;
01951 }
01952 samples_size = wanted_size;
01953 }
01954 }
01955 #if 0
01956 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
01957 diff, avg_diff, samples_size - samples_size1,
01958 is->audio_clock, is->video_clock, is->audio_diff_threshold);
01959 #endif
01960 }
01961 } else {
01962 /* too big a difference: may be initial PTS errors, so
01963 reset the A-V filter */
01964 is->audio_diff_avg_count = 0;
01965 is->audio_diff_cum = 0;
01966 }
01967 }
01968
01969 return samples_size;
01970 }
01971
01972 /* decode one audio frame and return its uncompressed size */
01973 static int audio_decode_frame(VideoState *is, double *pts_ptr)
01974 {
01975 AVPacket *pkt_temp = &is->audio_pkt_temp;
01976 AVPacket *pkt = &is->audio_pkt;
01977 AVCodecContext *dec= is->audio_st->codec;
01978 int n, len1, data_size;
01979 double pts;
01980
01981 for(;;) {
01982 /* NOTE: the audio packet can contain several frames */
01983 while (pkt_temp->size > 0) {
01984 data_size = sizeof(is->audio_buf1);
01985 len1 = avcodec_decode_audio3(dec,
01986 (int16_t *)is->audio_buf1, &data_size,
01987 pkt_temp);
01988 if (len1 < 0) {
01989 /* if error, we skip the frame */
01990 pkt_temp->size = 0;
01991 break;
01992 }
01993
01994 pkt_temp->data += len1;
01995 pkt_temp->size -= len1;
01996 if (data_size <= 0)
01997 continue;
01998
01999 if (dec->sample_fmt != is->audio_src_fmt) {
02000 if (is->reformat_ctx)
02001 av_audio_convert_free(is->reformat_ctx);
02002 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
02003 dec->sample_fmt, 1, NULL, 0);
02004 if (!is->reformat_ctx) {
02005 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
02006 av_get_sample_fmt_name(dec->sample_fmt),
02007 av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
02008 break;
02009 }
02010 is->audio_src_fmt= dec->sample_fmt;
02011 }
02012
02013 if (is->reformat_ctx) {
02014 const void *ibuf[6]= {is->audio_buf1};
02015 void *obuf[6]= {is->audio_buf2};
02016 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
02017 int ostride[6]= {2};
02018 int len= data_size/istride[0];
02019 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
02020 printf("av_audio_convert() failed\n");
02021 break;
02022 }
02023 is->audio_buf= is->audio_buf2;
02024
02025
02026 data_size= len*2;
02027 }else{
02028 is->audio_buf= is->audio_buf1;
02029 }
02030
02031 /* if no pts, then compute it */
02032 pts = is->audio_clock;
02033 *pts_ptr = pts;
02034 n = 2 * dec->channels;
02035 is->audio_clock += (double)data_size /
02036 (double)(n * dec->sample_rate);
02037 #ifdef DEBUG
02038 {
02039 static double last_clock;
02040 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
02041 is->audio_clock - last_clock,
02042 is->audio_clock, pts);
02043 last_clock = is->audio_clock;
02044 }
02045 #endif
02046 return data_size;
02047 }
02048
02049
02050 if (pkt->data)
02051 av_free_packet(pkt);
02052
02053 if (is->paused || is->audioq.abort_request) {
02054 return -1;
02055 }
02056
02057
02058 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
02059 return -1;
02060 if(pkt->data == flush_pkt.data){
02061 avcodec_flush_buffers(dec);
02062 continue;
02063 }
02064
02065 pkt_temp->data = pkt->data;
02066 pkt_temp->size = pkt->size;
02067
02068 /* if the packet has a pts, update the audio clock with it */
02069 if (pkt->pts != AV_NOPTS_VALUE) {
02070 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
02071 }
02072 }
02073 }
02074
02075 /* prepare a new audio buffer */
02076 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02077 {
02078 VideoState *is = opaque;
02079 int audio_size, len1;
02080 double pts;
02081
02082 audio_callback_time = av_gettime();
02083
02084 while (len > 0) {
02085 if (is->audio_buf_index >= is->audio_buf_size) {
02086 audio_size = audio_decode_frame(is, &pts);
02087 if (audio_size < 0) {
02088 /* if error, just output silence */
02089 is->audio_buf = is->audio_buf1;
02090 is->audio_buf_size = 1024;
02091 memset(is->audio_buf, 0, is->audio_buf_size);
02092 } else {
02093 if (is->show_mode != SHOW_MODE_VIDEO)
02094 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02095 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02096 pts);
02097 is->audio_buf_size = audio_size;
02098 }
02099 is->audio_buf_index = 0;
02100 }
02101 len1 = is->audio_buf_size - is->audio_buf_index;
02102 if (len1 > len)
02103 len1 = len;
02104 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02105 len -= len1;
02106 stream += len1;
02107 is->audio_buf_index += len1;
02108 }
02109 }
02110
02111 /* open a given stream. Return 0 if OK */
02112 static int stream_component_open(VideoState *is, int stream_index)
02113 {
02114 AVFormatContext *ic = is->ic;
02115 AVCodecContext *avctx;
02116 AVCodec *codec;
02117 SDL_AudioSpec wanted_spec, spec;
02118
02119 if (stream_index < 0 || stream_index >= ic->nb_streams)
02120 return -1;
02121 avctx = ic->streams[stream_index]->codec;
02122
02123 /* prepare audio output */
02124 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02125 if (avctx->channels > 0) {
02126 avctx->request_channels = FFMIN(2, avctx->channels);
02127 } else {
02128 avctx->request_channels = 2;
02129 }
02130 }
02131
02132 codec = avcodec_find_decoder(avctx->codec_id);
02133 if (!codec)
02134 return -1;
02135
02136 avctx->workaround_bugs = workaround_bugs;
02137 avctx->lowres = lowres;
02138 if(avctx->lowres > codec->max_lowres){
02139 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
02140 codec->max_lowres);
02141 avctx->lowres= codec->max_lowres;
02142 }
02143 if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
02144 avctx->idct_algo= idct;
02145 if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
02146 avctx->skip_frame= skip_frame;
02147 avctx->skip_idct= skip_idct;
02148 avctx->skip_loop_filter= skip_loop_filter;
02149 avctx->error_recognition= error_recognition;
02150 avctx->error_concealment= error_concealment;
02151 avctx->thread_count= thread_count;
02152
02153 set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
02154
02155 if(codec->capabilities & CODEC_CAP_DR1)
02156 avctx->flags |= CODEC_FLAG_EMU_EDGE;
02157
02158 if (avcodec_open(avctx, codec) < 0)
02159 return -1;
02160
02161 /* prepare audio output */
02162 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02163 if(avctx->sample_rate <= 0 || avctx->channels <= 0){
02164 fprintf(stderr, "Invalid sample rate or channel count\n");
02165 return -1;
02166 }
02167 wanted_spec.freq = avctx->sample_rate;
02168 wanted_spec.format = AUDIO_S16SYS;
02169 wanted_spec.channels = avctx->channels;
02170 wanted_spec.silence = 0;
02171 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
02172 wanted_spec.callback = sdl_audio_callback;
02173 wanted_spec.userdata = is;
02174 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
02175 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
02176 return -1;
02177 }
02178 is->audio_hw_buf_size = spec.size;
02179 is->audio_src_fmt= AV_SAMPLE_FMT_S16;
02180 }
02181
02182 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
02183 switch(avctx->codec_type) {
02184 case AVMEDIA_TYPE_AUDIO:
02185 is->audio_stream = stream_index;
02186 is->audio_st = ic->streams[stream_index];
02187 is->audio_buf_size = 0;
02188 is->audio_buf_index = 0;
02189
02190 /* init averaging filter */
02191 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
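/* the coefficient is the AUDIO_DIFF_AVG_NB-th root of 0.01 (about 0.794 for NB = 20), */
/* so a difference measured AUDIO_DIFF_AVG_NB callbacks ago is weighted down to 1% */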
02192 is->audio_diff_avg_count = 0;
02193 /* since we do not have a precise enough audio fifo fullness, */
02194 /* we correct audio sync only if larger than this threshold */
02195 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
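/* e.g. with a 1024-sample SDL buffer at 44100 Hz this is about 46 ms */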
02196
02197 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
02198 packet_queue_init(&is->audioq);
02199 SDL_PauseAudio(0);
02200 break;
02201 case AVMEDIA_TYPE_VIDEO:
02202 is->video_stream = stream_index;
02203 is->video_st = ic->streams[stream_index];
02204
02205 packet_queue_init(&is->videoq);
02206 is->video_tid = SDL_CreateThread(video_thread, is);
02207 break;
02208 case AVMEDIA_TYPE_SUBTITLE:
02209 is->subtitle_stream = stream_index;
02210 is->subtitle_st = ic->streams[stream_index];
02211 packet_queue_init(&is->subtitleq);
02212
02213 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
02214 break;
02215 default:
02216 break;
02217 }
02218 return 0;
02219 }
02220
02221 static void stream_component_close(VideoState *is, int stream_index)
02222 {
02223 AVFormatContext *ic = is->ic;
02224 AVCodecContext *avctx;
02225
02226 if (stream_index < 0 || stream_index >= ic->nb_streams)
02227 return;
02228 avctx = ic->streams[stream_index]->codec;
02229
02230 switch(avctx->codec_type) {
02231 case AVMEDIA_TYPE_AUDIO:
02232 packet_queue_abort(&is->audioq);
02233
02234 SDL_CloseAudio();
02235
02236 packet_queue_end(&is->audioq);
02237 if (is->reformat_ctx)
02238 av_audio_convert_free(is->reformat_ctx);
02239 is->reformat_ctx = NULL;
02240 break;
02241 case AVMEDIA_TYPE_VIDEO:
02242 packet_queue_abort(&is->videoq);
02243
02244 /* note: we also signal this mutex to make sure we deblock the */
02245 /* video thread in all cases */
02246 SDL_LockMutex(is->pictq_mutex);
02247 SDL_CondSignal(is->pictq_cond);
02248 SDL_UnlockMutex(is->pictq_mutex);
02249
02250 SDL_WaitThread(is->video_tid, NULL);
02251
02252 packet_queue_end(&is->videoq);
02253 break;
02254 case AVMEDIA_TYPE_SUBTITLE:
02255 packet_queue_abort(&is->subtitleq);
02256
02257 /* note: we also signal this mutex to make sure we deblock the */
02258 /* subtitle thread in all cases */
02259 SDL_LockMutex(is->subpq_mutex);
02260 is->subtitle_stream_changed = 1;
02261
02262 SDL_CondSignal(is->subpq_cond);
02263 SDL_UnlockMutex(is->subpq_mutex);
02264
02265 SDL_WaitThread(is->subtitle_tid, NULL);
02266
02267 packet_queue_end(&is->subtitleq);
02268 break;
02269 default:
02270 break;
02271 }
02272
02273 ic->streams[stream_index]->discard = AVDISCARD_ALL;
02274 avcodec_close(avctx);
02275 switch(avctx->codec_type) {
02276 case AVMEDIA_TYPE_AUDIO:
02277 is->audio_st = NULL;
02278 is->audio_stream = -1;
02279 break;
02280 case AVMEDIA_TYPE_VIDEO:
02281 is->video_st = NULL;
02282 is->video_stream = -1;
02283 break;
02284 case AVMEDIA_TYPE_SUBTITLE:
02285 is->subtitle_st = NULL;
02286 is->subtitle_stream = -1;
02287 break;
02288 default:
02289 break;
02290 }
02291 }
02292
02293 /* since we have only one decoding thread, we can use a global */
02294 /* variable instead of a signal */
02295 static VideoState *global_video_state;
02296
02297 static int decode_interrupt_cb(void)
02298 {
02299 return (global_video_state && global_video_state->abort_request);
02300 }
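/* the callback above is registered with avio_set_interrupt_cb() in read_thread; */
/* libavformat polls it during blocking I/O and aborts the operation once it */
/* returns non-zero, so the read thread can shut down promptly on abort_request */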
02301
02302 /* this thread gets the stream from the disk or the network */
02303 static int read_thread(void *arg)
02304 {
02305 VideoState *is = arg;
02306 AVFormatContext *ic = NULL;
02307 int err, i, ret;
02308 int st_index[AVMEDIA_TYPE_NB];
02309 AVPacket pkt1, *pkt = &pkt1;
02310 int eof=0;
02311 int pkt_in_play_range = 0;
02312 AVDictionaryEntry *t;
02313
02314 memset(st_index, -1, sizeof(st_index));
02315 is->video_stream = -1;
02316 is->audio_stream = -1;
02317 is->subtitle_stream = -1;
02318
02319 global_video_state = is;
02320 avio_set_interrupt_cb(decode_interrupt_cb);
02321
02322 err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
02323 if (err < 0) {
02324 print_error(is->filename, err);
02325 ret = -1;
02326 goto fail;
02327 }
02328 if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
02329 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
02330 ret = AVERROR_OPTION_NOT_FOUND;
02331 goto fail;
02332 }
02333 is->ic = ic;
02334
02335 if(genpts)
02336 ic->flags |= AVFMT_FLAG_GENPTS;
02337
02338 err = av_find_stream_info(ic);
02339 if (err < 0) {
02340 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02341 ret = -1;
02342 goto fail;
02343 }
02344 if(ic->pb)
02345 ic->pb->eof_reached= 0;
02346
02347 if(seek_by_bytes<0)
02348 seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
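/* formats flagged AVFMT_TS_DISCONT (e.g. MPEG-TS) may have timestamp jumps, */
/* so byte-based seeking becomes the default for them when -bytes is -1 (auto) */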
02349
02350 /* if seeking requested, we execute it */
02351 if (start_time != AV_NOPTS_VALUE) {
02352 int64_t timestamp;
02353
02354 timestamp = start_time;
02355 /* add the stream start time */
02356 if (ic->start_time != AV_NOPTS_VALUE)
02357 timestamp += ic->start_time;
02358 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02359 if (ret < 0) {
02360 fprintf(stderr, "%s: could not seek to position %0.3f\n",
02361 is->filename, (double)timestamp / AV_TIME_BASE);
02362 }
02363 }
02364
02365 for (i = 0; i < ic->nb_streams; i++)
02366 ic->streams[i]->discard = AVDISCARD_ALL;
02367 if (!video_disable)
02368 st_index[AVMEDIA_TYPE_VIDEO] =
02369 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
02370 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
02371 if (!audio_disable)
02372 st_index[AVMEDIA_TYPE_AUDIO] =
02373 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
02374 wanted_stream[AVMEDIA_TYPE_AUDIO],
02375 st_index[AVMEDIA_TYPE_VIDEO],
02376 NULL, 0);
02377 if (!video_disable)
02378 st_index[AVMEDIA_TYPE_SUBTITLE] =
02379 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
02380 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
02381 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
02382 st_index[AVMEDIA_TYPE_AUDIO] :
02383 st_index[AVMEDIA_TYPE_VIDEO]),
02384 NULL, 0);
02385 if (show_status) {
02386 av_dump_format(ic, 0, is->filename, 0);
02387 }
02388
02389 is->show_mode = show_mode;
02390
02391 /* open the streams */
02392 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02393 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02394 }
02395
02396 ret=-1;
02397 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02398 ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02399 }
02400 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02401 if (is->show_mode == SHOW_MODE_NONE)
02402 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
02403
02404 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02405 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02406 }
02407
02408 if (is->video_stream < 0 && is->audio_stream < 0) {
02409 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02410 ret = -1;
02411 goto fail;
02412 }
02413
02414 for(;;) {
02415 if (is->abort_request)
02416 break;
02417 if (is->paused != is->last_paused) {
02418 is->last_paused = is->paused;
02419 if (is->paused)
02420 is->read_pause_return= av_read_pause(ic);
02421 else
02422 av_read_play(ic);
02423 }
02424 #if CONFIG_RTSP_DEMUXER
02425 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02426 /* wait 10 ms to avoid trying to get another packet */
02427 /* XXX: horrible */
02428 SDL_Delay(10);
02429 continue;
02430 }
02431 #endif
02432 if (is->seek_req) {
02433 int64_t seek_target= is->seek_pos;
02434 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02435 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
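/* for a relative seek, seek_target - seek_rel is roughly the pre-seek position, */
/* so the [seek_min, seek_max] window keeps the demuxer from landing back there */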
02436 /* FIXME: the +-2 is due to rounding not being done in the */
02437 /* correct direction when seek_pos/seek_rel are generated */
02438
02439 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02440 if (ret < 0) {
02441 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02442 }else{
02443 if (is->audio_stream >= 0) {
02444 packet_queue_flush(&is->audioq);
02445 packet_queue_put(&is->audioq, &flush_pkt);
02446 }
02447 if (is->subtitle_stream >= 0) {
02448 packet_queue_flush(&is->subtitleq);
02449 packet_queue_put(&is->subtitleq, &flush_pkt);
02450 }
02451 if (is->video_stream >= 0) {
02452 packet_queue_flush(&is->videoq);
02453 packet_queue_put(&is->videoq, &flush_pkt);
02454 }
02455 }
02456 is->seek_req = 0;
02457 eof= 0;
02458 }
02459
02460 /* if the queues are full, no need to read more */
02461 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02462 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
02463 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
02464 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
02465 /* wait 10 ms */
02466 SDL_Delay(10);
02467 continue;
02468 }
02469 if(eof) {
02470 if(is->video_stream >= 0){
02471 av_init_packet(pkt);
02472 pkt->data=NULL;
02473 pkt->size=0;
02474 pkt->stream_index= is->video_stream;
02475 packet_queue_put(&is->videoq, pkt);
02476 }
02477 SDL_Delay(10);
02478 if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
02479 if(loop!=1 && (!loop || --loop)){
02480 stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02481 }else if(autoexit){
02482 ret=AVERROR_EOF;
02483 goto fail;
02484 }
02485 }
02486 eof=0;
02487 continue;
02488 }
02489 ret = av_read_frame(ic, pkt);
02490 if (ret < 0) {
02491 if (ret == AVERROR_EOF || url_feof(ic->pb))
02492 eof=1;
02493 if (ic->pb && ic->pb->error)
02494 break;
02495 SDL_Delay(100);
02496 continue;
02497 }
02498 /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
02499 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02500 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02501 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02502 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
02503 <= ((double)duration/1000000);
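/* the packet time relative to the stream start, in seconds, is compared against */
/* -ss/-t (both in AV_TIME_BASE units, hence the /1000000); e.g. with -ss 10 -t 5, */
/* packets more than 15 s into the stream are discarded */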
02504 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02505 packet_queue_put(&is->audioq, pkt);
02506 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02507 packet_queue_put(&is->videoq, pkt);
02508 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02509 packet_queue_put(&is->subtitleq, pkt);
02510 } else {
02511 av_free_packet(pkt);
02512 }
02513 }
02514
02515 while (!is->abort_request) {
02516 SDL_Delay(100);
02517 }
02518
02519 ret = 0;
02520 fail:
02521 /* disable interrupting */
02522 global_video_state = NULL;
02523
02524 /* close each stream */
02525 if (is->audio_stream >= 0)
02526 stream_component_close(is, is->audio_stream);
02527 if (is->video_stream >= 0)
02528 stream_component_close(is, is->video_stream);
02529 if (is->subtitle_stream >= 0)
02530 stream_component_close(is, is->subtitle_stream);
02531 if (is->ic) {
02532 av_close_input_file(is->ic);
02533 is->ic = NULL;
02534 }
02535 avio_set_interrupt_cb(NULL);
02536
02537 if (ret != 0) {
02538 SDL_Event event;
02539
02540 event.type = FF_QUIT_EVENT;
02541 event.user.data1 = is;
02542 SDL_PushEvent(&event);
02543 }
02544 return 0;
02545 }
02546
02547 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02548 {
02549 VideoState *is;
02550
02551 is = av_mallocz(sizeof(VideoState));
02552 if (!is)
02553 return NULL;
02554 av_strlcpy(is->filename, filename, sizeof(is->filename));
02555 is->iformat = iformat;
02556 is->ytop = 0;
02557 is->xleft = 0;
02558
02559 /* start video display */
02560 is->pictq_mutex = SDL_CreateMutex();
02561 is->pictq_cond = SDL_CreateCond();
02562
02563 is->subpq_mutex = SDL_CreateMutex();
02564 is->subpq_cond = SDL_CreateCond();
02565
02566 is->av_sync_type = av_sync_type;
02567 is->read_tid = SDL_CreateThread(read_thread, is);
02568 if (!is->read_tid) {
02569 av_free(is);
02570 return NULL;
02571 }
02572 return is;
02573 }
02574
02575 static void stream_cycle_channel(VideoState *is, int codec_type)
02576 {
02577 AVFormatContext *ic = is->ic;
02578 int start_index, stream_index;
02579 AVStream *st;
02580
02581 if (codec_type == AVMEDIA_TYPE_VIDEO)
02582 start_index = is->video_stream;
02583 else if (codec_type == AVMEDIA_TYPE_AUDIO)
02584 start_index = is->audio_stream;
02585 else
02586 start_index = is->subtitle_stream;
02587 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02588 return;
02589 stream_index = start_index;
02590 for(;;) {
02591 if (++stream_index >= is->ic->nb_streams)
02592 {
02593 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02594 {
02595 stream_index = -1;
02596 goto the_end;
02597 } else
02598 stream_index = 0;
02599 }
02600 if (stream_index == start_index)
02601 return;
02602 st = ic->streams[stream_index];
02603 if (st->codec->codec_type == codec_type) {
02604 /* check that parameters are OK */
02605 switch(codec_type) {
02606 case AVMEDIA_TYPE_AUDIO:
02607 if (st->codec->sample_rate != 0 &&
02608 st->codec->channels != 0)
02609 goto the_end;
02610 break;
02611 case AVMEDIA_TYPE_VIDEO:
02612 case AVMEDIA_TYPE_SUBTITLE:
02613 goto the_end;
02614 default:
02615 break;
02616 }
02617 }
02618 }
02619 the_end:
02620 stream_component_close(is, start_index);
02621 stream_component_open(is, stream_index);
02622 }
02623
02624
02625 static void toggle_full_screen(void)
02626 {
02627 is_full_screen = !is_full_screen;
02628 video_open(cur_stream);
02629 }
02630
02631 static void toggle_pause(void)
02632 {
02633 if (cur_stream)
02634 stream_toggle_pause(cur_stream);
02635 step = 0;
02636 }
02637
02638 static void step_to_next_frame(void)
02639 {
02640 if (cur_stream) {
02641 /* if the stream is paused unpause it, then step */
02642 if (cur_stream->paused)
02643 stream_toggle_pause(cur_stream);
02644 }
02645 step = 1;
02646 }
02647
02648 static void toggle_audio_display(void)
02649 {
02650 if (cur_stream) {
02651 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02652 cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
02653 fill_rectangle(screen,
02654 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02655 bgcolor);
02656 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02657 }
02658 }
02659
02660 /* handle an event sent by the GUI */
02661 static void event_loop(void)
02662 {
02663 SDL_Event event;
02664 double incr, pos, frac;
02665
02666 for(;;) {
02667 double x;
02668 SDL_WaitEvent(&event);
02669 switch(event.type) {
02670 case SDL_KEYDOWN:
02671 if (exit_on_keydown) {
02672 do_exit();
02673 break;
02674 }
02675 switch(event.key.keysym.sym) {
02676 case SDLK_ESCAPE:
02677 case SDLK_q:
02678 do_exit();
02679 break;
02680 case SDLK_f:
02681 toggle_full_screen();
02682 break;
02683 case SDLK_p:
02684 case SDLK_SPACE:
02685 toggle_pause();
02686 break;
02687 case SDLK_s:
02688 step_to_next_frame();
02689 break;
02690 case SDLK_a:
02691 if (cur_stream)
02692 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
02693 break;
02694 case SDLK_v:
02695 if (cur_stream)
02696 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
02697 break;
02698 case SDLK_t:
02699 if (cur_stream)
02700 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
02701 break;
02702 case SDLK_w:
02703 toggle_audio_display();
02704 break;
02705 case SDLK_LEFT:
02706 incr = -10.0;
02707 goto do_seek;
02708 case SDLK_RIGHT:
02709 incr = 10.0;
02710 goto do_seek;
02711 case SDLK_UP:
02712 incr = 60.0;
02713 goto do_seek;
02714 case SDLK_DOWN:
02715 incr = -60.0;
02716 do_seek:
02717 if (cur_stream) {
02718 if (seek_by_bytes) {
02719 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
02720 pos= cur_stream->video_current_pos;
02721 }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
02722 pos= cur_stream->audio_pkt.pos;
02723 }else
02724 pos = avio_tell(cur_stream->ic->pb);
02725 if (cur_stream->ic->bit_rate)
02726 incr *= cur_stream->ic->bit_rate / 8.0;
02727 else
02728 incr *= 180000.0;
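/* convert the seek increment from seconds to bytes: at 1 Mb/s a 10 s seek becomes */
/* 10 * 1000000 / 8 = 1.25 MB; the 180000.0 fallback assumes roughly 1.44 Mb/s */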
02729 pos += incr;
02730 stream_seek(cur_stream, pos, incr, 1);
02731 } else {
02732 pos = get_master_clock(cur_stream);
02733 pos += incr;
02734 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
02735 }
02736 }
02737 break;
02738 default:
02739 break;
02740 }
02741 break;
02742 case SDL_MOUSEBUTTONDOWN:
02743 if (exit_on_mousedown) {
02744 do_exit();
02745 break;
02746 }
02747 case SDL_MOUSEMOTION:
02748 if(event.type ==SDL_MOUSEBUTTONDOWN){
02749 x= event.button.x;
02750 }else{
02751 if(event.motion.state != SDL_PRESSED)
02752 break;
02753 x= event.motion.x;
02754 }
02755 if (cur_stream) {
02756 if(seek_by_bytes || cur_stream->ic->duration<=0){
02757 uint64_t size= avio_size(cur_stream->ic->pb);
02758 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
02759 }else{
02760 int64_t ts;
02761 int ns, hh, mm, ss;
02762 int tns, thh, tmm, tss;
02763 tns = cur_stream->ic->duration/1000000LL;
02764 thh = tns/3600;
02765 tmm = (tns%3600)/60;
02766 tss = (tns%60);
02767 frac = x/cur_stream->width;
02768 ns = frac*tns;
02769 hh = ns/3600;
02770 mm = (ns%3600)/60;
02771 ss = (ns%60);
02772 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
02773 hh, mm, ss, thh, tmm, tss);
02774 ts = frac*cur_stream->ic->duration;
02775 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
02776 ts += cur_stream->ic->start_time;
02777 stream_seek(cur_stream, ts, 0, 0);
02778 }
02779 }
02780 break;
02781 case SDL_VIDEORESIZE:
02782 if (cur_stream) {
02783 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02784 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02785 screen_width = cur_stream->width = event.resize.w;
02786 screen_height= cur_stream->height= event.resize.h;
02787 }
02788 break;
02789 case SDL_QUIT:
02790 case FF_QUIT_EVENT:
02791 do_exit();
02792 break;
02793 case FF_ALLOC_EVENT:
02794 video_open(event.user.data1);
02795 alloc_picture(event.user.data1);
02796 break;
02797 case FF_REFRESH_EVENT:
02798 video_refresh(event.user.data1);
02799 cur_stream->refresh=0;
02800 break;
02801 default:
02802 break;
02803 }
02804 }
02805 }
02806
02807 static int opt_frame_size(const char *opt, const char *arg)
02808 {
02809 if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
02810 fprintf(stderr, "Incorrect frame size\n");
02811 return AVERROR(EINVAL);
02812 }
02813 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02814 fprintf(stderr, "Frame size must be a multiple of 2\n");
02815 return AVERROR(EINVAL);
02816 }
02817 return 0;
02818 }
02819
02820 static int opt_width(const char *opt, const char *arg)
02821 {
02822 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02823 return 0;
02824 }
02825
02826 static int opt_height(const char *opt, const char *arg)
02827 {
02828 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02829 return 0;
02830 }
02831
02832 static int opt_format(const char *opt, const char *arg)
02833 {
02834 file_iformat = av_find_input_format(arg);
02835 if (!file_iformat) {
02836 fprintf(stderr, "Unknown input format: %s\n", arg);
02837 return AVERROR(EINVAL);
02838 }
02839 return 0;
02840 }
02841
02842 static int opt_frame_pix_fmt(const char *opt, const char *arg)
02843 {
02844 frame_pix_fmt = av_get_pix_fmt(arg);
02845 return 0;
02846 }
02847
02848 static int opt_sync(const char *opt, const char *arg)
02849 {
02850 if (!strcmp(arg, "audio"))
02851 av_sync_type = AV_SYNC_AUDIO_MASTER;
02852 else if (!strcmp(arg, "video"))
02853 av_sync_type = AV_SYNC_VIDEO_MASTER;
02854 else if (!strcmp(arg, "ext"))
02855 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02856 else {
02857 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02858 exit(1);
02859 }
02860 return 0;
02861 }
02862
02863 static int opt_seek(const char *opt, const char *arg)
02864 {
02865 start_time = parse_time_or_die(opt, arg, 1);
02866 return 0;
02867 }
02868
02869 static int opt_duration(const char *opt, const char *arg)
02870 {
02871 duration = parse_time_or_die(opt, arg, 1);
02872 return 0;
02873 }
02874
02875 static int opt_thread_count(const char *opt, const char *arg)
02876 {
02877 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02878 #if !HAVE_THREADS
02879 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
02880 #endif
02881 return 0;
02882 }
02883
02884 static int opt_show_mode(const char *opt, const char *arg)
02885 {
02886 show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
02887 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
02888 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
02889 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
02890 return 0;
02891 }
02892
02893 static int opt_input_file(const char *opt, const char *filename)
02894 {
02895 if (input_filename) {
02896 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
02897 filename, input_filename);
02898 exit(1);
02899 }
02900 if (!strcmp(filename, "-"))
02901 filename = "pipe:";
02902 input_filename = filename;
02903 return 0;
02904 }
02905
02906 static const OptionDef options[] = {
02907 #include "cmdutils_common_opts.h"
02908 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
02909 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
02910 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
02911 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
02912 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
02913 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
02914 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
02915 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
02916 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
02917 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
02918 { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
02919 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
02920 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
02921 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
02922 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
02923 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
02924 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
02925 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
02926 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
02927 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
02928 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
02929 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
02930 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
02931 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
02932 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
02933 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
02934 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
02935 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
02936 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
02937 { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
02938 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
02939 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
02940 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
02941 { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
02942 { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
02943 #if CONFIG_AVFILTER
02944 { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
02945 #endif
02946 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
02947 { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
02948 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
02949 { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
02950 { NULL, },
02951 };
02952
02953 static void show_usage(void)
02954 {
02955 printf("Simple media player\n");
02956 printf("usage: ffplay [options] input_file\n");
02957 printf("\n");
02958 }
02959
02960 static int opt_help(const char *opt, const char *arg)
02961 {
02962 av_log_set_callback(log_callback_help);
02963 show_usage();
02964 show_help_options(options, "Main options:\n",
02965 OPT_EXPERT, 0);
02966 show_help_options(options, "\nAdvanced options:\n",
02967 OPT_EXPERT, OPT_EXPERT);
02968 printf("\n");
02969 av_opt_show2(avcodec_opts[0], NULL,
02970 AV_OPT_FLAG_DECODING_PARAM, 0);
02971 printf("\n");
02972 av_opt_show2(avformat_opts, NULL,
02973 AV_OPT_FLAG_DECODING_PARAM, 0);
02974 #if !CONFIG_AVFILTER
02975 printf("\n");
02976 av_opt_show2(sws_opts, NULL,
02977 AV_OPT_FLAG_ENCODING_PARAM, 0);
02978 #endif
02979 printf("\nWhile playing:\n"
02980 "q, ESC quit\n"
02981 "f toggle full screen\n"
02982 "p, SPC pause\n"
02983 "a cycle audio channel\n"
02984 "v cycle video channel\n"
02985 "t cycle subtitle channel\n"
02986 "w show audio waves\n"
02987 "s activate frame-step mode\n"
02988 "left/right seek backward/forward 10 seconds\n"
02989 "down/up seek backward/forward 1 minute\n"
02990 "mouse click seek to percentage in file corresponding to fraction of width\n"
02991 );
02992 return 0;
02993 }
02994
02995
02996 int main(int argc, char **argv)
02997 {
02998 int flags;
02999
03000 av_log_set_flags(AV_LOG_SKIP_REPEATED);
03001
03002 /* register all codecs, demuxers and protocols */
03003 avcodec_register_all();
03004 #if CONFIG_AVDEVICE
03005 avdevice_register_all();
03006 #endif
03007 #if CONFIG_AVFILTER
03008 avfilter_register_all();
03009 #endif
03010 av_register_all();
03011
03012 init_opts();
03013
03014 show_banner();
03015
03016 parse_options(argc, argv, options, opt_input_file);
03017
03018 if (!input_filename) {
03019 show_usage();
03020 fprintf(stderr, "An input file must be specified\n");
03021 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
03022 exit(1);
03023 }
03024
03025 if (display_disable) {
03026 video_disable = 1;
03027 }
03028 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03029 if (audio_disable)
03030 flags &= ~SDL_INIT_AUDIO;
03031 #if !defined(__MINGW32__) && !defined(__APPLE__)
03032 flags |= SDL_INIT_EVENTTHREAD;
03033 #endif
03034 if (SDL_Init (flags)) {
03035 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03036 exit(1);
03037 }
03038
03039 if (!display_disable) {
03040 #if HAVE_SDL_VIDEO_SIZE
03041 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03042 fs_screen_width = vi->current_w;
03043 fs_screen_height = vi->current_h;
03044 #endif
03045 }
03046
03047 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03048 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03049 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03050
03051 av_init_packet(&flush_pkt);
03052 flush_pkt.data= "FLUSH";
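/* flush_pkt is a sentinel: after a seek it is queued on each packet queue and */
/* recognised by pointer comparison (pkt->data == flush_pkt.data), so the decoder */
/* threads call avcodec_flush_buffers() before decoding post-seek data */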
03053
03054 cur_stream = stream_open(input_filename, file_iformat);
03055
03056 event_loop();
03057
03058 /* never returns */
03059
03060 return 0;
03061 }