path: root/drivers/theora
Diffstat (limited to 'drivers/theora')
-rw-r--r--   drivers/theora/SCsub                     3
-rw-r--r--   drivers/theora/codec.h                   2
-rw-r--r--   drivers/theora/decode.c                  46
-rw-r--r--   drivers/theora/video_stream_theora.cpp   483
-rw-r--r--   drivers/theora/video_stream_theora.h     61
5 files changed, 374 insertions, 221 deletions
diff --git a/drivers/theora/SCsub b/drivers/theora/SCsub
index ecabce6c9d..faa1ede6a7 100644
--- a/drivers/theora/SCsub
+++ b/drivers/theora/SCsub
@@ -1,4 +1,3 @@
-
Import('env')
sources = [
@@ -34,5 +33,3 @@ sources = [
if env['use_theoraplayer_binary'] != "yes":
env.drivers_sources += sources
-
-
diff --git a/drivers/theora/codec.h b/drivers/theora/codec.h
index 5c2669630c..9b816e5cfd 100644
--- a/drivers/theora/codec.h
+++ b/drivers/theora/codec.h
@@ -15,7 +15,7 @@
********************************************************************/
-/**\mainpage
+/**\file
*
* \section intro Introduction
*
diff --git a/drivers/theora/decode.c b/drivers/theora/decode.c
index 7be66463d8..882606ae77 100644
--- a/drivers/theora/decode.c
+++ b/drivers/theora/decode.c
@@ -1611,28 +1611,35 @@ static void oc_filter_hedge(unsigned char *_dst,int _dst_ystride,
int sum1;
int bx;
int by;
+ int _rlimit1;
+ int _rlimit2;
rdst=_dst;
rsrc=_src;
- for(bx=0;bx<8;bx++){
+ for(bx=0;bx<8;++bx){
cdst=rdst;
csrc=rsrc;
- for(by=0;by<10;by++){
+ _rlimit1 = _rlimit2 = _flimit;
+ for(by=0;by<10;++by){
r[by]=*csrc;
csrc+=_src_ystride;
}
sum0=sum1=0;
- for(by=0;by<4;by++){
- sum0+=abs(r[by+1]-r[by]);
- sum1+=abs(r[by+5]-r[by+6]);
+ for(by=0;by<4;++by){
+ int sumed = abs(r[by+1]-r[by]);
+ sum0+=sumed;
+ _rlimit1-=sumed;
+ sumed = abs(r[by+5]-r[by+6]);
+ sum1+=sumed;
+ _rlimit2-=sumed;
}
*_variance0+=OC_MINI(255,sum0);
*_variance1+=OC_MINI(255,sum1);
- if(sum0<_flimit&&sum1<_flimit&&r[5]-r[4]<_qstep&&r[4]-r[5]<_qstep){
+ if(_rlimit1&&_rlimit2&&!(r[5]-r[4]-_qstep)&&!(r[4]-r[5]-_qstep)){
*cdst=(unsigned char)(r[0]*3+r[1]*2+r[2]+r[3]+r[4]+4>>3);
cdst+=_dst_ystride;
*cdst=(unsigned char)(r[0]*2+r[1]+r[2]*2+r[3]+r[4]+r[5]+4>>3);
cdst+=_dst_ystride;
- for(by=0;by<4;by++){
+ for(by=0;by<4;++by){
*cdst=(unsigned char)(r[by]+r[by+1]+r[by+2]+r[by+3]*2+
r[by+4]+r[by+5]+r[by+6]+4>>3);
cdst+=_dst_ystride;
@@ -1642,13 +1649,13 @@ static void oc_filter_hedge(unsigned char *_dst,int _dst_ystride,
*cdst=(unsigned char)(r[5]+r[6]+r[7]+r[8]*2+r[9]*3+4>>3);
}
else{
- for(by=1;by<=8;by++){
+ for(by=1;by<=8;++by){
*cdst=(unsigned char)r[by];
cdst+=_dst_ystride;
}
}
- rdst++;
- rsrc++;
+ ++rdst;
+ ++rsrc;
}
}
@@ -1663,19 +1670,26 @@ static void oc_filter_vedge(unsigned char *_dst,int _dst_ystride,
int sum1;
int bx;
int by;
+ int _rlimit1;
+ int _rlimit2;
cdst=_dst;
- for(by=0;by<8;by++){
+ for(by=0;by<8;++by){
rsrc=cdst-1;
rdst=cdst;
- for(bx=0;bx<10;bx++)r[bx]=*rsrc++;
+ for(bx=0;bx<10;++bx)r[bx]=*rsrc++;
sum0=sum1=0;
- for(bx=0;bx<4;bx++){
- sum0+=abs(r[bx+1]-r[bx]);
- sum1+=abs(r[bx+5]-r[bx+6]);
+ _rlimit1 = _rlimit2 = _flimit;
+ for(bx=0;bx<4;++bx){
+ int sumed = abs(r[bx+1]-r[bx]);
+ sum0+=sumed;
+ _rlimit1-=sumed;
+ sumed = abs(r[bx+5]-r[bx+6]);
+ sum1+=sumed;
+ _rlimit2-=sumed;
}
_variances[0]+=OC_MINI(255,sum0);
_variances[1]+=OC_MINI(255,sum1);
- if(sum0<_flimit&&sum1<_flimit&&r[5]-r[4]<_qstep&&r[4]-r[5]<_qstep){
+ if(_rlimit1&&_rlimit2&&!(r[5]-r[4]-_qstep)&&!(r[4]-r[5]-_qstep)){
*rdst++=(unsigned char)(r[0]*3+r[1]*2+r[2]+r[3]+r[4]+4>>3);
*rdst++=(unsigned char)(r[0]*2+r[1]+r[2]*2+r[3]+r[4]+r[5]+4>>3);
for(bx=0;bx<4;bx++){
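
The two hunks above rewrite the early-out test in oc_filter_hedge/oc_filter_vedge: instead of comparing the finished sums against _flimit, the patch keeps running limit counters (_rlimit1/_rlimit2) that are decremented while the absolute differences accumulate. For reference, here is a minimal standalone sketch of the pre-patch smoothness test on one 10-pixel run, with a hypothetical helper name; it only restates the condition visible in the removed lines.

    #include <cstdlib>

    /* Hypothetical helper: decide whether the deringing taps may be applied
       across a block edge, as the removed lines test it: both 4-difference
       activity sums must stay below the filter limit, and the step across
       the edge (r[4] vs r[5]) must be smaller than the quantizer step in
       either direction. */
    static int edge_is_smooth(const unsigned char r[10], int flimit, int qstep) {
      int sum0 = 0, sum1 = 0, by;
      for (by = 0; by < 4; by++) {
        sum0 += abs(r[by + 1] - r[by]);     /* activity on one side of the edge */
        sum1 += abs(r[by + 5] - r[by + 6]); /* activity on the other side       */
      }
      return sum0 < flimit && sum1 < flimit &&
             r[5] - r[4] < qstep && r[4] - r[5] < qstep;
    }
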
diff --git a/drivers/theora/video_stream_theora.cpp b/drivers/theora/video_stream_theora.cpp
index 214185cf88..ed2565177a 100644
--- a/drivers/theora/video_stream_theora.cpp
+++ b/drivers/theora/video_stream_theora.cpp
@@ -1,16 +1,12 @@
#ifdef THEORA_ENABLED
-#if 0
+
#include "video_stream_theora.h"
#include "os/os.h"
#include "yuv2rgb.h"
+#include "globals.h"
-AudioStream::UpdateMode VideoStreamTheora::get_update_mode() const {
-
- return UPDATE_IDLE;
-};
-
-int VideoStreamTheora:: buffer_data() {
+int VideoStreamPlaybackTheora:: buffer_data() {
char *buffer=ogg_sync_buffer(&oy,4096);
int bytes=file->get_buffer((uint8_t*)buffer, 4096);
@@ -18,33 +14,13 @@ int VideoStreamTheora:: buffer_data() {
return(bytes);
}
-int VideoStreamTheora::queue_page(ogg_page *page){
+int VideoStreamPlaybackTheora::queue_page(ogg_page *page){
if(theora_p)ogg_stream_pagein(&to,page);
if(vorbis_p)ogg_stream_pagein(&vo,page);
return 0;
}
-Image VideoStreamTheora::peek_frame() const {
-
- if (frames_pending == 0)
- return Image();
- return Image(size.x, size.y, 0, format, frame_data);
-};
-
-Image VideoStreamTheora::pop_frame() {
-
- Image ret = peek_frame();
- frames_pending = 0;
-
- return ret;
-};
-
-int VideoStreamTheora::get_pending_frame_count() const {
-
- return frames_pending;
-};
-
-void VideoStreamTheora::video_write(void){
+void VideoStreamPlaybackTheora::video_write(void){
th_ycbcr_buffer yuv;
int y_offset, uv_offset;
th_decode_ycbcr_out(td,yuv);
@@ -78,25 +54,31 @@ void VideoStreamTheora::video_write(void){
int pitch = 4;
frame_data.resize(size.x * size.y * pitch);
- DVector<uint8_t>::Write w = frame_data.write();
- char* dst = (char*)w.ptr();
+ {
+ DVector<uint8_t>::Write w = frame_data.write();
+ char* dst = (char*)w.ptr();
- uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);
+ uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);
- if (px_fmt == TH_PF_444) {
+ if (px_fmt == TH_PF_444) {
- yuv444_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
+ yuv444_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
- } else if (px_fmt == TH_PF_422) {
+ } else if (px_fmt == TH_PF_422) {
- yuv422_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
+ yuv422_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
- } else if (px_fmt == TH_PF_420) {
+ } else if (px_fmt == TH_PF_420) {
- yuv420_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[2].data, (uint8_t*)yuv[1].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
- };
+ yuv420_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[2].data, (uint8_t*)yuv[1].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
+ };
+
+ format = Image::FORMAT_RGBA;
+ }
+
+ Image img(size.x,size.y,0,Image::FORMAT_RGBA,frame_data); //zero copy image creation
- format = Image::FORMAT_RGBA;
+ texture->set_data(img); //zero copy send to visual server
/*
@@ -194,9 +176,9 @@ void VideoStreamTheora::video_write(void){
frames_pending = 1;
}
-void VideoStreamTheora::clear() {
+void VideoStreamPlaybackTheora::clear() {
- if (file_name == "")
+ if (!file)
return;
if(vorbis_p){
@@ -218,7 +200,7 @@ void VideoStreamTheora::clear() {
}
ogg_sync_clear(&oy);
- file_name = "";
+ //file_name = "";
theora_p = 0;
vorbis_p = 0;
@@ -226,10 +208,14 @@ void VideoStreamTheora::clear() {
frames_pending = 0;
videobuf_time = 0;
+ if (file) {
+ memdelete(file);
+ }
+ file=NULL;
playing = false;
};
-void VideoStreamTheora::set_file(const String& p_file) {
+void VideoStreamPlaybackTheora::set_file(const String& p_file) {
ogg_packet op;
th_setup_info *ts = NULL;
@@ -241,7 +227,7 @@ void VideoStreamTheora::set_file(const String& p_file) {
file = FileAccess::open(p_file, FileAccess::READ);
ERR_FAIL_COND(!file);
- audio_frames_wrote = 0;
+
ogg_sync_init(&oy);
@@ -256,6 +242,10 @@ void VideoStreamTheora::set_file(const String& p_file) {
/* Ogg file open; parse the headers */
/* Only interested in Vorbis/Theora streams */
int stateflag = 0;
+
+ int audio_track_skip=audio_track;
+
+
while(!stateflag){
int ret=buffer_data();
if(ret==0)break;
@@ -281,9 +271,21 @@ void VideoStreamTheora::set_file(const String& p_file) {
copymem(&to,&test,sizeof(test));
theora_p=1;
}else if(!vorbis_p && vorbis_synthesis_headerin(&vi,&vc,&op)>=0){
+
+
/* it is vorbis */
- copymem(&vo,&test,sizeof(test));
- vorbis_p=1;
+ if (audio_track_skip) {
+ vorbis_info_clear(&vi);
+ vorbis_comment_clear(&vc);
+ ogg_stream_clear(&test);
+ vorbis_info_init(&vi);
+ vorbis_comment_init(&vc);
+
+ audio_track_skip--;
+ } else {
+ copymem(&vo,&test,sizeof(test));
+ vorbis_p=1;
+ }
}else{
/* whatever it is, we don't care about it */
ogg_stream_clear(&test);
@@ -386,6 +388,8 @@ void VideoStreamTheora::set_file(const String& p_file) {
size.x = w;
size.y = h;
+ texture->create(w,h,Image::FORMAT_RGBA,Texture::FLAG_FILTER|Texture::FLAG_VIDEO_SURFACE);
+
}else{
/* tear down the partial theora setup */
th_info_clear(&ti);
@@ -399,7 +403,8 @@ void VideoStreamTheora::set_file(const String& p_file) {
vorbis_block_init(&vd,&vb);
fprintf(stderr,"Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
vo.serialno,vi.channels,vi.rate);
- _setup(vi.channels, vi.rate);
+ //_setup(vi.channels, vi.rate);
+
}else{
/* tear down the partial vorbis setup */
vorbis_info_clear(&vi);
@@ -409,229 +414,319 @@ void VideoStreamTheora::set_file(const String& p_file) {
playing = false;
buffering=true;
time=0;
+ audio_frames_wrote=0;
};
-float VideoStreamTheora::get_time() const {
+float VideoStreamPlaybackTheora::get_time() const {
//print_line("total: "+itos(get_total())+" todo: "+itos(get_todo()));
//return MAX(0,time-((get_total())/(float)vi.rate));
- return time-((get_total())/(float)vi.rate);
+ return time-AudioServer::get_singleton()->get_output_delay()-delay_compensation;//-((get_total())/(float)vi.rate);
};
-void VideoStreamTheora::update() {
+Ref<Texture> VideoStreamPlaybackTheora::get_texture() {
+
+ return texture;
+}
+
+void VideoStreamPlaybackTheora::update(float p_delta) {
if (!playing) {
//printf("not playing\n");
return;
};
- double ctime =AudioServer::get_singleton()->get_mix_time();
+ //double ctime =AudioServer::get_singleton()->get_mix_time();
- if (last_update_time) {
- double delta = (ctime-last_update_time);
- time+=delta;
- //print_line("delta: "+rtos(delta));
- }
- last_update_time=ctime;
+ //print_line("play "+rtos(p_delta));
+ time+=p_delta;
+ if (videobuf_time>get_time())
+ return; //no new frames need to be produced
- int audio_todo = get_todo();
- ogg_packet op;
- int audio_pending = 0;
+ bool frame_done=false;
+ bool audio_done=false;
+ while (!frame_done || !audio_done) {
+ //a frame needs to be produced
- while (vorbis_p && audio_todo) {
- int ret;
- float **pcm;
-
- /* if there's pending, decoded audio, grab it */
- if ((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0) {
-
- audio_pending = ret;
- int16_t* out = get_write_buffer();
- int count = 0;
- int to_read = MIN(ret, audio_todo);
- for (int i=0; i<to_read; i++) {
-
- for(int j=0;j<vi.channels;j++){
- int val=Math::fast_ftoi(pcm[j][i]*32767.f);
- if(val>32767)val=32767;
- if(val<-32768)val=-32768;
- out[count++] = val;
- };
- };
- int tr = vorbis_synthesis_read(&vd, to_read);
- audio_todo -= to_read;
- audio_frames_wrote += to_read;
- write(to_read);
- audio_pending -= to_read;
- if (audio_todo==0)
- buffering=false;
+ ogg_packet op;
+ bool audio_pending = false;
- } else {
+ while (vorbis_p) {
+ int ret;
+ float **pcm;
+
+ bool buffer_full=false;
+
+ /* if there's pending, decoded audio, grab it */
+ if ((ret=vorbis_synthesis_pcmout(&vd,&pcm))>0) {
+
+
+
+ const int AUXBUF_LEN=4096;
+ int to_read = ret;
+ int16_t aux_buffer[AUXBUF_LEN];
+
+ while(to_read) {
+
+ int m = MIN(AUXBUF_LEN/vi.channels,to_read);
+
+ int count = 0;
+
+ for(int j=0;j<m;j++){
+ for(int i=0;i<vi.channels;i++){
+
+ int val=Math::fast_ftoi(pcm[i][j]*32767.f);
+ if(val>32767)val=32767;
+ if(val<-32768)val=-32768;
+ aux_buffer[count++] = val;
+ }
+ }
+
+ if (mix_callback) {
+ int mixed = mix_callback(mix_udata,aux_buffer,m);
+ to_read-=mixed;
+ if (mixed!=m) { //could mix no more
+ buffer_full=true;
+ break;
+ }
+ } else {
+ to_read-=m; //just pretend we sent the audio
+ }
+
- /* no pending audio; is there a pending packet to decode? */
- if (ogg_stream_packetout(&vo,&op)>0){
- if(vorbis_synthesis(&vb,&op)==0) { /* test for success! */
- vorbis_synthesis_blockin(&vd,&vb);
}
- } else { /* we need more data; break out to suck in another page */
- //printf("need moar data\n");
- break;
- };
- }
- }
- while(theora_p && !videobuf_ready){
- /* theora is one in, one out... */
- if(ogg_stream_packetout(&to,&op)>0){
+ int tr = vorbis_synthesis_read(&vd, ret-to_read);
- if(pp_inc){
- pp_level+=pp_inc;
- th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,
- sizeof(pp_level));
- pp_inc=0;
- }
- /*HACK: This should be set after a seek or a gap, but we might not have
- a granulepos for the first packet (we only have them for the last
- packet on a page), so we just set it as often as we get it.
- To do this right, we should back-track from the last packet on the
- page and compute the correct granulepos for the first packet after
- a seek or a gap.*/
- if(op.granulepos>=0){
- th_decode_ctl(td,TH_DECCTL_SET_GRANPOS,&op.granulepos,
- sizeof(op.granulepos));
- }
- ogg_int64_t videobuf_granulepos;
- if(th_decode_packetin(td,&op,&videobuf_granulepos)==0){
- videobuf_time=th_granule_time(td,videobuf_granulepos);
- //printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
-
- /* is it already too old to be useful? This is only actually
- useful cosmetically after a SIGSTOP. Note that we have to
- decode the frame even if we don't show it (for now) due to
- keyframing. Soon enough libtheora will be able to deal
- with non-keyframe seeks. */
-
- if(videobuf_time>=get_time())
- videobuf_ready=1;
- else{
- /*If we are too slow, reduce the pp level.*/
- pp_inc=pp_level>0?-1:0;
+ audio_pending=true;
+
+
+ if (vd.granulepos>=0) {
+ // print_line("wrote: "+itos(audio_frames_wrote)+" gpos: "+itos(vd.granulepos));
}
+
+ //print_line("mix audio!");
+
+ audio_frames_wrote+=ret-to_read;
+
+ //print_line("AGP: "+itos(vd.granulepos)+" added "+itos(ret-to_read));
+
+
+ } else {
+
+ /* no pending audio; is there a pending packet to decode? */
+ if (ogg_stream_packetout(&vo,&op)>0){
+ if(vorbis_synthesis(&vb,&op)==0) { /* test for success! */
+ vorbis_synthesis_blockin(&vd,&vb);
+ }
+ } else { /* we need more data; break out to suck in another page */
+ //printf("need moar data\n");
+ break;
+ };
}
- } else
- break;
- }
- if (/*!videobuf_ready && */ audio_pending == 0 && file->eof_reached()) {
- printf("video done, stopping\n");
- stop();
- return;
- };
+ audio_done = videobuf_time < (audio_frames_wrote/float(vi.rate));
+
+ if (buffer_full)
+ break;
+ }
- if (!videobuf_ready || audio_todo > 0){
- /* no data yet for somebody. Grab another page */
+ while(theora_p && !frame_done){
+ /* theora is one in, one out... */
+ if(ogg_stream_packetout(&to,&op)>0){
- buffer_data();
- while(ogg_sync_pageout(&oy,&og)>0){
- queue_page(&og);
+
+ if(pp_inc){
+ pp_level+=pp_inc;
+ th_decode_ctl(td,TH_DECCTL_SET_PPLEVEL,&pp_level,
+ sizeof(pp_level));
+ pp_inc=0;
+ }
+ /*HACK: This should be set after a seek or a gap, but we might not have
+ a granulepos for the first packet (we only have them for the last
+ packet on a page), so we just set it as often as we get it.
+ To do this right, we should back-track from the last packet on the
+ page and compute the correct granulepos for the first packet after
+ a seek or a gap.*/
+ if(op.granulepos>=0){
+ th_decode_ctl(td,TH_DECCTL_SET_GRANPOS,&op.granulepos,
+ sizeof(op.granulepos));
+ }
+ ogg_int64_t videobuf_granulepos;
+ if(th_decode_packetin(td,&op,&videobuf_granulepos)==0){
+ videobuf_time=th_granule_time(td,videobuf_granulepos);
+
+ //printf("frame time %f, play time %f, ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
+
+ /* is it already too old to be useful? This is only actually
+ useful cosmetically after a SIGSTOP. Note that we have to
+ decode the frame even if we don't show it (for now) due to
+ keyframing. Soon enough libtheora will be able to deal
+ with non-keyframe seeks. */
+
+ if(videobuf_time>=get_time())
+ frame_done=true;
+ else{
+ /*If we are too slow, reduce the pp level.*/
+ pp_inc=pp_level>0?-1:0;
+ }
+ }
+
+ } else
+ break;
}
- }
- /* If playback has begun, top audio buffer off immediately. */
- //if(stateflag) audio_write_nonblocking();
+ if (file && /*!videobuf_ready && */ file->eof_reached()) {
+ printf("video done, stopping\n");
+ stop();
+ return;
+ };
+ #if 0
+ if (!videobuf_ready || audio_todo > 0){
+ /* no data yet for somebody. Grab another page */
+
+ buffer_data();
+ while(ogg_sync_pageout(&oy,&og)>0){
+ queue_page(&og);
+ }
+ }
+ #else
- /* are we at or past time for this video frame? */
- if(videobuf_ready && videobuf_time<=get_time()){
- video_write();
- videobuf_ready=0;
- } else {
- //printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
- }
+ if (!frame_done || !audio_done){
+ //what's the point of waiting for audio to grab a page?
- float tdiff=videobuf_time-get_time();
- /*If we have lots of extra time, increase the post-processing level.*/
- if(tdiff>ti.fps_denominator*0.25/ti.fps_numerator){
- pp_inc=pp_level<pp_level_max?1:0;
- }
- else if(tdiff<ti.fps_denominator*0.05/ti.fps_numerator){
- pp_inc=pp_level>0?-1:0;
+ buffer_data();
+ while(ogg_sync_pageout(&oy,&og)>0){
+ queue_page(&og);
+ }
+ }
+ #endif
+ /* If playback has begun, top audio buffer off immediately. */
+ //if(stateflag) audio_write_nonblocking();
+
+ /* are we at or past time for this video frame? */
+ if(videobuf_ready && videobuf_time<=get_time()){
+
+ //video_write();
+ //videobuf_ready=0;
+ } else {
+ //printf("frame at %f not ready (time %f), ready %i\n", (float)videobuf_time, get_time(), videobuf_ready);
+ }
+
+ float tdiff=videobuf_time-get_time();
+ /*If we have lots of extra time, increase the post-processing level.*/
+ if(tdiff>ti.fps_denominator*0.25/ti.fps_numerator){
+ pp_inc=pp_level<pp_level_max?1:0;
+ }
+ else if(tdiff<ti.fps_denominator*0.05/ti.fps_numerator){
+ pp_inc=pp_level>0?-1:0;
+ }
}
-};
-bool VideoStreamTheora::_can_mix() const {
+ video_write();
- return !buffering;
};
-void VideoStreamTheora::play() {
+
+void VideoStreamPlaybackTheora::play() {
if (!playing)
- last_update_time=0;
+ time=0;
playing = true;
+ delay_compensation=Globals::get_singleton()->get("audio/video_delay_compensation_ms");
+ delay_compensation/=1000.0;
+
};
-void VideoStreamTheora::stop() {
+void VideoStreamPlaybackTheora::stop() {
+ if (playing) {
+ clear();
+ set_file(file_name); //reset
+ }
playing = false;
- last_update_time=0;
+ time=0;
};
-bool VideoStreamTheora::is_playing() const {
+bool VideoStreamPlaybackTheora::is_playing() const {
return playing;
};
-void VideoStreamTheora::set_paused(bool p_paused) {
+void VideoStreamPlaybackTheora::set_paused(bool p_paused) {
playing = !p_paused;
};
-bool VideoStreamTheora::is_paused(bool p_paused) const {
+bool VideoStreamPlaybackTheora::is_paused(bool p_paused) const {
return playing;
};
-void VideoStreamTheora::set_loop(bool p_enable) {
+void VideoStreamPlaybackTheora::set_loop(bool p_enable) {
};
-bool VideoStreamTheora::has_loop() const {
+bool VideoStreamPlaybackTheora::has_loop() const {
return false;
};
-float VideoStreamTheora::get_length() const {
+float VideoStreamPlaybackTheora::get_length() const {
return 0;
};
-String VideoStreamTheora::get_stream_name() const {
+String VideoStreamPlaybackTheora::get_stream_name() const {
return "";
};
-int VideoStreamTheora::get_loop_count() const {
+int VideoStreamPlaybackTheora::get_loop_count() const {
return 0;
};
-float VideoStreamTheora::get_pos() const {
+float VideoStreamPlaybackTheora::get_pos() const {
return get_time();
};
-void VideoStreamTheora::seek_pos(float p_time) {
+void VideoStreamPlaybackTheora::seek_pos(float p_time) {
// no
};
-VideoStreamTheora::VideoStreamTheora() {
+void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback,void *p_userdata) {
+
+ mix_callback=p_callback;
+ mix_udata=p_userdata;
+}
+
+int VideoStreamPlaybackTheora::get_channels() const{
+
+ return vi.channels;
+}
+
+void VideoStreamPlaybackTheora::set_audio_track(int p_idx) {
+
+ audio_track=p_idx;
+}
+
+int VideoStreamPlaybackTheora::get_mix_rate() const{
+
+ return vi.rate;
+}
+
+
+
+VideoStreamPlaybackTheora::VideoStreamPlaybackTheora() {
file = NULL;
theora_p = 0;
@@ -640,11 +735,17 @@ VideoStreamTheora::VideoStreamTheora() {
playing = false;
frames_pending = 0;
videobuf_time = 0;
- last_update_time =0;
+
buffering=false;
+ texture = Ref<ImageTexture>( memnew(ImageTexture ));
+ mix_callback=NULL;
+ mix_udata=NULL;
+ audio_track=0;
+ delay_compensation=0;
+ audio_frames_wrote=0;
};
-VideoStreamTheora::~VideoStreamTheora() {
+VideoStreamPlaybackTheora::~VideoStreamPlaybackTheora() {
clear();
@@ -653,10 +754,16 @@ VideoStreamTheora::~VideoStreamTheora() {
};
-RES ResourceFormatLoaderVideoStreamTheora::load(const String &p_path,const String& p_original_path) {
+RES ResourceFormatLoaderVideoStreamTheora::load(const String &p_path,const String& p_original_path, Error *r_error) {
+ if (r_error)
+ *r_error=ERR_FILE_CANT_OPEN;
VideoStreamTheora *stream = memnew(VideoStreamTheora);
stream->set_file(p_path);
+
+ if (r_error)
+ *r_error=OK;
+
return Ref<VideoStreamTheora>(stream);
}
@@ -666,16 +773,16 @@ void ResourceFormatLoaderVideoStreamTheora::get_recognized_extensions(List<Strin
p_extensions->push_back("ogv");
}
bool ResourceFormatLoaderVideoStreamTheora::handles_type(const String& p_type) const {
- return (p_type=="AudioStream" || p_type=="VideoStreamTheora");
+ return (p_type=="VideoStream" || p_type=="VideoStreamTheora");
}
String ResourceFormatLoaderVideoStreamTheora::get_resource_type(const String &p_path) const {
String exl=p_path.extension().to_lower();
if (exl=="ogm" || exl=="ogv")
- return "AudioStreamTheora";
+ return "VideoStreamTheora";
return "";
}
#endif
-#endif
+
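
The largest change above is the new VideoStreamPlaybackTheora::update(float p_delta) loop, which decodes Vorbis PCM, converts it to interleaved 16-bit samples, and hands it to mix_callback in fixed-size chunks, stopping early when the callback cannot accept a full chunk. Below is a minimal standalone sketch of that hand-off pattern, with hypothetical names and a plain function-pointer callback (the real code uses the class members mix_callback/mix_udata and vi.channels):

    #include <cstdint>
    #include <algorithm>

    typedef int (*MixCallback)(void *udata, const int16_t *frames, int frame_count);

    // Clamp planar float PCM to int16, interleave per frame, and push it to the
    // callback in chunks of at most AUX_LEN samples; returns frames consumed.
    static int push_pcm(float **pcm, int frames, int channels,
                        MixCallback cb, void *udata) {
        const int AUX_LEN = 4096;                 // chunk size used by the patch
        int16_t aux[AUX_LEN];
        int done = 0;
        while (done < frames) {
            int m = std::min(AUX_LEN / channels, frames - done);
            int count = 0;
            for (int j = 0; j < m; j++) {
                for (int c = 0; c < channels; c++) {
                    int v = int(pcm[c][done + j] * 32767.f);
                    v = std::max(-32768, std::min(32767, v)); // clamp to int16
                    aux[count++] = (int16_t)v;
                }
            }
            int mixed = cb(udata, aux, m);        // may consume fewer than m
            done += mixed;
            if (mixed != m)                       // sink full: stop this tick
                break;
        }
        return done;
    }
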
diff --git a/drivers/theora/video_stream_theora.h b/drivers/theora/video_stream_theora.h
index b408f9db13..5540f050f9 100644
--- a/drivers/theora/video_stream_theora.h
+++ b/drivers/theora/video_stream_theora.h
@@ -10,9 +10,9 @@
#include "io/resource_loader.h"
#include "scene/resources/video_stream.h"
-class VideoStreamTheora : public VideoStream {
+class VideoStreamPlaybackTheora : public VideoStreamPlayback {
- OBJ_TYPE(VideoStreamTheora, VideoStream);
+ OBJ_TYPE(VideoStreamPlaybackTheora, VideoStreamPlayback);
enum {
MAX_FRAMES = 4,
@@ -32,6 +32,7 @@ class VideoStreamTheora : public VideoStream {
void video_write(void);
float get_time() const;
+
ogg_sync_state oy;
ogg_page og;
ogg_stream_state vo;
@@ -58,16 +59,19 @@ class VideoStreamTheora : public VideoStream {
double last_update_time;
double time;
+ double delay_compensation;
-protected:
+ Ref<ImageTexture> texture;
- virtual UpdateMode get_update_mode() const;
- virtual void update();
+ AudioMixCallback mix_callback;
+ void* mix_udata;
- void clear();
+ int audio_track;
- virtual bool _can_mix() const;
+protected:
+ void clear();
+
public:
virtual void play();
@@ -92,17 +96,48 @@ public:
void set_file(const String& p_file);
- int get_pending_frame_count() const;
- Image pop_frame();
- Image peek_frame() const;
+ virtual Ref<Texture> get_texture();
+ virtual void update(float p_delta);
+
+ virtual void set_mix_callback(AudioMixCallback p_callback,void *p_userdata);
+ virtual int get_channels() const;
+ virtual int get_mix_rate() const;
+
+ virtual void set_audio_track(int p_idx);
+
+ VideoStreamPlaybackTheora();
+ ~VideoStreamPlaybackTheora();
+};
+
+
+
+class VideoStreamTheora : public VideoStream {
+
+ OBJ_TYPE(VideoStreamTheora,VideoStream);
+
+ String file;
+ int audio_track;
+
+
+public:
+
+ Ref<VideoStreamPlayback> instance_playback() {
+ Ref<VideoStreamPlaybackTheora> pb = memnew( VideoStreamPlaybackTheora );
+ pb->set_audio_track(audio_track);
+ pb->set_file(file);
+ return pb;
+ }
+
+ void set_file(const String& p_file) { file=p_file; }
+ void set_audio_track(int p_track) { audio_track=p_track; }
+
+ VideoStreamTheora() { audio_track=0; }
- VideoStreamTheora();
- ~VideoStreamTheora();
};
class ResourceFormatLoaderVideoStreamTheora : public ResourceFormatLoader {
public:
- virtual RES load(const String &p_path,const String& p_original_path="");
+ virtual RES load(const String &p_path,const String& p_original_path="",Error *r_error=NULL);
virtual void get_recognized_extensions(List<String> *p_extensions) const;
virtual bool handles_type(const String& p_type) const;
virtual String get_resource_type(const String &p_path) const;
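
The header now splits the resource from its decoder: VideoStreamTheora only stores the file name and audio track, and creates a VideoStreamPlaybackTheora on demand via instance_playback(). A hedged usage sketch of that flow, using only calls declared above (my_mix_func, my_userdata, the resource path, and delta are placeholders):

    Ref<VideoStreamTheora> stream = memnew(VideoStreamTheora);
    stream->set_file("res://intro.ogv");        // placeholder path
    stream->set_audio_track(0);

    Ref<VideoStreamPlayback> playback = stream->instance_playback();
    playback->set_mix_callback(my_mix_func, my_userdata); // optional audio sink
    playback->play();

    // each frame:
    playback->update(delta);                    // advances decoding by 'delta' seconds
    Ref<Texture> frame = playback->get_texture();
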