forked from muaz-khan/WebRTC-Experiment
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmjr_video_mosaic.c
More file actions
1859 lines (1757 loc) · 77.3 KB
/
mjr_video_mosaic.c
File metadata and controls
1859 lines (1757 loc) · 77.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*! \file post-processing.c
* \author Aboobeker Sidhik <aboosidhik@gmail.com>
* \copyright GNU General Public License v3
*/
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaimgproc.hpp>
extern "C"
{
#include <arpa/inet.h>
#ifdef __MACH__
#include <machine/endian.h>
#else
#include <endian.h>
#endif
#include <inttypes.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <glib.h>
#include <jansson.h>
#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>
}
using namespace std;
using namespace cv;
/*! Render an OpenCV Mat type code (e.g. CV_8UC3) as a human-readable
 * string such as "8UC3".
 * @param type the packed OpenCV type (depth | (channels-1) << CV_CN_SHIFT)
 * @returns depth name ("8U", "32F", ..., or "User" for unknown depths)
 *          followed by "C" and the channel count */
string type2str(int type) {
    uchar depth = type & CV_MAT_DEPTH_MASK;
    uchar chans = 1 + (type >> CV_CN_SHIFT);
    string r;
    switch ( depth ) {
    case CV_8U:  r = "8U";  break;
    case CV_8S:  r = "8S";  break;
    case CV_16U: r = "16U"; break;
    case CV_16S: r = "16S"; break;
    case CV_32S: r = "32S"; break;
    case CV_32F: r = "32F"; break;
    case CV_64F: r = "64F"; break;
    default:     r = "User"; break;
    }
    r += "C";
    /* Fix: the original appended (chans + '0') as a single character, which
     * produces garbage for channel counts above 9 (OpenCV supports up to
     * 512 channels); format the count as a decimal string instead. */
    r += to_string(static_cast<int>(chans));
    return r;
}
#define fourcc 0x30395056
#define interface (&vpx_codec_vp9_dx_algo)
static int kVp9FrameMarker = 2;
static int kMinTileWidthB64 = 4;
static int kMaxTileWidthB64 = 64;
static int kRefFrames = 8;
static int kRefsPerFrame = 3;
static int kRefFrames_LOG2 = 3;
static int kVpxCsBt601 = 1;
static int kVpxCsSrgb = 7;
static int kVpxCrStudioRange = 0;
static int kVpxCrFullRange = 1;
static int kMiSizeLog2 = 3;
static int bit_depth_ = 0;
static int profile_ = -1;
static int show_existing_frame_ = 0;
static int key_ = 0;
static int altref_ = 0;
static int error_resilient_mode_ = 0;
static int intra_only_ = 0;
static int reset_frame_context_ = 0;
static int color_space_ = 0;
static int color_range_ = 0;
static int subsampling_x_ = 0;
static int subsampling_y_ = 0;
static int refresh_frame_flags_ = 0;
static int width_;
static int height_;
static int row_tiles_;
static int column_tiles_;
static int frame_parallel_mode_;
static int fm_count;
#define htonll(x) ((1==htonl(1)) ? (x) : ((gint64)htonl((x) & 0xFFFFFFFF) << 32) | htonl((x) >> 32))
#define ntohll(x) ((1==ntohl(1)) ? (x) : ((gint64)ntohl((x) & 0xFFFFFFFF) << 32) | ntohl((x) >> 32))
/* RTP header (RFC 3550), bit fields ordered to match the wire layout on
 * either endianness. */
typedef struct janus_pp_rtp_header
{
#if __BYTE_ORDER == __BIG_ENDIAN
uint16_t version:2;
uint16_t padding:1;
uint16_t extension:1;
uint16_t csrccount:4;
uint16_t markerbit:1;
uint16_t type:7;
#elif __BYTE_ORDER == __LITTLE_ENDIAN
uint16_t csrccount:4;
uint16_t extension:1;
uint16_t padding:1;
uint16_t version:2;
uint16_t type:7;
uint16_t markerbit:1;
#endif
uint16_t seq_number;
uint32_t timestamp;
uint32_t ssrc;
uint32_t csrc[16];
} janus_pp_rtp_header;
/* RTP header extension preamble (RFC 5285). */
typedef struct janus_pp_rtp_header_extension {
uint16_t type;
uint16_t length;
} janus_pp_rtp_header_extension;
/* Index entry for one recorded RTP packet in the .mjr file; kept in a
 * doubly linked list ordered by sequence number/timestamp. */
typedef struct janus_pp_frame_packet {
uint16_t seq; /* RTP Sequence number */
uint64_t ts; /* RTP Timestamp */
uint16_t len; /* Length of the data */
int pt; /* Payload type of the data */
long offset; /* Offset of the data in the file */
int skip; /* Bytes to skip, besides the RTP header */
uint8_t drop; /* Whether this packet can be dropped (e.g., padding)*/
struct janus_pp_frame_packet *next;
struct janus_pp_frame_packet *prev;
} janus_pp_frame_packet;
/* A decoded frame together with its encoded packet, in a doubly linked
 * list. */
typedef struct frame_packet {
AVFrame *frame;
AVPacket *pkt;
struct frame_packet *next;
struct frame_packet *prev;
} frame_packet;
/* Per-recording state: source .mjr file, parsed packet index, decoder
 * context and the counters used while demuxing/decoding it. */
typedef struct file_av {
char *source; /* Path of the source .mjr file */
FILE *file; /* Open handle on the source file */
long fsize; /* Total file size in bytes */
long offset; /* Current read offset */
int opus ; /* Non-zero for an Opus audio recording */
int vp9; /* Non-zero for a VP9 video recording */
int count;
gboolean parsed_header; /* Whether the .mjr header was parsed already */
janus_pp_frame_packet *list; /* Head of the parsed packet index */
janus_pp_frame_packet *last; /* Tail of the parsed packet index */
gint64 c_time;
gint64 w_time;
uint32_t last_ts; /* Last RTP timestamp seen */
uint32_t reset; /* Timestamp-reset tracking */
AVCodecContext *codec_ctx; /* Decoder context for this recording */
AVCodec *codec_dec; /* Decoder in use (e.g. libvpx-vp9) */
int times_resetted;
int numBytes;
uint8_t *received_frame; /* Reassembly buffer for a full frame */
uint8_t *buffer; /* Scratch buffer for one RTP payload (advanced while parsing) */
uint8_t *start; /* Original start of `buffer`, used to rewind it */
int max_width, max_height, fps;
int min_ts_diff, max_ts_diff; /* Min/max RTP timestamp deltas (fps estimate) */
uint32_t post_reset_pkts;
int len, frameLen;
int audio_len;
int keyFrame;
uint32_t keyframe_ts;
int64_t audio_ts;
int audio_pts;
int video_pts;
int audio;
int video;
gchar *buf;
struct file_av *next;
struct file_av *prev;
}file_av;
/* Doubly linked list of file_av entries. */
typedef struct file_av_list {
size_t size;
struct file_av *head;
struct file_av *tail;
}file_av_list;
/* One participant: its audio and video source paths plus their file_av
 * list. */
typedef struct file_combine {
int num;
char *audio_source;
char *video_source;
file_av_list *file_av_list_1;
struct file_combine *next;
struct file_combine *prev;
} file_combine;
/* Doubly linked list of file_combine entries (all participants). */
typedef struct file_combine_list {
size_t size;
struct file_combine *head;
struct file_combine *tail;
}file_combine_list;
/* Logging knobs (Janus conventions) */
int janus_log_level = 4;
gboolean janus_log_timestamps = FALSE;
gboolean janus_log_colors = TRUE;
/* Main-loop flag: non-zero while processing should continue.
 * NOTE(review): this is written from a signal handler but declared as a
 * plain int; it should be `volatile sig_atomic_t` for well-defined
 * behaviour — confirm no other translation unit relies on the type. */
int working = 0;
/* Signal handler */
void janus_pp_handle_signal(int signum);
/* SIGINT/SIGTERM handler: ask the processing loops to stop. */
void janus_pp_handle_signal(int signum) {
working = 0;
}
/*! \file pp-webm.c
* \author Lorenzo Miniero <lorenzo@meetecho.com>
* \copyright GNU General Public License v3
* \brief Post-processing to generate .webm files
* \details Implementation of the post-processing code (based on FFmpeg)
* needed to generate .webm files out of VP8/VP9 RTP frames.
*
* \ingroup postprocessing
* \ref postprocessing
*/
/* WebRTC stuff (VP8/VP9) */
#if defined(__ppc__) || defined(__ppc64__)
/* Byte-swap a 16-bit value on big-endian PPC; identity elsewhere */
# define swap2(d) \
((d&0x000000ff)<<8) | \
((d&0x0000ff00)>>8)
#else
# define swap2(d) d
#endif
/* True when the linked libavcodec is at least version major.minor */
#define LIBAVCODEC_VER_AT_LEAST(major, minor) \
(LIBAVCODEC_VERSION_MAJOR > major || \
(LIBAVCODEC_VERSION_MAJOR == major && \
LIBAVCODEC_VERSION_MINOR >= minor))
#if LIBAVCODEC_VER_AT_LEAST(51, 42)
#define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
#endif
/* WebM output: module-level muxer state set up by janus_pp_webm_create() */
static AVFormatContext *fctx; /* Output container context */
static AVStream *vStream; /* VP9 video stream */
static AVStream *aStream; /* Opus audio stream */
static int max_width = 0, max_height = 0, fps = 0;
static AVRational audio_timebase;
static AVRational video_timebase;
static AVOutputFormat *fmt; /* Output format (webm) */
static AVCodec *audio_codec; /* Opus encoder */
static AVCodec *video_codec; /* VP9 encoder */
static AVDictionary *opt_arg; /* Encoder/muxer options (unused, NULL) */
static AVCodecContext *context; /* Audio (Opus) encoder context */
static AVCodecContext *video_context; /* Video (VP9) encoder context */
int janus_pp_webm_create(char *destination) {
if(destination == NULL)
return -1;
#if LIBAVCODEC_VERSION_MAJOR < 55
printf("Your FFmpeg version does not support VP9\n");
return -1;
#endif
/* Setup FFmpeg */
av_register_all();
avformat_alloc_output_context2(&fctx, NULL, NULL, destination);
if (!fctx) {
printf("Could not deduce output format from file extension: using WEBM.\n");
avformat_alloc_output_context2(&fctx, fmt, "webm", destination);
}
if (!fctx) {
return -1;
}
fmt = fctx->oformat;
audio_codec = avcodec_find_encoder(AV_CODEC_ID_OPUS);
video_codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
vStream = avformat_new_stream(fctx, NULL);
aStream = avformat_new_stream(fctx, NULL);
if (!aStream) {
printf("Could not allocate audio stream\n");
return -1;
}
if (!vStream) {
printf("Could not allocate video stream\n");
return -1;
}
vStream->id = fctx->nb_streams-1;
aStream->id = fctx->nb_streams-1;
video_context = avcodec_alloc_context3(video_codec);
context = avcodec_alloc_context3(audio_codec);
if (!context) {
printf("Could not alloc an encoding context\n");
return -1;
}
if (!video_context) {
printf("Could not alloc an encoding context\n");
return -1;
}
context->codec_type = AVMEDIA_TYPE_AUDIO;
context->codec_id = AV_CODEC_ID_OPUS;
context->sample_fmt = AV_SAMPLE_FMT_S16;
context->bit_rate = 64000;
context->sample_rate = 48000;
context->channel_layout = AV_CH_LAYOUT_STEREO;
context->channels = 2;
aStream->time_base = (AVRational){ 1, context->sample_rate};
audio_timebase = (AVRational){ 1, context->sample_rate};
video_context->codec_type = AVMEDIA_TYPE_VIDEO;
video_context->codec_id = AV_CODEC_ID_VP9;
video_context->width = 1200;
video_context->height = 900;
vStream->time_base = (AVRational){1, 30};
video_timebase = (AVRational){1, 30};
video_context->time_base =vStream->time_base;
video_context->pix_fmt = AV_PIX_FMT_YUV420P;
/* Some formats want stream headers to be separate. */
if (fctx->oformat->flags & AVFMT_GLOBALHEADER) {
context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
video_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
int ret;
/* open it */
opt_arg = NULL;
ret = avcodec_open2(context, audio_codec, &opt_arg);
if (ret < 0) {
printf("Could not open audio codec\n");
return -1;
}
/*
ret = avcodec_open2(node->video_context, node->video_codec, &node->opt_arg);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
return NULL;
}
*/
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(vStream->codecpar, video_context);
if (ret < 0) {
printf("Could not copy the stream parameters\n");
return -1;
}
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(aStream->codecpar, context);
if (ret < 0) {
printf("Could not copy the stream parameters\n");
return -1;
}
av_dump_format(fctx, 0, destination, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&fctx->pb, destination, AVIO_FLAG_WRITE);
if (ret < 0) {
printf("Could not open file\n");
return -1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(fctx, &opt_arg);
if (ret < 0) {
printf("Error occurred when opening output file\n");
return -1;
}
/*
fctx = avformat_alloc_context();
if(fctx == NULL) {
printf( "Error allocating context\n");
return -1;
}
fctx->oformat = av_guess_format("webm", NULL, NULL);
if(fctx->oformat == NULL) {
printf( "Error guessing format\n");
return -1;
}
snprintf(fctx->filename, sizeof(fctx->filename), "%s", destination);
//~ vStream = av_new_stream(fctx, 0);
vStream = avformat_new_stream(fctx, 0);
if(vStream == NULL) {
printf("Error adding stream\n");
return -1;
}
//~ avcodec_get_context_defaults2(vStream->codec, CODEC_TYPE_VIDEO);
#if LIBAVCODEC_VER_AT_LEAST(53, 21)
avcodec_get_context_defaults3(vStream->codec, AVMEDIA_TYPE_VIDEO);
#else
avcodec_get_context_defaults2(vStream->codec, AVMEDIA_TYPE_VIDEO);
#endif
#if LIBAVCODEC_VER_AT_LEAST(54, 25)
#if LIBAVCODEC_VERSION_MAJOR >= 55
vStream->codec->codec_id = vp8 ? AV_CODEC_ID_VP8 : AV_CODEC_ID_VP9;
#else
vStream->codec->codec_id = AV_CODEC_ID_VP8;
#endif
#else
vStream->codec->codec_id = CODEC_ID_VP8;
#endif
//~ vStream->codec->codec_type = CODEC_TYPE_VIDEO;
vStream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vStream->codec->time_base = (AVRational){1, fps};
vStream->codec->width = max_width;
vStream->codec->height = max_height;
vStream->codec->pix_fmt = PIX_FMT_YUV420P;
if (fctx->flags & AVFMT_GLOBALHEADER)
vStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
//~ fctx->timestamp = 0;
//~ if(url_fopen(&fctx->pb, fctx->filename, URL_WRONLY) < 0) {
if(avio_open(&fctx->pb, fctx->filename, AVIO_FLAG_WRITE) < 0) {
printf( "Error opening file for output\n");
return -1;
}
//~ memset(¶meters, 0, sizeof(AVFormatParameters));
//~ av_set_parameters(fctx, ¶meters);
//~ fctx->preload = (int)(0.5 * AV_TIME_BASE);
//~ fctx->max_delay = (int)(0.7 * AV_TIME_BASE);
//~ if(av_write_header(fctx) < 0) {
if(avformat_write_header(fctx, NULL) < 0) {
printf( "Error writing header\n");
return -1;
}
*/
return 0;
}
/* Scan the parsed packet list of a VP9 recording before decoding, to
 * collect stream metadata: min/max RTP timestamp deltas (used to derive
 * the fps estimate) and the maximum resolution advertised in the VP9
 * payload descriptor's scalability structure (SS).
 * Falls back to 640x480 / 1 fps when nothing usable is found.
 * @param file_av_1 recording whose `list` index was already built
 * @returns 0 on success, -1 if the file or packet list is missing */
int janus_pp_webm_preprocess(file_av *file_av_1) {
if(!file_av_1->file || !file_av_1->list)
return -1;
janus_pp_frame_packet *tmp = file_av_1->list;
int bytes = 0;
file_av_1->min_ts_diff = 0;
file_av_1->max_ts_diff = 0;
char prebuffer[1500];
memset(prebuffer, 0, 1500);
while(tmp) {
/* Track timestamp deltas between consecutive frames and warn about
 * sequence-number gaps (lost packets) */
if(tmp == file_av_1->list || tmp->ts > tmp->prev->ts) {
if(tmp->prev != NULL && tmp->ts > tmp->prev->ts) {
int diff = tmp->ts - tmp->prev->ts;
if(file_av_1->min_ts_diff == 0 || file_av_1->min_ts_diff > diff)
file_av_1->min_ts_diff = diff;
if(file_av_1->max_ts_diff == 0 || file_av_1->max_ts_diff < diff)
file_av_1->max_ts_diff = diff;
}
if(tmp->prev != NULL && (tmp->seq - tmp->prev->seq > 1)) {
printf("Lost a packet here? (got seq %"SCNu16" after %"SCNu16", time ~%"SCNu64"s)\n",
tmp->seq, tmp->prev->seq, (tmp->ts-file_av_1->list->ts)/90000);
}
}
if(tmp->drop) {
/* We marked this packet as one to drop, before */
printf("Dropping previously marked video packet (time ~%"SCNu64"s)\n", (tmp->ts-file_av_1->list->ts)/90000);
tmp = tmp->next;
continue;
}
/* https://tools.ietf.org/html/draft-ietf-payload-vp9 */
/* Read the first bytes of the payload, and get the first octet (VP9 Payload Descriptor) */
/* NOTE(review): only 16 bytes are read, but the descriptor walk below can
 * advance past them; it stays inside the zeroed 1500-byte prebuffer, so
 * it reads zeros rather than out of bounds — values may still be bogus. */
fseek(file_av_1->file, tmp->offset+12+tmp->skip, SEEK_SET);
bytes = fread(prebuffer, sizeof(char), 16, file_av_1->file);
if(bytes != 16)
printf("Didn't manage to read all the bytes we needed (%d < 16)...\n", bytes);
char *buffer = (char *)&prebuffer;
uint8_t vp9pd = *buffer;
/* VP9 payload descriptor flag bits: I (picture id), P (inter-picture
 * predicted), L (layer indices), F (flexible mode), V (SS present) */
uint8_t ibit = (vp9pd & 0x80);
uint8_t pbit = (vp9pd & 0x40);
uint8_t lbit = (vp9pd & 0x20);
uint8_t fbit = (vp9pd & 0x10);
uint8_t vbit = (vp9pd & 0x02);
//printf("%" PRIu8 ",%" PRIu8 ",%" PRIu8 ",%" PRIu8 ",%" PRIu8 "\n", ibit,pbit,lbit,fbit,vbit);
buffer++;
if(ibit) {
/* Read the PictureID octet */
vp9pd = *buffer;
uint16_t picid = vp9pd, wholepicid = picid;
uint8_t mbit = (vp9pd & 0x80);
if(!mbit) {
/* 7-bit PictureID */
buffer++;
} else {
/* 15-bit PictureID, two octets */
memcpy(&picid, buffer, sizeof(uint16_t));
wholepicid = ntohs(picid);
picid = (wholepicid & 0x7FFF);
buffer += 2;
}
}
if(lbit) {
/* Skip layer indices */
buffer++;
if(!fbit) {
/* Non-flexible mode, skip TL0PICIDX */
buffer++;
}
}
if(fbit && pbit) {
/* Skip reference indices (each octet's low bit says another follows) */
uint8_t nbit = 1;
while(nbit) {
vp9pd = *buffer;
nbit = (vp9pd & 0x01);
buffer++;
}
}
if(vbit) {
/* Parse and skip SS */
vp9pd = *buffer;
uint n_s = (vp9pd & 0xE0) >> 5;
n_s++;
uint8_t ybit = (vp9pd & 0x10);
if(ybit) {
/* Iterate on all spatial layers and get resolution */
buffer++;
uint i=0;
for(i=0; i<n_s; i++) {
/* Width */
uint16_t *w = (uint16_t *)buffer;
int width = ntohs(*w);
buffer += 2;
/* Height */
uint16_t *h = (uint16_t *)buffer;
int height = ntohs(*h);
buffer += 2;
/* Keep the largest advertised resolution */
if(width > file_av_1->max_width)
file_av_1->max_width = width;
if(height > file_av_1->max_height)
file_av_1->max_height = height;
}
}
}
tmp = tmp->next;
}
int mean_ts = file_av_1->min_ts_diff; /* FIXME: was an actual mean, (max_ts_diff+min_ts_diff)/2; */
/* 90000 is the RTP video clock rate; ts delta per frame -> frames/sec */
file_av_1->fps = (90000/(mean_ts > 0 ? mean_ts : 30));
printf( " -- %dx%d (fps [%d,%d] ~ %d)\n", file_av_1->max_width, file_av_1->max_height, file_av_1->min_ts_diff, file_av_1->max_ts_diff, file_av_1->fps);
if(file_av_1->max_width == 0 && file_av_1->max_height == 0) {
printf("No key frame?? assuming 640x480...\n");
file_av_1->max_width = 640;
file_av_1->max_height = 480;
}
if(file_av_1->fps == 0) {
printf("No fps?? assuming 1...\n");
file_av_1->fps = 1; /* Prevent divide by zero error */
}
return 0;
}
/* Read one bit of frame_ at bit position bit_offset_ (MSB first within
 * each byte); returns 0 when the offset is past the end of the buffer.
 * NOTE(review): bit_offset_ is passed BY VALUE, so the bit_offset_++ below
 * updates only the local copy and is lost on return. All callers
 * (VpxReadLiteral, ParseUncompressedHeader, ...) pass the same offset
 * variable repeatedly and clearly expect it to advance; as written, every
 * call with the same arguments re-reads the same bit. This looks like a
 * bug — the parameter should be size_t* (or the new offset returned) —
 * but fixing it requires changing every caller in this file at once. */
int ReadBit(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
const size_t off = bit_offset_;
const size_t byte_offset = off >> 3;
const int bit_shift = 7 - (int)(off & 0x7);
if (byte_offset < frame_size_) {
const int bit = (frame_[byte_offset] >> bit_shift) & 1;
bit_offset_++;
return bit;
} else {
return 0;
}
}
/* Read a `bits`-wide unsigned literal, MSB first, starting at bit_offset_.
 * NOTE(review): relies on ReadBit() advancing bit_offset_, which it does
 * not (the offset is passed by value) — every iteration reads the same
 * bit. See the note on ReadBit(). */
int VpxReadLiteral(int bits, const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
int value = 0;
int bit;
for (bit = bits - 1; bit >= 0; --bit)
value |= ReadBit(frame_,frame_size_,bit_offset_) << bit;
return value;
}
/* Check for the VP9 sync code (0x49 0x83 0x42) at the current bit offset.
 * Returns non-zero when the three bytes match.
 * NOTE(review): each VpxReadLiteral call receives the same (by-value)
 * offset, so all three reads actually decode the same byte — see the
 * note on ReadBit(). */
int ValidateVp9SyncCode(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
const int sync_code_0 = VpxReadLiteral(8,frame_,frame_size_,bit_offset_);
const int sync_code_1 = VpxReadLiteral(8,frame_,frame_size_,bit_offset_);
const int sync_code_2 = VpxReadLiteral(8,frame_,frame_size_,bit_offset_);
return (sync_code_0 == 0x49 && sync_code_1 == 0x83 && sync_code_2 == 0x42);
}
/* Parse the color config of a VP9 uncompressed header into the module
 * globals bit_depth_, color_space_, color_range_ and subsampling_x_/y_.
 * Follows the VP9 spec: sRGB implies full range and 4:4:4; profiles 1/3
 * carry explicit subsampling bits plus a reserved bit. */
void ParseColorSpace(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
bit_depth_ = 0;
if (profile_ >= 2)
bit_depth_ = ReadBit(frame_,frame_size_,bit_offset_) ? 12 : 10;
else
bit_depth_ = 8;
color_space_ = VpxReadLiteral(3,frame_,frame_size_,bit_offset_);
if (color_space_ != kVpxCsSrgb) {
color_range_ = ReadBit(frame_,frame_size_,bit_offset_);
if (profile_ == 1 || profile_ == 3) {
subsampling_x_ = ReadBit(frame_,frame_size_,bit_offset_);
subsampling_y_ = ReadBit(frame_,frame_size_,bit_offset_);
ReadBit(frame_,frame_size_,bit_offset_);
} else {
subsampling_y_ = subsampling_x_ = 1;
}
} else {
color_range_ = kVpxCrFullRange;
if (profile_ == 1 || profile_ == 3) {
subsampling_y_ = subsampling_x_ = 0;
ReadBit(frame_,frame_size_,bit_offset_);
}
}
}
/* Parse frame width/height (each coded as value-1 in 16 bits) into the
 * width_/height_ globals and log them. */
void ParseFrameResolution(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
width_ = VpxReadLiteral(16,frame_,frame_size_,bit_offset_) + 1;
height_ = VpxReadLiteral(16,frame_,frame_size_,bit_offset_) + 1;
printf("width:%d, height:%d\n", width_,height_);
}
/* Skip the optional render size and read the frame-parallel-decoding flag
 * into frame_parallel_mode_ (forced to 1 in error-resilient mode, per the
 * VP9 spec). */
void ParseFrameParallelMode(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
if (ReadBit(frame_,frame_size_,bit_offset_)) {
VpxReadLiteral(16,frame_,frame_size_,bit_offset_); // display width
VpxReadLiteral(16,frame_,frame_size_,bit_offset_); // display height
}
if (!error_resilient_mode_) {
ReadBit(frame_,frame_size_,bit_offset_); // Consume refresh frame context
frame_parallel_mode_ = ReadBit(frame_,frame_size_,bit_offset_);
} else {
frame_parallel_mode_ = 1;
}
}
/* Skip one optional delta-Q field: a presence bit followed, when set, by
 * a 4-bit value (sign + magnitude). */
void SkipDeltaQ(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
if (ReadBit(frame_,frame_size_,bit_offset_))
VpxReadLiteral(4,frame_,frame_size_,bit_offset_);
}
/* Round value up to the nearest multiple of 2^n. */
int AlignPowerOfTwo(int value, int n) {
    const int mask = (1 << n) - 1;
    return (value + mask) & ~mask;
}
/* Walk the remainder of the VP9 uncompressed header (frame context index,
 * loop filter, quantization, segmentation flag) and derive the tile layout
 * into row_tiles_/column_tiles_.
 * NOTE(review): tile info is only computed when the segmentation flag is
 * clear; per the VP9 header layout, segmentation parameters (when enabled)
 * precede the always-present tile info, so the `!segmentation_enabled`
 * gate looks like a shortcut that silently skips tile parsing for
 * segmented streams — verify against the spec. Also inherits the
 * by-value bit_offset_ bug described on ReadBit(). */
void ParseTileInfo(const uint8_t* frame_, size_t frame_size_, size_t bit_offset_) {
VpxReadLiteral(2,frame_,frame_size_,bit_offset_); // Consume frame context index
// loopfilter
VpxReadLiteral(6,frame_,frame_size_,bit_offset_); // Consume filter level
VpxReadLiteral(3,frame_,frame_size_,bit_offset_); // Consume sharpness level
int mode_ref_delta_enabled = ReadBit(frame_,frame_size_,bit_offset_);
if (mode_ref_delta_enabled) {
int mode_ref_delta_update = ReadBit(frame_,frame_size_,bit_offset_);
if (mode_ref_delta_update) {
const int kMaxRefLFDeltas = 4;
int i;
for (i = 0; i < kMaxRefLFDeltas; ++i) {
if (ReadBit(frame_,frame_size_,bit_offset_))
VpxReadLiteral(7,frame_,frame_size_,bit_offset_); // Consume ref_deltas + sign
}
const int kMaxModeDeltas = 2;
for (i = 0; i < kMaxModeDeltas; ++i) {
if (ReadBit(frame_,frame_size_,bit_offset_))
VpxReadLiteral(7,frame_,frame_size_,bit_offset_); // Consume mode_delta + sign
}
}
}
// quantization
VpxReadLiteral(8,frame_,frame_size_,bit_offset_); // Consume base_q
SkipDeltaQ(frame_,frame_size_,bit_offset_); // y dc
SkipDeltaQ(frame_,frame_size_,bit_offset_); // uv ac
SkipDeltaQ(frame_,frame_size_,bit_offset_); // uv dc
// segmentation
int segmentation_enabled = ReadBit(frame_,frame_size_,bit_offset_);
if (!segmentation_enabled) {
/* Derive the allowed tile-column range from the frame width in
 * 64x64 superblock units, then read the actual log2 tile counts */
const int aligned_width = AlignPowerOfTwo(width_, kMiSizeLog2);
const int mi_cols = aligned_width >> kMiSizeLog2;
const int aligned_mi_cols = AlignPowerOfTwo(mi_cols, kMiSizeLog2);
const int sb_cols = aligned_mi_cols >> 3; // to_sbs(mi_cols);
int min_log2_n_tiles, max_log2_n_tiles;
for (max_log2_n_tiles = 0;
(sb_cols >> max_log2_n_tiles) >= kMinTileWidthB64;
max_log2_n_tiles++) {
}
max_log2_n_tiles--;
if (max_log2_n_tiles < 0)
max_log2_n_tiles = 0;
for (min_log2_n_tiles = 0; (kMaxTileWidthB64 << min_log2_n_tiles) < sb_cols;
min_log2_n_tiles++) {
}
// columns
const int max_log2_tile_cols = max_log2_n_tiles;
const int min_log2_tile_cols = min_log2_n_tiles;
int max_ones = max_log2_tile_cols - min_log2_tile_cols;
int log2_tile_cols = min_log2_tile_cols;
while (max_ones-- && ReadBit(frame_,frame_size_,bit_offset_))
log2_tile_cols++;
// rows
int log2_tile_rows = ReadBit(frame_,frame_size_,bit_offset_);
if (log2_tile_rows)
log2_tile_rows += ReadBit(frame_,frame_size_,bit_offset_);
row_tiles_ = 1 << log2_tile_rows;
column_tiles_ = 1 << log2_tile_cols;
}
}
/* Parse a VP9 uncompressed frame header, filling the module globals
 * (profile_, key_, width_/height_, color config, tile layout, ...).
 * @param frame  start of the frame payload
 * @param length payload size in bytes
 * @returns 1 on success, 0 on NULL/empty input or bad sync code
 * NOTE(review): inherits the by-value bit_offset_ bug described on
 * ReadBit() — the offset never advances between reads, so the decoded
 * fields cannot be trusted as written. */
int ParseUncompressedHeader(const uint8_t* frame, size_t length) {
if (!frame || length == 0)
return 0;
const uint8_t* frame_ = frame;
size_t frame_size_ = length;
size_t bit_offset_ = 0;
/* 2-bit frame marker (should equal kVp9FrameMarker == 2).
 * NOTE(review): read but never validated. */
int bits = 2;
int value = 0;
int bit;
for (bit = bits - 1; bit >= 0; --bit)
value |= ReadBit(frame_, frame_size_,bit_offset_) << bit;
const int frame_marker = value;
printf("marker:%d\n",value);
/* Profile: two bits, plus a reserved bit for profiles > 2 */
profile_ = ReadBit(frame_, frame_size_,bit_offset_);
profile_ |= ReadBit(frame_, frame_size_,bit_offset_) << 1;
if (profile_ > 2)
profile_ += ReadBit(frame_, frame_size_,bit_offset_);
show_existing_frame_ = ReadBit(frame_, frame_size_,bit_offset_);
/* frame_type and show_frame bits are inverted into key_/altref_ */
key_ = !ReadBit(frame_, frame_size_,bit_offset_);
altref_ = !ReadBit(frame_, frame_size_,bit_offset_);
error_resilient_mode_ = ReadBit(frame_, frame_size_,bit_offset_);
if (key_) {
/* Key frame: sync code, then full color/resolution/tile config */
if (!ValidateVp9SyncCode(frame_, frame_size_,bit_offset_)) {
printf("Invalid Sync code!\n");
return 0;
}
//printf("frame_marker:%d, profile:%d\n", frame_marker,profile_);
ParseColorSpace(frame_,frame_size_,bit_offset_);
ParseFrameResolution(frame_,frame_size_,bit_offset_);
ParseFrameParallelMode(frame_,frame_size_,bit_offset_);
ParseTileInfo(frame_,frame_size_,bit_offset_);
} else {
/* Inter (or intra-only) frame */
intra_only_ = altref_ ? ReadBit(frame_,frame_size_,bit_offset_) : 0;
reset_frame_context_ = error_resilient_mode_ ? 0 : VpxReadLiteral(2,frame_,frame_size_,bit_offset_);
if (intra_only_) {
if (!ValidateVp9SyncCode(frame_,frame_size_,bit_offset_)) {
printf("Invalid Sync code!\n");
return 0;
}
if (profile_ > 0) {
ParseColorSpace(frame_,frame_size_,bit_offset_);
} else {
// NOTE: The intra-only frame header does not include the specification
// of either the color format or color sub-sampling in profile 0. VP9
// specifies that the default color format should be YUV 4:2:0 in this
// case (normative).
color_space_ = kVpxCsBt601;
color_range_ = kVpxCrStudioRange;
subsampling_y_ = subsampling_x_ = 1;
bit_depth_ = 8;
}
refresh_frame_flags_ = VpxReadLiteral(kRefFrames,frame_,frame_size_,bit_offset_);
ParseFrameResolution(frame_,frame_size_,bit_offset_);
} else {
refresh_frame_flags_ = VpxReadLiteral(kRefFrames,frame_,frame_size_,bit_offset_);
int i;
/* Consume the three reference frame indices and their sign biases */
for ( i = 0; i < kRefsPerFrame; ++i) {
VpxReadLiteral(kRefFrames_LOG2,frame_,frame_size_,bit_offset_); // Consume ref.
ReadBit(frame_,frame_size_,bit_offset_); // Consume ref sign bias.
}
//printf(" else frame_marker:%d, profile:%d\n", frame_marker,profile_);
int found = 0;
for ( i = 0; i < kRefsPerFrame; ++i) {
if (ReadBit(frame_,frame_size_,bit_offset_)) {
// Found previous reference, width and height did not change since
// last frame.
found = 1;
break;
}
}
if (!found)
ParseFrameResolution(frame_,frame_size_,bit_offset_);
}
}
return 1;
}
int janus_pp_webm_process(file_combine_list *file_combine_list_1, int *working) {
if(!file_combine_list_1)
return -1;
FILE *f;
f = fopen("ss.h264", "wb");
//video decoding of all :
// 1) parse vp9 header and send to decoder for all files
//mux using cuda
// encode using h264 hardware
//mux audio and video into mp4 file
cv::VideoWriter out("output1.h264", CV_FOURCC('x','2', '6', '4'), 32, cv::Size(400,300));
if(!out.isOpened()) {
cout <<"Error! Unable to open video file for output." << std::endl;
exit(-1);
}
AVFormatContext *fctx_v;
AVStream *vStream_v;
int max_width_v = 0, max_height_v = 0, fps_v = 0;
AVRational video_timebase_v;
AVOutputFormat *fmt_v;
AVCodec *video_codec_v;
AVDictionary *opt_arg_v;
AVCodecContext *video_context_v;
char *destination = (char *)malloc(sizeof(char)*128);
destination = "ott.mp4";
/* Setup FFmpeg */
av_register_all();
avformat_alloc_output_context2(&fctx_v, NULL, NULL, destination);
if (!fctx_v) {
printf("Could not deduce output format from file extension: using WEBM.\n");
avformat_alloc_output_context2(&fctx_v, fmt, "mp4", destination);
}
if (!fctx_v) {
return -1;
}
fmt_v = fctx_v->oformat;
video_codec_v = avcodec_find_encoder(AV_CODEC_ID_H264);
vStream_v = avformat_new_stream(fctx_v, NULL);
if (!vStream_v) {
printf("Could not allocate video stream\n");
return -1;
}
vStream_v->id = fctx_v->nb_streams-1;
video_context_v = avcodec_alloc_context3(video_codec_v);
if (!video_context_v) {
printf("Could not alloc an encoding context\n");
return -1;
}
video_context_v->codec_type = AVMEDIA_TYPE_VIDEO;
video_context_v->codec_id = AV_CODEC_ID_H264;
video_context_v->width = 400;
video_context_v->height = 300;
vStream_v->time_base = (AVRational){1, 32};
video_timebase_v = (AVRational){1, 32};
video_context_v->time_base = vStream_v->time_base;
video_context_v->pix_fmt = AV_PIX_FMT_YUV420P;
video_context_v->gop_size = 10;
video_context_v->max_b_frames = 1;
/* Some formats want stream headers to be separate. */
if (fctx_v->oformat->flags & AVFMT_GLOBALHEADER) {
video_context_v->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
int ret;
opt_arg_v = NULL;
/*
ret = avcodec_open2(node->video_context, node->video_codec, &node->opt_arg);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
return NULL;
}
*/
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(vStream_v->codecpar, video_context_v);
if (ret < 0) {
printf("Could not copy the stream parameters\n");
return -1;
}
/* copy the stream parameters to the muxer */
av_dump_format(fctx_v, 0, destination, 1);
/* open the output file, if needed */
if (!(fmt_v->flags & AVFMT_NOFILE)) {
ret = avio_open(&fctx_v->pb, destination, AVIO_FLAG_WRITE);
if (ret < 0) {
printf("Could not open file\n");
return -1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(fctx_v, &opt_arg_v);
if (ret < 0) {
printf("Error occurred when opening output file\n");
return -1;
}
// video decoding
file_combine *file_combine_1 = file_combine_list_1->head;
int j,m;
int frame_cnt = 0;
for(j = 0; j <file_combine_list_1->size; j++) {
file_av *file_av_1 = file_combine_1->file_av_list_1->head;
for(m = 0; m < file_combine_1->file_av_list_1->size; m++) {
if(file_av_1->vp9 == 1) {
avcodec_register_all();
file_av_1->codec_dec = avcodec_find_decoder_by_name("libvpx-vp9");
AVCodec *codec_enc = avcodec_find_encoder(AV_CODEC_ID_H264);
file_av_1->codec_ctx = avcodec_alloc_context3(file_av_1->codec_dec);
AVCodecContext *codec_enc_ctx = avcodec_alloc_context3(codec_enc);
codec_enc_ctx->width = 400;
codec_enc_ctx->height = 300;
codec_enc_ctx->time_base = (AVRational){1, 32};
codec_enc_ctx->framerate = (AVRational){32,1};
codec_enc_ctx->gop_size = 10;
codec_enc_ctx->max_b_frames = 1;
codec_enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
//av_opt_set(codec_enc_ctx->priv_data, "preset", "slow", 0);
if (avcodec_open2(codec_enc_ctx, codec_enc, NULL) < 0)
exit(1);
if (avcodec_open2(file_av_1->codec_ctx, file_av_1->codec_dec, NULL) < 0)
exit(1);
AVPacket *pkt = av_packet_alloc();
if (!pkt)
exit(1);
AVPacket *pkt_enc = av_packet_alloc();
if (!pkt_enc)
exit(1);
AVFrame *frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
file_av_1->codec_ctx->width = 400;
file_av_1->codec_ctx->height = 300;
file_av_1->codec_ctx->framerate = (AVRational){32,1};
janus_pp_frame_packet *tmp = file_av_1->list;
file_av_1->numBytes = file_av_1->max_width*file_av_1->max_height*3;
file_av_1->received_frame = (uint8_t*)g_malloc0(file_av_1->numBytes);
file_av_1->buffer = (uint8_t*)g_malloc0(10000);
file_av_1->start = file_av_1->buffer;
uint8_t *buffer = (uint8_t*)g_malloc0(10000), *start = buffer;
file_av_1->len = 0, file_av_1->frameLen = 0;
file_av_1->audio_len = 0;
file_av_1->keyFrame = 0;
file_av_1->keyframe_ts = 0;
file_av_1->audio_ts = 0;
file_av_1->audio_pts = 0;
file_av_1->video_pts = 0;
file_av_1->audio = 0;
file_av_1->video = 0;
int bytes = 0;
while(*working && tmp != NULL) {
file_av_1->keyFrame = 0;
file_av_1->frameLen = 0;
file_av_1->len = 0;
file_av_1->audio_len = 0;
file_av_1->buf = (gchar*)g_malloc0(1000);
while(1) {
fm_count++;
if(tmp->drop) {
// Check if timestamp changes: marker bit is not mandatory, and may be lost as well
if(tmp->next == NULL || tmp->next->ts > tmp->ts)
break;
tmp = tmp->next;
continue;
}
// RTP payload
buffer = start;
file_av_1->buffer = file_av_1->start;
fseek(file_av_1->file, tmp->offset+12+tmp->skip, SEEK_SET);
file_av_1->len = tmp->len-12-tmp->skip;
bytes = fread(buffer, sizeof(char), file_av_1->len, file_av_1->file);
if(bytes != file_av_1->len)
printf("Didn't manage to read all the bytes we needed (%d < %d)...\n", bytes, file_av_1->len);
int skipped = 0;
uint8_t vp9pd = *buffer;
uint8_t ibit = (vp9pd & 0x80);
uint8_t pbit = (vp9pd & 0x40);
uint8_t lbit = (vp9pd & 0x20);
uint8_t fbit = (vp9pd & 0x10);
uint8_t vbit = (vp9pd & 0x02);
/* Move to the next octet and see what's there */
buffer++;
file_av_1->len--;
skipped++;
if(ibit) {
/* Read the PictureID octet */
vp9pd = *buffer;
uint16_t picid = vp9pd, wholepicid = picid;
uint8_t mbit = (vp9pd & 0x80);
if(!mbit) {
buffer++;
file_av_1->len--;
skipped++;
} else {
memcpy(&picid, buffer, sizeof(uint16_t));
wholepicid = ntohs(picid);
picid = (wholepicid & 0x7FFF);
buffer += 2;
file_av_1->len -= 2;
skipped += 2;
}
}
if(lbit) {
buffer++;
file_av_1->len--;
skipped++;
if(!fbit) {
/* Non-flexible mode, skip TL0PICIDX */
buffer++;
file_av_1->len--;
skipped++;
}