(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
},{}],2:[function(require,module,exports){
(function (global){
var topLevel = typeof global !== 'undefined' ? global :
typeof window !== 'undefined' ? window : {}
var minDoc = require('min-document');
var doccy;
if (typeof document !== 'undefined') {
doccy = document;
} else {
doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'];
if (!doccy) {
doccy = topLevel['__GLOBAL_DOCUMENT_CACHE@4'] = minDoc;
}
}
module.exports = doccy;
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"min-document":1}],3:[function(require,module,exports){
(function (global){
var win;
if (typeof window !== "undefined") {
win = window;
} else if (typeof global !== "undefined") {
win = global;
} else if (typeof self !== "undefined"){
win = self;
} else {
win = {};
}
module.exports = win;
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{}],4:[function(require,module,exports){
/**
* mux.js
*
* Copyright (c) 2016 Brightcove
* All rights reserved.
*
* A stream-based aac to mp4 converter. This utility can be used to
* deliver mp4s to a SourceBuffer on platforms that support native
* Media Source Extensions.
*/
'use strict';
var Stream = require('../utils/stream.js');
// Constants
var AacStream;
/**
* Splits an incoming stream of binary data into ADTS and ID3 Frames.
*/
AacStream = function() {
var
everything = new Uint8Array(),
timeStamp = 0;
AacStream.prototype.init.call(this);
this.setTimestamp = function(timestamp) {
timeStamp = timestamp;
};
this.parseId3TagSize = function(header, byteIndex) {
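// The ID3v2 tag size is stored as a 28-bit "syncsafe" integer: four bytes
// whose high bit is always zero, contributing 7 useful bits each. For
// example, size bytes 0x00 0x00 0x02 0x01 decode to (2 << 7) | 1 = 257
// bytes of tag data; the fixed 10-byte header (plus a 10-byte footer when
// the footer flag in byte 5 is set) is added on top of that.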
var
returnSize = (header[byteIndex + 6] << 21) |
(header[byteIndex + 7] << 14) |
(header[byteIndex + 8] << 7) |
(header[byteIndex + 9]),
flags = header[byteIndex + 5],
footerPresent = (flags & 16) >> 4;
if (footerPresent) {
return returnSize + 20;
}
return returnSize + 10;
};
this.parseAdtsSize = function(header, byteIndex) {
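// Per the ADTS spec, aac_frame_length is a 13-bit field split across the
// header: the low 2 bits of byte 3, all of byte 4, and the top 3 bits of
// byte 5. The value counts the entire frame, including the 7-byte header
// (9 bytes when a CRC is present).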
var
lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
middle = header[byteIndex + 4] << 3,
highTwo = (header[byteIndex + 3] & 0x3) << 11;
return (highTwo | middle) | lowThree;
};
this.push = function(bytes) {
|
||
var
|
||
frameSize = 0,
|
||
byteIndex = 0,
|
||
bytesLeft,
|
||
chunk,
|
||
packet,
|
||
tempLength;
|
||
|
||
// If there are bytes remaining from the last segment, prepend them to the
|
||
// bytes that were pushed in
|
||
if (everything.length) {
|
||
tempLength = everything.length;
// prepend the previously buffered bytes to the newly pushed ones
var carryover = everything;
everything = new Uint8Array(bytes.byteLength + tempLength);
everything.set(carryover.subarray(0, tempLength));
everything.set(bytes, tempLength);
|
||
} else {
|
||
everything = bytes;
|
||
}
|
||
|
||
while (everything.length - byteIndex >= 3) {
|
||
if ((everything[byteIndex] === 'I'.charCodeAt(0)) &&
|
||
(everything[byteIndex + 1] === 'D'.charCodeAt(0)) &&
|
||
(everything[byteIndex + 2] === '3'.charCodeAt(0))) {
|
||
|
||
// Exit early because we don't have enough to parse
|
||
// the ID3 tag header
|
||
if (everything.length - byteIndex < 10) {
|
||
break;
|
||
}
|
||
|
||
// check framesize
|
||
frameSize = this.parseId3TagSize(everything, byteIndex);
|
||
|
||
// Exit early if we don't have enough in the buffer
|
||
// to emit a full packet
|
||
if (frameSize > everything.length - byteIndex) {
|
||
break;
|
||
}
|
||
chunk = {
|
||
type: 'timed-metadata',
|
||
data: everything.subarray(byteIndex, byteIndex + frameSize)
|
||
};
|
||
this.trigger('data', chunk);
|
||
byteIndex += frameSize;
|
||
continue;
|
||
} else if (((everything[byteIndex] & 0xff) === 0xff) &&
|
||
((everything[byteIndex + 1] & 0xf0) === 0xf0)) {
|
||
|
||
// Exit early because we don't have enough to parse
|
||
// the ADTS frame header
|
||
if (everything.length - byteIndex < 7) {
|
||
break;
|
||
}
|
||
|
||
frameSize = this.parseAdtsSize(everything, byteIndex);
|
||
|
||
// Exit early if we don't have enough in the buffer
|
||
// to emit a full packet
|
||
if (frameSize > everything.length - byteIndex) {
|
||
break;
|
||
}
|
||
|
||
packet = {
|
||
type: 'audio',
|
||
data: everything.subarray(byteIndex, byteIndex + frameSize),
|
||
pts: timeStamp,
|
||
dts: timeStamp
|
||
};
|
||
this.trigger('data', packet);
|
||
byteIndex += frameSize;
|
||
continue;
|
||
}
|
||
byteIndex++;
|
||
}
|
||
bytesLeft = everything.length - byteIndex;
|
||
|
||
if (bytesLeft > 0) {
|
||
everything = everything.subarray(byteIndex);
|
||
} else {
|
||
everything = new Uint8Array();
|
||
}
|
||
};
|
||
};
|
||
|
||
AacStream.prototype = new Stream();
|
||
|
||
module.exports = AacStream;
|
||
|
||
},{"../utils/stream.js":33}],5:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2016 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Utilities to detect basic properties and metadata about AAC data.
|
||
*/
|
||
'use strict';
|
||
|
||
var ADTS_SAMPLING_FREQUENCIES = [
|
||
96000,
|
||
88200,
|
||
64000,
|
||
48000,
|
||
44100,
|
||
32000,
|
||
24000,
|
||
22050,
|
||
16000,
|
||
12000,
|
||
11025,
|
||
8000,
|
||
7350
|
||
];
|
||
|
||
var parseSyncSafeInteger = function(data) {
|
||
return (data[0] << 21) |
|
||
(data[1] << 14) |
|
||
(data[2] << 7) |
|
||
(data[3]);
|
||
};
|
||
|
||
// return a percent-encoded representation of the specified byte range
|
||
// @see http://en.wikipedia.org/wiki/Percent-encoding
|
||
var percentEncode = function(bytes, start, end) {
|
||
var i, result = '';
|
||
for (i = start; i < end; i++) {
|
||
result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
|
||
}
|
||
return result;
|
||
};
|
||
|
||
// return the string representation of the specified byte range,
|
||
// interpreted as ISO-8859-1.
|
||
var parseIso88591 = function(bytes, start, end) {
|
||
return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
|
||
};
|
||
|
||
var parseId3TagSize = function(header, byteIndex) {
|
||
var
|
||
returnSize = (header[byteIndex + 6] << 21) |
|
||
(header[byteIndex + 7] << 14) |
|
||
(header[byteIndex + 8] << 7) |
|
||
(header[byteIndex + 9]),
|
||
flags = header[byteIndex + 5],
|
||
footerPresent = (flags & 16) >> 4;
|
||
|
||
if (footerPresent) {
|
||
return returnSize + 20;
|
||
}
|
||
return returnSize + 10;
|
||
};
|
||
|
||
var parseAdtsSize = function(header, byteIndex) {
|
||
var
|
||
lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
|
||
middle = header[byteIndex + 4] << 3,
|
||
highTwo = (header[byteIndex + 3] & 0x3) << 11;
|
||
|
||
return (highTwo | middle) | lowThree;
|
||
};
|
||
|
||
var parseType = function(header, byteIndex) {
|
||
if ((header[byteIndex] === 'I'.charCodeAt(0)) &&
|
||
(header[byteIndex + 1] === 'D'.charCodeAt(0)) &&
|
||
(header[byteIndex + 2] === '3'.charCodeAt(0))) {
|
||
return 'timed-metadata';
|
||
} else if (((header[byteIndex] & 0xff) === 0xff) &&
|
||
((header[byteIndex + 1] & 0xf0) === 0xf0)) {
|
||
return 'audio';
|
||
}
|
||
return null;
|
||
};
|
||
|
||
var parseSampleRate = function(packet) {
|
||
var i = 0;
|
||
|
||
while (i + 5 < packet.length) {
|
||
if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {
|
||
// If a valid header was not found, jump one forward and attempt to
|
||
// find a valid ADTS header starting at the next byte
|
||
i++;
|
||
continue;
|
||
}
|
||
return ADTS_SAMPLING_FREQUENCIES[(packet[i + 2] & 0x3c) >>> 2];
|
||
}
|
||
|
||
return null;
|
||
};
|
||
|
||
var parseAacTimestamp = function(packet) {
|
||
var frameStart, frameSize, frame, frameHeader;
|
||
|
||
// find the start of the first frame and the end of the tag
|
||
frameStart = 10;
|
||
if (packet[5] & 0x40) {
|
||
// advance the frame start past the extended header
|
||
frameStart += 4; // header size field
|
||
frameStart += parseSyncSafeInteger(packet.subarray(10, 14));
|
||
}
|
||
|
||
// parse one or more ID3 frames
|
||
// http://id3.org/id3v2.3.0#ID3v2_frame_overview
|
||
do {
|
||
// determine the number of bytes in this frame
|
||
frameSize = parseSyncSafeInteger(packet.subarray(frameStart + 4, frameStart + 8));
|
||
if (frameSize < 1) {
|
||
return null;
|
||
}
|
||
frameHeader = String.fromCharCode(packet[frameStart],
|
||
packet[frameStart + 1],
|
||
packet[frameStart + 2],
|
||
packet[frameStart + 3]);
|
||
|
||
if (frameHeader === 'PRIV') {
|
||
frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);
|
||
|
||
for (var i = 0; i < frame.byteLength; i++) {
|
||
if (frame[i] === 0) {
|
||
var owner = parseIso88591(frame, 0, i);
|
||
if (owner === 'com.apple.streaming.transportStreamTimestamp') {
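// Per Apple's HLS timed-metadata convention, the PRIV payload after the
// owner string is an 8-octet, big-endian number whose low 33 bits are the
// MPEG-2 presentation timestamp (90kHz clock) of the first sample. The
// arithmetic below assembles the top 31 bits with shifts, then multiplies
// by 4 before adding the low 2 bits to avoid 32-bit overflow.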
|
||
var d = frame.subarray(i + 1);
|
||
var size = ((d[3] & 0x01) << 30) |
|
||
(d[4] << 22) |
|
||
(d[5] << 14) |
|
||
(d[6] << 6) |
|
||
(d[7] >>> 2);
|
||
size *= 4;
|
||
size += d[7] & 0x03;
|
||
|
||
return size;
|
||
}
|
||
break;
|
||
}
|
||
}
|
||
}
|
||
|
||
frameStart += 10; // advance past the frame header
|
||
frameStart += frameSize; // advance past the frame body
|
||
} while (frameStart < packet.byteLength);
|
||
return null;
|
||
};
|
||
|
||
module.exports = {
|
||
parseId3TagSize: parseId3TagSize,
|
||
parseAdtsSize: parseAdtsSize,
|
||
parseType: parseType,
|
||
parseSampleRate: parseSampleRate,
|
||
parseAacTimestamp: parseAacTimestamp
|
||
};
|
||
|
||
},{}],6:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var Stream = require('../utils/stream.js');
|
||
|
||
var AdtsStream;
|
||
|
||
var
|
||
ADTS_SAMPLING_FREQUENCIES = [
|
||
96000,
|
||
88200,
|
||
64000,
|
||
48000,
|
||
44100,
|
||
32000,
|
||
24000,
|
||
22050,
|
||
16000,
|
||
12000,
|
||
11025,
|
||
8000,
|
||
7350
|
||
];
|
||
|
||
/*
|
||
* Accepts an ElementaryStream and emits data events with parsed
|
||
* AAC Audio Frames of the individual packets. Input audio in ADTS
|
||
* format is unpacked and re-emitted as AAC frames.
|
||
*
|
||
* @see http://wiki.multimedia.cx/index.php?title=ADTS
|
||
* @see http://wiki.multimedia.cx/?title=Understanding_AAC
|
||
*/
|
||
AdtsStream = function() {
|
||
var buffer;
|
||
|
||
AdtsStream.prototype.init.call(this);
|
||
|
||
this.push = function(packet) {
|
||
var
|
||
i = 0,
|
||
frameNum = 0,
|
||
frameLength,
|
||
protectionSkipBytes,
|
||
frameEnd,
|
||
oldBuffer,
|
||
sampleCount,
|
||
adtsFrameDuration;
|
||
|
||
if (packet.type !== 'audio') {
|
||
// ignore non-audio data
|
||
return;
|
||
}
|
||
|
||
// Prepend any data in the buffer to the input data so that we can parse
|
||
// AAC frames that cross a PES packet boundary
|
||
if (buffer) {
|
||
oldBuffer = buffer;
|
||
buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);
|
||
buffer.set(oldBuffer);
|
||
buffer.set(packet.data, oldBuffer.byteLength);
|
||
} else {
|
||
buffer = packet.data;
|
||
}
|
||
|
||
// unpack any ADTS frames which have been fully received
|
||
// for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS
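// ADTS header layout (7 bytes, or 9 when a CRC trailer is present):
//   bytes 0-1: 0xFFF syncword, MPEG version, layer, protection_absent
//   byte  2:   profile, sampling_frequency_index, start of channel config
//   bytes 3-5: rest of channel config, then the 13-bit aac_frame_length
//   bytes 5-6: buffer fullness and number_of_raw_data_blocks_in_frame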
|
||
while (i + 5 < buffer.length) {
|
||
|
||
// Look for the start of an ADTS header
|
||
if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {
|
||
// If a valid header was not found, jump one forward and attempt to
|
||
// find a valid ADTS header starting at the next byte
|
||
i++;
|
||
continue;
|
||
}
|
||
|
||
// The protection skip bit tells us if we have 2 bytes of CRC data at the
|
||
// end of the ADTS header
|
||
protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2;
|
||
|
||
// Frame length is a 13 bit integer starting 16 bits from the
|
||
// end of the sync sequence
|
||
frameLength = ((buffer[i + 3] & 0x03) << 11) |
|
||
(buffer[i + 4] << 3) |
|
||
((buffer[i + 5] & 0xe0) >> 5);
|
||
|
||
sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;
|
||
adtsFrameDuration = (sampleCount * 90000) /
|
||
ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2];
|
||
|
||
frameEnd = i + frameLength;
|
||
|
||
// If we don't have enough data to actually finish this ADTS frame, return
|
||
// and wait for more data
|
||
if (buffer.byteLength < frameEnd) {
|
||
return;
|
||
}
|
||
|
||
// Otherwise, deliver the complete AAC frame
|
||
this.trigger('data', {
|
||
pts: packet.pts + (frameNum * adtsFrameDuration),
|
||
dts: packet.dts + (frameNum * adtsFrameDuration),
|
||
sampleCount: sampleCount,
|
||
audioobjecttype: ((buffer[i + 2] >>> 6) & 0x03) + 1,
|
||
channelcount: ((buffer[i + 2] & 1) << 2) |
|
||
((buffer[i + 3] & 0xc0) >>> 6),
|
||
samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2],
|
||
samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,
|
||
// assume ISO/IEC 14496-12 AudioSampleEntry default of 16
|
||
samplesize: 16,
|
||
data: buffer.subarray(i + 7 + protectionSkipBytes, frameEnd)
|
||
});
|
||
|
||
// If the buffer is empty, clear it and return
|
||
if (buffer.byteLength === frameEnd) {
|
||
buffer = undefined;
|
||
return;
|
||
}
|
||
|
||
frameNum++;
|
||
|
||
// Remove the finished frame from the buffer and start the process again
|
||
buffer = buffer.subarray(frameEnd);
|
||
}
|
||
};
|
||
this.flush = function() {
|
||
this.trigger('done');
|
||
};
|
||
};
|
||
|
||
AdtsStream.prototype = new Stream();
|
||
|
||
module.exports = AdtsStream;
|
||
|
||
},{"../utils/stream.js":33}],7:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var Stream = require('../utils/stream.js');
|
||
var ExpGolomb = require('../utils/exp-golomb.js');
|
||
|
||
var H264Stream, NalByteStream;
|
||
var PROFILES_WITH_OPTIONAL_SPS_DATA;
|
||
|
||
/**
|
||
* Accepts a NAL unit byte stream and unpacks the embedded NAL units.
|
||
*/
|
||
NalByteStream = function() {
|
||
var
|
||
syncPoint = 0,
|
||
i,
|
||
buffer;
|
||
NalByteStream.prototype.init.call(this);
|
||
|
||
this.push = function(data) {
|
||
var swapBuffer;
|
||
|
||
if (!buffer) {
|
||
buffer = data.data;
|
||
} else {
|
||
swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);
|
||
swapBuffer.set(buffer);
|
||
swapBuffer.set(data.data, buffer.byteLength);
|
||
buffer = swapBuffer;
|
||
}
|
||
|
||
// Rec. ITU-T H.264, Annex B
|
||
// scan for NAL unit boundaries
|
||
|
||
// a match looks like this:
|
||
// 0 0 1 .. NAL .. 0 0 1
|
||
// ^ sync point ^ i
|
||
// or this:
|
||
// 0 0 1 .. NAL .. 0 0 0
|
||
// ^ sync point ^ i
|
||
|
||
// advance the sync point to a NAL start, if necessary
|
||
for (; syncPoint < buffer.byteLength - 3; syncPoint++) {
|
||
if (buffer[syncPoint + 2] === 1) {
|
||
// the sync point is properly aligned
|
||
i = syncPoint + 5;
|
||
break;
|
||
}
|
||
}
|
||
|
||
while (i < buffer.byteLength) {
|
||
// look at the current byte to determine if we've hit the end of
|
||
// a NAL unit boundary
|
||
switch (buffer[i]) {
|
||
case 0:
|
||
// skip past non-sync sequences
|
||
if (buffer[i - 1] !== 0) {
|
||
i += 2;
|
||
break;
|
||
} else if (buffer[i - 2] !== 0) {
|
||
i++;
|
||
break;
|
||
}
|
||
|
||
// deliver the NAL unit if it isn't empty
|
||
if (syncPoint + 3 !== i - 2) {
|
||
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
|
||
}
|
||
|
||
// drop trailing zeroes
|
||
do {
|
||
i++;
|
||
} while (buffer[i] !== 1 && i < buffer.length);
|
||
syncPoint = i - 2;
|
||
i += 3;
|
||
break;
|
||
case 1:
|
||
// skip past non-sync sequences
|
||
if (buffer[i - 1] !== 0 ||
|
||
buffer[i - 2] !== 0) {
|
||
i += 3;
|
||
break;
|
||
}
|
||
|
||
// deliver the NAL unit
|
||
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
|
||
syncPoint = i - 2;
|
||
i += 3;
|
||
break;
|
||
default:
|
||
// the current byte isn't a one or zero, so it cannot be part
|
||
// of a sync sequence
|
||
i += 3;
|
||
break;
|
||
}
|
||
}
|
||
// filter out the NAL units that were delivered
|
||
buffer = buffer.subarray(syncPoint);
|
||
i -= syncPoint;
|
||
syncPoint = 0;
|
||
};
|
||
|
||
this.flush = function() {
|
||
// deliver the last buffered NAL unit
|
||
if (buffer && buffer.byteLength > 3) {
|
||
this.trigger('data', buffer.subarray(syncPoint + 3));
|
||
}
|
||
// reset the stream state
|
||
buffer = null;
|
||
syncPoint = 0;
|
||
this.trigger('done');
|
||
};
|
||
};
|
||
NalByteStream.prototype = new Stream();
|
||
|
||
// values of profile_idc that indicate additional fields are included in the SPS
|
||
// see Recommendation ITU-T H.264 (4/2013),
|
||
// 7.3.2.1.1 Sequence parameter set data syntax
|
||
PROFILES_WITH_OPTIONAL_SPS_DATA = {
|
||
100: true,
|
||
110: true,
|
||
122: true,
|
||
244: true,
|
||
44: true,
|
||
83: true,
|
||
86: true,
|
||
118: true,
|
||
128: true,
|
||
138: true,
|
||
139: true,
|
||
134: true
|
||
};
|
||
|
||
/**
|
||
* Accepts input from an ElementaryStream and produces H.264 NAL unit data
|
||
* events.
|
||
*/
|
||
H264Stream = function() {
|
||
var
|
||
nalByteStream = new NalByteStream(),
|
||
self,
|
||
trackId,
|
||
currentPts,
|
||
currentDts,
|
||
|
||
discardEmulationPreventionBytes,
|
||
readSequenceParameterSet,
|
||
skipScalingList;
|
||
|
||
H264Stream.prototype.init.call(this);
|
||
self = this;
|
||
|
||
this.push = function(packet) {
|
||
if (packet.type !== 'video') {
|
||
return;
|
||
}
|
||
trackId = packet.trackId;
|
||
currentPts = packet.pts;
|
||
currentDts = packet.dts;
|
||
|
||
nalByteStream.push(packet);
|
||
};
|
||
|
||
nalByteStream.on('data', function(data) {
|
||
var
|
||
event = {
|
||
trackId: trackId,
|
||
pts: currentPts,
|
||
dts: currentDts,
|
||
data: data
|
||
};
|
||
|
||
switch (data[0] & 0x1f) {
|
||
case 0x05:
|
||
event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';
|
||
break;
|
||
case 0x06:
|
||
event.nalUnitType = 'sei_rbsp';
|
||
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
|
||
break;
|
||
case 0x07:
|
||
event.nalUnitType = 'seq_parameter_set_rbsp';
|
||
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
|
||
event.config = readSequenceParameterSet(event.escapedRBSP);
|
||
break;
|
||
case 0x08:
|
||
event.nalUnitType = 'pic_parameter_set_rbsp';
|
||
break;
|
||
case 0x09:
|
||
event.nalUnitType = 'access_unit_delimiter_rbsp';
|
||
break;
|
||
|
||
default:
|
||
break;
|
||
}
|
||
self.trigger('data', event);
|
||
});
|
||
nalByteStream.on('done', function() {
|
||
self.trigger('done');
|
||
});
|
||
|
||
this.flush = function() {
|
||
nalByteStream.flush();
|
||
};
|
||
|
||
/**
|
||
* Advance the ExpGolomb decoder past a scaling list. The scaling
|
||
* list is optionally transmitted as part of a sequence parameter
|
||
* set and is not relevant to transmuxing.
|
||
* @param count {number} the number of entries in this scaling list
|
||
* @param expGolombDecoder {object} an ExpGolomb pointed to the
|
||
* start of a scaling list
|
||
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1
|
||
*/
|
||
skipScalingList = function(count, expGolombDecoder) {
|
||
var
|
||
lastScale = 8,
|
||
nextScale = 8,
|
||
j,
|
||
deltaScale;
|
||
|
||
for (j = 0; j < count; j++) {
|
||
if (nextScale !== 0) {
|
||
deltaScale = expGolombDecoder.readExpGolomb();
|
||
nextScale = (lastScale + deltaScale + 256) % 256;
|
||
}
|
||
|
||
lastScale = (nextScale === 0) ? lastScale : nextScale;
|
||
}
|
||
};
|
||
|
||
/**
|
||
* Expunge any "Emulation Prevention" bytes from a "Raw Byte
|
||
* Sequence Payload"
|
||
* @param data {Uint8Array} the bytes of a RBSP from a NAL
|
||
* unit
|
||
* @return {Uint8Array} the RBSP without any Emulation
|
||
* Prevention Bytes
|
||
*/
|
||
discardEmulationPreventionBytes = function(data) {
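// H.264 encoders insert an emulation_prevention_three_byte (0x03) after any
// two consecutive zero bytes inside a NAL unit so the payload can never be
// mistaken for a 0x000001 start code. Removing them recovers the raw RBSP,
// e.g. 0x00 0x00 0x03 0x01 becomes 0x00 0x00 0x01.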
|
||
var
|
||
length = data.byteLength,
|
||
emulationPreventionBytesPositions = [],
|
||
i = 1,
|
||
newLength, newData;
|
||
|
||
// Find all `Emulation Prevention Bytes`
|
||
while (i < length - 2) {
|
||
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
|
||
emulationPreventionBytesPositions.push(i + 2);
|
||
i += 2;
|
||
} else {
|
||
i++;
|
||
}
|
||
}
|
||
|
||
// If no Emulation Prevention Bytes were found just return the original
|
||
// array
|
||
if (emulationPreventionBytesPositions.length === 0) {
|
||
return data;
|
||
}
|
||
|
||
// Create a new array to hold the NAL unit data
|
||
newLength = length - emulationPreventionBytesPositions.length;
|
||
newData = new Uint8Array(newLength);
|
||
var sourceIndex = 0;
|
||
|
||
for (i = 0; i < newLength; sourceIndex++, i++) {
|
||
if (sourceIndex === emulationPreventionBytesPositions[0]) {
|
||
// Skip this byte
|
||
sourceIndex++;
|
||
// Remove this position index
|
||
emulationPreventionBytesPositions.shift();
|
||
}
|
||
newData[i] = data[sourceIndex];
|
||
}
|
||
|
||
return newData;
|
||
};
|
||
|
||
/**
|
||
* Read a sequence parameter set and return some interesting video
|
||
* properties. A sequence parameter set is the H264 metadata that
|
||
* describes the properties of upcoming video frames.
|
||
* @param data {Uint8Array} the bytes of a sequence parameter set
|
||
* @return {object} an object with configuration parsed from the
|
||
* sequence parameter set, including the dimensions of the
|
||
* associated video frames.
|
||
*/
|
||
readSequenceParameterSet = function(data) {
|
||
var
|
||
frameCropLeftOffset = 0,
|
||
frameCropRightOffset = 0,
|
||
frameCropTopOffset = 0,
|
||
frameCropBottomOffset = 0,
|
||
sarScale = 1,
|
||
expGolombDecoder, profileIdc, levelIdc, profileCompatibility,
|
||
chromaFormatIdc, picOrderCntType,
|
||
numRefFramesInPicOrderCntCycle, picWidthInMbsMinus1,
|
||
picHeightInMapUnitsMinus1,
|
||
frameMbsOnlyFlag,
|
||
scalingListCount,
|
||
sarRatio,
|
||
aspectRatioIdc,
|
||
i;
|
||
|
||
expGolombDecoder = new ExpGolomb(data);
|
||
profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc
|
||
profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag
|
||
levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)
|
||
expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id
|
||
|
||
// some profiles have more optional data we don't need
|
||
if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {
|
||
chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();
|
||
if (chromaFormatIdc === 3) {
|
||
expGolombDecoder.skipBits(1); // separate_colour_plane_flag
|
||
}
|
||
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8
|
||
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8
|
||
expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag
|
||
if (expGolombDecoder.readBoolean()) { // seq_scaling_matrix_present_flag
|
||
scalingListCount = (chromaFormatIdc !== 3) ? 8 : 12;
|
||
for (i = 0; i < scalingListCount; i++) {
|
||
if (expGolombDecoder.readBoolean()) { // seq_scaling_list_present_flag[ i ]
|
||
if (i < 6) {
|
||
skipScalingList(16, expGolombDecoder);
|
||
} else {
|
||
skipScalingList(64, expGolombDecoder);
|
||
}
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4
|
||
picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();
|
||
|
||
if (picOrderCntType === 0) {
|
||
expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4
|
||
} else if (picOrderCntType === 1) {
|
||
expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag
|
||
expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic
|
||
expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field
|
||
numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();
|
||
for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {
|
||
expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]
|
||
}
|
||
}
|
||
|
||
expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames
|
||
expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag
|
||
|
||
picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
|
||
picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();
|
||
|
||
frameMbsOnlyFlag = expGolombDecoder.readBits(1);
|
||
if (frameMbsOnlyFlag === 0) {
|
||
expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag
|
||
}
|
||
|
||
expGolombDecoder.skipBits(1); // direct_8x8_inference_flag
|
||
if (expGolombDecoder.readBoolean()) { // frame_cropping_flag
|
||
frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();
|
||
frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();
|
||
frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();
|
||
frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb();
|
||
}
|
||
if (expGolombDecoder.readBoolean()) {
|
||
// vui_parameters_present_flag
|
||
if (expGolombDecoder.readBoolean()) {
|
||
// aspect_ratio_info_present_flag
|
||
aspectRatioIdc = expGolombDecoder.readUnsignedByte();
|
||
switch (aspectRatioIdc) {
|
||
case 1: sarRatio = [1, 1]; break;
|
||
case 2: sarRatio = [12, 11]; break;
|
||
case 3: sarRatio = [10, 11]; break;
|
||
case 4: sarRatio = [16, 11]; break;
|
||
case 5: sarRatio = [40, 33]; break;
|
||
case 6: sarRatio = [24, 11]; break;
|
||
case 7: sarRatio = [20, 11]; break;
|
||
case 8: sarRatio = [32, 11]; break;
|
||
case 9: sarRatio = [80, 33]; break;
|
||
case 10: sarRatio = [18, 11]; break;
|
||
case 11: sarRatio = [15, 11]; break;
|
||
case 12: sarRatio = [64, 33]; break;
|
||
case 13: sarRatio = [160, 99]; break;
|
||
case 14: sarRatio = [4, 3]; break;
|
||
case 15: sarRatio = [3, 2]; break;
|
||
case 16: sarRatio = [2, 1]; break;
|
||
case 255: {
|
||
sarRatio = [expGolombDecoder.readUnsignedByte() << 8 |
|
||
expGolombDecoder.readUnsignedByte(),
|
||
expGolombDecoder.readUnsignedByte() << 8 |
|
||
expGolombDecoder.readUnsignedByte() ];
|
||
break;
|
||
}
|
||
}
|
||
if (sarRatio) {
|
||
sarScale = sarRatio[0] / sarRatio[1];
|
||
}
|
||
}
|
||
}
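// Picture dimensions fall out of the macroblock counts: each macroblock is
// 16x16 pixels, the frame cropping offsets are subtracted from the edges,
// and (2 - frame_mbs_only_flag) doubles the height for interlaced content.
// Width is additionally scaled by the sample aspect ratio, if one was given.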
|
||
return {
|
||
profileIdc: profileIdc,
|
||
levelIdc: levelIdc,
|
||
profileCompatibility: profileCompatibility,
|
||
width: Math.ceil((((picWidthInMbsMinus1 + 1) * 16) - frameCropLeftOffset * 2 - frameCropRightOffset * 2) * sarScale),
|
||
height: ((2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16) - (frameCropTopOffset * 2) - (frameCropBottomOffset * 2)
|
||
};
|
||
};
|
||
|
||
};
|
||
H264Stream.prototype = new Stream();
|
||
|
||
module.exports = {
|
||
H264Stream: H264Stream,
|
||
NalByteStream: NalByteStream
|
||
};
|
||
|
||
},{"../utils/exp-golomb.js":32,"../utils/stream.js":33}],8:[function(require,module,exports){
|
||
module.exports = {
|
||
adts: require('./adts'),
|
||
h264: require('./h264')
|
||
};
|
||
|
||
},{"./adts":6,"./h264":7}],9:[function(require,module,exports){
|
||
var highPrefix = [33, 16, 5, 32, 164, 27];
|
||
var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];
|
||
var zeroFill = function(count) {
|
||
var a = [];
|
||
while (count--) {
|
||
a.push(0);
|
||
}
|
||
return a;
|
||
};
|
||
|
||
var makeTable = function(metaTable) {
|
||
return Object.keys(metaTable).reduce(function(obj, key) {
|
||
obj[key] = new Uint8Array(metaTable[key].reduce(function(arr, part) {
|
||
return arr.concat(part);
|
||
}, []));
|
||
return obj;
|
||
}, {});
|
||
};
|
||
|
||
// Frames-of-silence to use for filling in missing AAC frames
|
||
var coneOfSilence = {
|
||
96000: [highPrefix, [227, 64], zeroFill(154), [56]],
|
||
88200: [highPrefix, [231], zeroFill(170), [56]],
|
||
64000: [highPrefix, [248, 192], zeroFill(240), [56]],
|
||
48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],
|
||
44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],
|
||
32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],
|
||
24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],
|
||
16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],
|
||
12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],
|
||
11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],
|
||
8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]
|
||
};
|
||
|
||
module.exports = makeTable(coneOfSilence);
|
||
|
||
},{}],10:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var Stream = require('../utils/stream.js');
|
||
|
||
/**
|
||
* The final stage of the transmuxer that emits the flv tags
|
||
* for audio, video, and metadata. Also translates in time and
|
||
* outputs caption data and id3 cues.
|
||
*/
|
||
var CoalesceStream = function(options) {
|
||
// Number of Tracks per output segment
|
||
// If greater than 1, we combine multiple
|
||
// tracks into a single segment
|
||
this.numberOfTracks = 0;
|
||
this.metadataStream = options.metadataStream;
|
||
|
||
this.videoTags = [];
|
||
this.audioTags = [];
|
||
this.videoTrack = null;
|
||
this.audioTrack = null;
|
||
this.pendingCaptions = [];
|
||
this.pendingMetadata = [];
|
||
this.pendingTracks = 0;
|
||
this.processedTracks = 0;
|
||
|
||
CoalesceStream.prototype.init.call(this);
|
||
|
||
// Take output from multiple
|
||
this.push = function(output) {
|
||
// buffer incoming captions until the associated video segment
|
||
// finishes
|
||
if (output.text) {
|
||
return this.pendingCaptions.push(output);
|
||
}
|
||
// buffer incoming id3 tags until the final flush
|
||
if (output.frames) {
|
||
return this.pendingMetadata.push(output);
|
||
}
|
||
|
||
if (output.track.type === 'video') {
|
||
this.videoTrack = output.track;
|
||
this.videoTags = output.tags;
|
||
this.pendingTracks++;
|
||
}
|
||
if (output.track.type === 'audio') {
|
||
this.audioTrack = output.track;
|
||
this.audioTags = output.tags;
|
||
this.pendingTracks++;
|
||
}
|
||
};
|
||
};
|
||
|
||
CoalesceStream.prototype = new Stream();
|
||
CoalesceStream.prototype.flush = function(flushSource) {
|
||
var
|
||
id3,
|
||
caption,
|
||
i,
|
||
timelineStartPts,
|
||
event = {
|
||
tags: {},
|
||
captions: [],
|
||
captionStreams: {},
|
||
metadata: []
|
||
};
|
||
|
||
if (this.pendingTracks < this.numberOfTracks) {
|
||
if (flushSource !== 'VideoSegmentStream' &&
|
||
flushSource !== 'AudioSegmentStream') {
|
||
// Return because we haven't received a flush from a data-generating
|
||
// portion of the segment (meaning that we have only received meta-data
|
||
// or captions.)
|
||
return;
|
||
} else if (this.pendingTracks === 0) {
|
||
// In the case where we receive a flush without any data having been
|
||
// received we consider it an emitted track for the purposes of coalescing
|
||
// `done` events.
|
||
// We do this for the case where there is an audio and video track in the
|
||
// segment but no audio data. (seen in several playlists with alternate
|
||
// audio tracks and no audio present in the main TS segments.)
|
||
this.processedTracks++;
|
||
|
||
if (this.processedTracks < this.numberOfTracks) {
|
||
return;
|
||
}
|
||
}
|
||
}
|
||
|
||
this.processedTracks += this.pendingTracks;
|
||
this.pendingTracks = 0;
|
||
|
||
if (this.processedTracks < this.numberOfTracks) {
|
||
return;
|
||
}
|
||
|
||
if (this.videoTrack) {
|
||
timelineStartPts = this.videoTrack.timelineStartInfo.pts;
|
||
} else if (this.audioTrack) {
|
||
timelineStartPts = this.audioTrack.timelineStartInfo.pts;
|
||
}
|
||
|
||
event.tags.videoTags = this.videoTags;
|
||
event.tags.audioTags = this.audioTags;
|
||
|
||
// Translate caption PTS times into second offsets into the
|
||
// video timeline for the segment, and add track info
|
||
for (i = 0; i < this.pendingCaptions.length; i++) {
|
||
caption = this.pendingCaptions[i];
|
||
caption.startTime = caption.startPts - timelineStartPts;
|
||
caption.startTime /= 90e3;
|
||
caption.endTime = caption.endPts - timelineStartPts;
|
||
caption.endTime /= 90e3;
|
||
event.captionStreams[caption.stream] = true;
|
||
event.captions.push(caption);
|
||
}
|
||
|
||
// Translate ID3 frame PTS times into second offsets into the
|
||
// video timeline for the segment
|
||
for (i = 0; i < this.pendingMetadata.length; i++) {
|
||
id3 = this.pendingMetadata[i];
|
||
id3.cueTime = id3.pts - timelineStartPts;
|
||
id3.cueTime /= 90e3;
|
||
event.metadata.push(id3);
|
||
}
|
||
// We add this to every single emitted segment even though we only need
|
||
// it for the first
|
||
event.metadata.dispatchType = this.metadataStream.dispatchType;
|
||
|
||
// Reset stream state
|
||
this.videoTrack = null;
|
||
this.audioTrack = null;
|
||
this.videoTags = [];
|
||
this.audioTags = [];
|
||
this.pendingCaptions.length = 0;
|
||
this.pendingMetadata.length = 0;
|
||
this.pendingTracks = 0;
|
||
this.processedTracks = 0;
|
||
|
||
// Emit the final segment
|
||
this.trigger('data', event);
|
||
|
||
this.trigger('done');
|
||
};
|
||
|
||
module.exports = CoalesceStream;
|
||
|
||
},{"../utils/stream.js":33}],11:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var FlvTag = require('./flv-tag.js');
|
||
|
||
// For information on the FLV format, see
|
||
// http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.
|
||
// Technically, this function returns the header and a metadata FLV tag
|
||
// if duration is greater than zero
|
||
// duration in seconds
|
||
// @return {object} the bytes of the FLV header as a Uint8Array
|
||
var getFlvHeader = function(duration, audio, video) { // :ByteArray {
|
||
var
|
||
headBytes = new Uint8Array(3 + 1 + 1 + 4),
|
||
head = new DataView(headBytes.buffer),
|
||
metadata,
|
||
result,
|
||
metadataLength;
|
||
|
||
// default arguments
|
||
duration = duration || 0;
|
||
audio = audio === undefined ? true : audio;
|
||
video = video === undefined ? true : video;
|
||
|
||
// signature
|
||
head.setUint8(0, 0x46); // 'F'
|
||
head.setUint8(1, 0x4c); // 'L'
|
||
head.setUint8(2, 0x56); // 'V'
|
||
|
||
// version
|
||
head.setUint8(3, 0x01);
|
||
|
||
// flags
|
||
head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00));
|
||
|
||
// data offset, should be 9 for FLV v1
|
||
head.setUint32(5, headBytes.byteLength);
|
||
|
||
// init the first FLV tag
|
||
if (duration <= 0) {
|
||
// no duration available so just write the first field of the first
|
||
// FLV tag
|
||
result = new Uint8Array(headBytes.byteLength + 4);
|
||
result.set(headBytes);
|
||
result.set([0, 0, 0, 0], headBytes.byteLength);
|
||
return result;
|
||
}
|
||
|
||
// write out the duration metadata tag
|
||
metadata = new FlvTag(FlvTag.METADATA_TAG);
|
||
metadata.pts = metadata.dts = 0;
|
||
metadata.writeMetaDataDouble('duration', duration);
|
||
metadataLength = metadata.finalize().length;
|
||
result = new Uint8Array(headBytes.byteLength + metadataLength);
|
||
result.set(headBytes);
|
||
// append the finalized duration metadata tag after the header bytes
result.set(metadata.bytes, headBytes.byteLength);
|
||
|
||
return result;
|
||
};
|
||
|
||
module.exports = getFlvHeader;
|
||
|
||
},{"./flv-tag.js":12}],12:[function(require,module,exports){
|
||
/**
|
||
* An object that stores the bytes of an FLV tag and methods for
|
||
* querying and manipulating that data.
|
||
* @see http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf
|
||
*/
|
||
'use strict';
|
||
|
||
var FlvTag;
|
||
|
||
// (type:uint, extraData:Boolean = false) extends ByteArray
|
||
FlvTag = function(type, extraData) {
|
||
var
|
||
// Counter if this is a metadata tag, nal start marker if this is a video
|
||
// tag. unused if this is an audio tag
|
||
adHoc = 0, // :uint
|
||
|
||
// The default size is 16kb but this is not enough to hold iframe
|
||
// data and the resizing algorithm costs a bit so we create a larger
|
||
// starting buffer for video tags
|
||
bufferStartSize = 16384,
|
||
|
||
// checks whether the FLV tag has enough capacity to accept the proposed
|
||
// write and re-allocates the internal buffers if necessary
|
||
prepareWrite = function(flv, count) {
|
||
var
|
||
bytes,
|
||
minLength = flv.position + count;
|
||
if (minLength < flv.bytes.byteLength) {
|
||
// there's enough capacity so do nothing
|
||
return;
|
||
}
|
||
|
||
// allocate a new buffer and copy over the data that will not be modified
|
||
bytes = new Uint8Array(minLength * 2);
|
||
bytes.set(flv.bytes.subarray(0, flv.position), 0);
|
||
flv.bytes = bytes;
|
||
flv.view = new DataView(flv.bytes.buffer);
|
||
},
|
||
|
||
// commonly used metadata properties
|
||
widthBytes = FlvTag.widthBytes || new Uint8Array('width'.length),
|
||
heightBytes = FlvTag.heightBytes || new Uint8Array('height'.length),
|
||
videocodecidBytes = FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),
|
||
i;
|
||
|
||
if (!FlvTag.widthBytes) {
|
||
// calculating the bytes of common metadata names ahead of time makes the
|
||
// corresponding writes faster because we don't have to loop over the
|
||
// characters
|
||
// re-test with test/perf.html if you're planning on changing this
|
||
for (i = 0; i < 'width'.length; i++) {
|
||
widthBytes[i] = 'width'.charCodeAt(i);
|
||
}
|
||
for (i = 0; i < 'height'.length; i++) {
|
||
heightBytes[i] = 'height'.charCodeAt(i);
|
||
}
|
||
for (i = 0; i < 'videocodecid'.length; i++) {
|
||
videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);
|
||
}
|
||
|
||
FlvTag.widthBytes = widthBytes;
|
||
FlvTag.heightBytes = heightBytes;
|
||
FlvTag.videocodecidBytes = videocodecidBytes;
|
||
}
|
||
|
||
this.keyFrame = false; // :Boolean
|
||
|
||
switch (type) {
|
||
case FlvTag.VIDEO_TAG:
|
||
this.length = 16;
|
||
// Start the buffer at 256k
|
||
bufferStartSize *= 6;
|
||
break;
|
||
case FlvTag.AUDIO_TAG:
|
||
this.length = 13;
|
||
this.keyFrame = true;
|
||
break;
|
||
case FlvTag.METADATA_TAG:
|
||
this.length = 29;
|
||
this.keyFrame = true;
|
||
break;
|
||
default:
|
||
throw new Error('Unknown FLV tag type');
|
||
}
|
||
|
||
this.bytes = new Uint8Array(bufferStartSize);
|
||
this.view = new DataView(this.bytes.buffer);
|
||
this.bytes[0] = type;
|
||
this.position = this.length;
|
||
this.keyFrame = extraData; // Defaults to false
|
||
|
||
// presentation timestamp
|
||
this.pts = 0;
|
||
// decoder timestamp
|
||
this.dts = 0;
|
||
|
||
// ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)
|
||
this.writeBytes = function(bytes, offset, length) {
|
||
var
|
||
start = offset || 0,
|
||
end;
|
||
length = length || bytes.byteLength;
|
||
end = start + length;
|
||
|
||
prepareWrite(this, length);
|
||
this.bytes.set(bytes.subarray(start, end), this.position);
|
||
|
||
this.position += length;
|
||
this.length = Math.max(this.length, this.position);
|
||
};
|
||
|
||
// ByteArray#writeByte(value:int):void
|
||
this.writeByte = function(byte) {
|
||
prepareWrite(this, 1);
|
||
this.bytes[this.position] = byte;
|
||
this.position++;
|
||
this.length = Math.max(this.length, this.position);
|
||
};
|
||
|
||
// ByteArray#writeShort(value:int):void
|
||
this.writeShort = function(short) {
|
||
prepareWrite(this, 2);
|
||
this.view.setUint16(this.position, short);
|
||
this.position += 2;
|
||
this.length = Math.max(this.length, this.position);
|
||
};
|
||
|
||
// Negative index into array
|
||
// (pos:uint):int
|
||
this.negIndex = function(pos) {
|
||
return this.bytes[this.length - pos];
|
||
};
|
||
|
||
// The functions below ONLY work when this[0] == VIDEO_TAG.
|
||
// We are not going to check for that because we don't want the overhead
|
||
// (nal:ByteArray = null):int
|
||
this.nalUnitSize = function() {
|
||
if (adHoc === 0) {
|
||
return 0;
|
||
}
|
||
|
||
return this.length - (adHoc + 4);
|
||
};
|
||
|
||
this.startNalUnit = function() {
|
||
// remember position and add 4 bytes
|
||
if (adHoc > 0) {
|
||
throw new Error('Attempted to create new NAL without closing the old one');
|
||
}
|
||
|
||
// reserve 4 bytes for nal unit size
|
||
adHoc = this.length;
|
||
this.length += 4;
|
||
this.position = this.length;
|
||
};
|
||
|
||
// (nal:ByteArray = null):void
|
||
this.endNalUnit = function(nalContainer) {
|
||
var
|
||
nalStart, // :uint
|
||
nalLength; // :uint
|
||
|
||
// Rewind to the marker and write the size
|
||
if (this.length === adHoc + 4) {
|
||
// we started a nal unit, but didn't write one, so roll back the 4 byte size value
|
||
this.length -= 4;
|
||
} else if (adHoc > 0) {
|
||
nalStart = adHoc + 4;
|
||
nalLength = this.length - nalStart;
|
||
|
||
this.position = adHoc;
|
||
this.view.setUint32(this.position, nalLength);
|
||
this.position = this.length;
|
||
|
||
if (nalContainer) {
|
||
// Add the tag to the NAL unit
|
||
nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));
|
||
}
|
||
}
|
||
|
||
adHoc = 0;
|
||
};
|
||
|
||
/**
|
||
* Write out a 64-bit floating point valued metadata property. This method is
|
||
* called frequently during a typical parse and needs to be fast.
|
||
*/
|
||
// (key:String, val:Number):void
|
||
this.writeMetaDataDouble = function(key, val) {
|
||
var i;
|
||
prepareWrite(this, 2 + key.length + 9);
|
||
|
||
// write size of property name
|
||
this.view.setUint16(this.position, key.length);
|
||
this.position += 2;
|
||
|
||
// this next part looks terrible but it improves parser throughput by
|
||
// 10kB/s in my testing
|
||
|
||
// write property name
|
||
if (key === 'width') {
|
||
this.bytes.set(widthBytes, this.position);
|
||
this.position += 5;
|
||
} else if (key === 'height') {
|
||
this.bytes.set(heightBytes, this.position);
|
||
this.position += 6;
|
||
} else if (key === 'videocodecid') {
|
||
this.bytes.set(videocodecidBytes, this.position);
|
||
this.position += 12;
|
||
} else {
|
||
for (i = 0; i < key.length; i++) {
|
||
this.bytes[this.position] = key.charCodeAt(i);
|
||
this.position++;
|
||
}
|
||
}
|
||
|
||
// skip null byte
|
||
this.position++;
|
||
|
||
// write property value
|
||
this.view.setFloat64(this.position, val);
|
||
this.position += 8;
|
||
|
||
// update flv tag length
|
||
this.length = Math.max(this.length, this.position);
|
||
++adHoc;
|
||
};
|
||
|
||
// (key:String, val:Boolean):void
|
||
this.writeMetaDataBoolean = function(key, val) {
|
||
var i;
|
||
prepareWrite(this, 2);
|
||
this.view.setUint16(this.position, key.length);
|
||
this.position += 2;
|
||
for (i = 0; i < key.length; i++) {
|
||
// if key.charCodeAt(i) >= 255, handle error
|
||
prepareWrite(this, 1);
|
||
this.bytes[this.position] = key.charCodeAt(i);
|
||
this.position++;
|
||
}
|
||
prepareWrite(this, 2);
|
||
this.view.setUint8(this.position, 0x01);
|
||
this.position++;
|
||
this.view.setUint8(this.position, val ? 0x01 : 0x00);
|
||
this.position++;
|
||
this.length = Math.max(this.length, this.position);
|
||
++adHoc;
|
||
};
|
||
|
||
// ():ByteArray
|
||
this.finalize = function() {
|
||
var
|
||
dtsDelta, // :int
|
||
len; // :int
|
||
|
||
switch (this.bytes[0]) {
|
||
// Video Data
|
||
case FlvTag.VIDEO_TAG:
|
||
// We only support AVC, 1 = key frame (for AVC, a seekable
|
||
// frame), 2 = inter frame (for AVC, a non-seekable frame)
|
||
this.bytes[11] = ((this.keyFrame || extraData) ? 0x10 : 0x20) | 0x07;
|
||
this.bytes[12] = extraData ? 0x00 : 0x01;
|
||
|
||
dtsDelta = this.pts - this.dts;
|
||
this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;
|
||
this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;
|
||
this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;
|
||
break;
|
||
|
||
case FlvTag.AUDIO_TAG:
|
||
this.bytes[11] = 0xAF; // 44 kHz, 16-bit stereo
|
||
this.bytes[12] = extraData ? 0x00 : 0x01;
|
||
break;
|
||
|
||
case FlvTag.METADATA_TAG:
|
||
this.position = 11;
|
||
this.view.setUint8(this.position, 0x02); // String type
|
||
this.position++;
|
||
this.view.setUint16(this.position, 0x0A); // 10 Bytes
|
||
this.position += 2;
|
||
// set "onMetaData"
|
||
this.bytes.set([0x6f, 0x6e, 0x4d, 0x65,
|
||
0x74, 0x61, 0x44, 0x61,
|
||
0x74, 0x61], this.position);
|
||
this.position += 10;
|
||
this.bytes[this.position] = 0x08; // Array type
|
||
this.position++;
|
||
this.view.setUint32(this.position, adHoc);
|
||
this.position = this.length;
|
||
this.bytes.set([0, 0, 9], this.position);
|
||
this.position += 3; // End Data Tag
|
||
this.length = this.position;
|
||
break;
|
||
}
|
||
|
||
len = this.length - 11;
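// Every FLV tag begins with an 11-byte header: TagType (1 byte), DataSize
// (24 bits), Timestamp (24 bits plus an extended byte), and a StreamID that
// is always zero. DataSize counts only the payload, hence length - 11.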
|
||
|
||
// write the DataSize field
|
||
this.bytes[ 1] = (len & 0x00FF0000) >>> 16;
|
||
this.bytes[ 2] = (len & 0x0000FF00) >>> 8;
|
||
this.bytes[ 3] = (len & 0x000000FF) >>> 0;
|
||
// write the Timestamp
|
||
this.bytes[ 4] = (this.dts & 0x00FF0000) >>> 16;
|
||
this.bytes[ 5] = (this.dts & 0x0000FF00) >>> 8;
|
||
this.bytes[ 6] = (this.dts & 0x000000FF) >>> 0;
|
||
this.bytes[ 7] = (this.dts & 0xFF000000) >>> 24;
|
||
// write the StreamID
|
||
this.bytes[ 8] = 0;
|
||
this.bytes[ 9] = 0;
|
||
this.bytes[10] = 0;
|
||
|
||
// Sometimes we're at the end of the view and have one slot to write a
|
||
// uint32, so, prepareWrite of count 4, since, view is uint8
|
||
prepareWrite(this, 4);
|
||
this.view.setUint32(this.length, this.length);
|
||
this.length += 4;
|
||
this.position += 4;
|
||
|
||
// trim down the byte buffer to what is actually being used
|
||
this.bytes = this.bytes.subarray(0, this.length);
|
||
this.frameTime = FlvTag.frameTime(this.bytes);
|
||
// if bytes.bytelength isn't equal to this.length, handle error
|
||
return this;
|
||
};
|
||
};
|
||
|
||
FlvTag.AUDIO_TAG = 0x08; // == 8, :uint
|
||
FlvTag.VIDEO_TAG = 0x09; // == 9, :uint
|
||
FlvTag.METADATA_TAG = 0x12; // == 18, :uint
|
||
|
||
// (tag:ByteArray):Boolean {
|
||
FlvTag.isAudioFrame = function(tag) {
|
||
return FlvTag.AUDIO_TAG === tag[0];
|
||
};
|
||
|
||
// (tag:ByteArray):Boolean {
|
||
FlvTag.isVideoFrame = function(tag) {
|
||
return FlvTag.VIDEO_TAG === tag[0];
|
||
};
|
||
|
||
// (tag:ByteArray):Boolean {
|
||
FlvTag.isMetaData = function(tag) {
|
||
return FlvTag.METADATA_TAG === tag[0];
|
||
};
|
||
|
||
// (tag:ByteArray):Boolean {
|
||
FlvTag.isKeyFrame = function(tag) {
|
||
if (FlvTag.isVideoFrame(tag)) {
|
||
return tag[11] === 0x17;
|
||
}
|
||
|
||
if (FlvTag.isAudioFrame(tag)) {
|
||
return true;
|
||
}
|
||
|
||
if (FlvTag.isMetaData(tag)) {
|
||
return true;
|
||
}
|
||
|
||
return false;
|
||
};
|
||
|
||
// (tag:ByteArray):uint {
|
||
FlvTag.frameTime = function(tag) {
|
||
var pts = tag[ 4] << 16; // :uint
|
||
pts |= tag[ 5] << 8;
|
||
pts |= tag[ 6] << 0;
|
||
pts |= tag[ 7] << 24;
|
||
return pts;
|
||
};
|
||
|
||
module.exports = FlvTag;
|
||
|
||
},{}],13:[function(require,module,exports){
|
||
module.exports = {
|
||
tag: require('./flv-tag'),
|
||
Transmuxer: require('./transmuxer'),
|
||
getFlvHeader: require('./flv-header')
|
||
};
|
||
|
||
},{"./flv-header":11,"./flv-tag":12,"./transmuxer":15}],14:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var TagList = function() {
|
||
var self = this;
|
||
|
||
this.list = [];
|
||
|
||
this.push = function(tag) {
|
||
this.list.push({
|
||
bytes: tag.bytes,
|
||
dts: tag.dts,
|
||
pts: tag.pts,
|
||
keyFrame: tag.keyFrame,
|
||
metaDataTag: tag.metaDataTag
|
||
});
|
||
};
|
||
|
||
Object.defineProperty(this, 'length', {
|
||
get: function() {
|
||
return self.list.length;
|
||
}
|
||
});
|
||
};
|
||
|
||
module.exports = TagList;
|
||
|
||
},{}],15:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var Stream = require('../utils/stream.js');
|
||
var FlvTag = require('./flv-tag.js');
|
||
var m2ts = require('../m2ts/m2ts.js');
|
||
var AdtsStream = require('../codecs/adts.js');
|
||
var H264Stream = require('../codecs/h264').H264Stream;
|
||
var CoalesceStream = require('./coalesce-stream.js');
|
||
var TagList = require('./tag-list.js');
|
||
|
||
var
|
||
Transmuxer,
|
||
VideoSegmentStream,
|
||
AudioSegmentStream,
|
||
collectTimelineInfo,
|
||
metaDataTag,
|
||
extraDataTag;
|
||
|
||
/**
|
||
* Store information about the start and end of the track and the
|
||
* duration for each frame/sample we process in order to calculate
|
||
* the baseMediaDecodeTime
|
||
*/
|
||
collectTimelineInfo = function(track, data) {
|
||
if (typeof data.pts === 'number') {
|
||
if (track.timelineStartInfo.pts === undefined) {
|
||
track.timelineStartInfo.pts = data.pts;
|
||
} else {
|
||
track.timelineStartInfo.pts =
|
||
Math.min(track.timelineStartInfo.pts, data.pts);
|
||
}
|
||
}
|
||
|
||
if (typeof data.dts === 'number') {
|
||
if (track.timelineStartInfo.dts === undefined) {
|
||
track.timelineStartInfo.dts = data.dts;
|
||
} else {
|
||
track.timelineStartInfo.dts =
|
||
Math.min(track.timelineStartInfo.dts, data.dts);
|
||
}
|
||
}
|
||
};
|
||
|
||
metaDataTag = function(track, pts) {
|
||
var
|
||
tag = new FlvTag(FlvTag.METADATA_TAG); // :FlvTag
|
||
|
||
tag.dts = pts;
|
||
tag.pts = pts;
|
||
|
||
tag.writeMetaDataDouble('videocodecid', 7);
|
||
tag.writeMetaDataDouble('width', track.width);
|
||
tag.writeMetaDataDouble('height', track.height);
|
||
|
||
return tag;
|
||
};
|
||
|
||
extraDataTag = function(track, pts) {
|
||
var
|
||
i,
|
||
tag = new FlvTag(FlvTag.VIDEO_TAG, true);
|
||
|
||
tag.dts = pts;
|
||
tag.pts = pts;
|
||
|
||
tag.writeByte(0x01);// version
|
||
tag.writeByte(track.profileIdc);// profile
|
||
tag.writeByte(track.profileCompatibility);// compatibility
|
||
tag.writeByte(track.levelIdc);// level
|
||
tag.writeByte(0xFC | 0x03); // reserved (6 bits), NALU length size - 1 (2 bits)
|
||
tag.writeByte(0xE0 | 0x01); // reserved (3 bits), num of SPS (5 bits)
|
||
tag.writeShort(track.sps[0].length); // data of SPS
|
||
tag.writeBytes(track.sps[0]); // SPS
|
||
|
||
tag.writeByte(track.pps.length); // num of PPS (will there ever be more than 1 PPS?)
|
||
for (i = 0; i < track.pps.length; ++i) {
|
||
tag.writeShort(track.pps[i].length); // 2 bytes for length of PPS
|
||
tag.writeBytes(track.pps[i]); // data of PPS
|
||
}
|
||
|
||
return tag;
|
||
};
|
||
|
||
/**
|
||
* Constructs a single-track, media segment from AAC data
|
||
* events. The output of this stream can be fed to flash.
|
||
*/
|
||
AudioSegmentStream = function(track) {
|
||
var
|
||
adtsFrames = [],
|
||
videoKeyFrames = [],
|
||
oldExtraData;
|
||
|
||
AudioSegmentStream.prototype.init.call(this);
|
||
|
||
this.push = function(data) {
|
||
collectTimelineInfo(track, data);
|
||
|
||
if (track) {
|
||
track.audioobjecttype = data.audioobjecttype;
|
||
track.channelcount = data.channelcount;
|
||
track.samplerate = data.samplerate;
|
||
track.samplingfrequencyindex = data.samplingfrequencyindex;
|
||
track.samplesize = data.samplesize;
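// Pack a two-byte AAC AudioSpecificConfig: 5 bits of audio object type,
// 4 bits of sampling frequency index and 4 bits of channel configuration,
// leaving the remaining low bits zero. This is the payload carried by the
// AAC sequence header tag written in writeMetaDataTags below.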
|
||
track.extraData = (track.audioobjecttype << 11) |
|
||
(track.samplingfrequencyindex << 7) |
|
||
(track.channelcount << 3);
|
||
}
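// FLV tag timestamps are in milliseconds, so convert from the MPEG-2
// 90kHz clock (90 ticks per millisecond) before buffering the frame.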
|
||
|
||
data.pts = Math.round(data.pts / 90);
|
||
data.dts = Math.round(data.dts / 90);
|
||
|
||
// buffer audio data until end() is called
|
||
adtsFrames.push(data);
|
||
};
|
||
|
||
this.flush = function() {
|
||
var currentFrame, adtsFrame, lastMetaPts, tags = new TagList();
|
||
// return early if no audio data has been observed
|
||
if (adtsFrames.length === 0) {
|
||
this.trigger('done', 'AudioSegmentStream');
|
||
return;
|
||
}
|
||
|
||
lastMetaPts = -Infinity;
|
||
|
||
while (adtsFrames.length) {
|
||
currentFrame = adtsFrames.shift();
|
||
|
||
// write out a metadata frame at every video key frame
|
||
if (videoKeyFrames.length && currentFrame.pts >= videoKeyFrames[0]) {
|
||
lastMetaPts = videoKeyFrames.shift();
|
||
this.writeMetaDataTags(tags, lastMetaPts);
|
||
}
|
||
|
||
// also write out metadata tags every 1 second so that the decoder
|
||
// is re-initialized quickly after seeking into a different
|
||
// audio configuration.
|
||
if (track.extraData !== oldExtraData || currentFrame.pts - lastMetaPts >= 1000) {
|
||
this.writeMetaDataTags(tags, currentFrame.pts);
|
||
oldExtraData = track.extraData;
|
||
lastMetaPts = currentFrame.pts;
|
||
}
|
||
|
||
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG);
|
||
adtsFrame.pts = currentFrame.pts;
|
||
adtsFrame.dts = currentFrame.dts;
|
||
|
||
adtsFrame.writeBytes(currentFrame.data);
|
||
|
||
tags.push(adtsFrame.finalize());
|
||
}
|
||
|
||
videoKeyFrames.length = 0;
|
||
oldExtraData = null;
|
||
this.trigger('data', {track: track, tags: tags.list});
|
||
|
||
this.trigger('done', 'AudioSegmentStream');
|
||
};
|
||
|
||
this.writeMetaDataTags = function(tags, pts) {
|
||
var adtsFrame;
|
||
|
||
adtsFrame = new FlvTag(FlvTag.METADATA_TAG);
|
||
// For audio, DTS is always the same as PTS. We want to set the DTS
|
||
// however so we can compare with video DTS to determine approximate
|
||
// packet order
|
||
adtsFrame.pts = pts;
|
||
adtsFrame.dts = pts;
|
||
|
||
// AAC is always 10
|
||
adtsFrame.writeMetaDataDouble('audiocodecid', 10);
|
||
adtsFrame.writeMetaDataBoolean('stereo', track.channelcount === 2);
|
||
adtsFrame.writeMetaDataDouble('audiosamplerate', track.samplerate);
|
||
// Is AAC always 16 bit?
|
||
adtsFrame.writeMetaDataDouble('audiosamplesize', 16);
|
||
|
||
tags.push(adtsFrame.finalize());
|
||
|
||
adtsFrame = new FlvTag(FlvTag.AUDIO_TAG, true);
|
||
// For audio, DTS is always the same as PTS. We want to set the DTS
|
||
// however so we can compare with video DTS to determine approximate
|
||
// packet order
|
||
adtsFrame.pts = pts;
|
||
adtsFrame.dts = pts;
|
||
|
||
adtsFrame.view.setUint16(adtsFrame.position, track.extraData);
|
||
adtsFrame.position += 2;
|
||
adtsFrame.length = Math.max(adtsFrame.length, adtsFrame.position);
|
||
|
||
tags.push(adtsFrame.finalize());
|
||
};
|
||
|
||
this.onVideoKeyFrame = function(pts) {
|
||
videoKeyFrames.push(pts);
|
||
};
|
||
};
|
||
AudioSegmentStream.prototype = new Stream();
|
||
|
||
/**
|
||
* Store FlvTags for the h264 stream
|
||
* @param track {object} track metadata configuration
|
||
*/
|
||
VideoSegmentStream = function(track) {
|
||
var
|
||
nalUnits = [],
|
||
config,
|
||
h264Frame;
|
||
VideoSegmentStream.prototype.init.call(this);
|
||
|
||
this.finishFrame = function(tags, frame) {
|
||
if (!frame) {
|
||
return;
|
||
}
|
||
// Check if keyframe and the length of tags.
|
||
// This makes sure we write metadata on the first frame of a segment.
|
||
if (config && track && track.newMetadata &&
|
||
(frame.keyFrame || tags.length === 0)) {
|
||
// Push extra data on every IDR frame in case we did a stream change + seek
|
||
var metaTag = metaDataTag(config, frame.dts).finalize();
|
||
var extraTag = extraDataTag(track, frame.dts).finalize();
|
||
|
||
metaTag.metaDataTag = extraTag.metaDataTag = true;
|
||
|
||
tags.push(metaTag);
|
||
tags.push(extraTag);
|
||
track.newMetadata = false;
|
||
|
||
this.trigger('keyframe', frame.dts);
|
||
}
|
||
|
||
frame.endNalUnit();
|
||
tags.push(frame.finalize());
|
||
h264Frame = null;
|
||
};
|
||
|
||
this.push = function(data) {
|
||
collectTimelineInfo(track, data);
|
||
|
||
data.pts = Math.round(data.pts / 90);
|
||
data.dts = Math.round(data.dts / 90);
|
||
|
||
// buffer video until flush() is called
|
||
nalUnits.push(data);
|
||
};
|
||
|
||
this.flush = function() {
|
||
var
|
||
currentNal,
|
||
tags = new TagList();
|
||
|
||
// Throw away nalUnits at the start of the byte stream until we find
|
||
// the first AUD
|
||
while (nalUnits.length) {
|
||
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
|
||
break;
|
||
}
|
||
nalUnits.shift();
|
||
}
|
||
|
||
// return early if no video data has been observed
|
||
if (nalUnits.length === 0) {
|
||
this.trigger('done', 'VideoSegmentStream');
|
||
return;
|
||
}
|
||
|
||
while (nalUnits.length) {
|
||
currentNal = nalUnits.shift();
|
||
|
||
// record the track config
|
||
if (currentNal.nalUnitType === 'seq_parameter_set_rbsp') {
|
||
track.newMetadata = true;
|
||
config = currentNal.config;
|
||
track.width = config.width;
|
||
track.height = config.height;
|
||
track.sps = [currentNal.data];
|
||
track.profileIdc = config.profileIdc;
|
||
track.levelIdc = config.levelIdc;
|
||
track.profileCompatibility = config.profileCompatibility;
|
||
h264Frame.endNalUnit();
|
||
} else if (currentNal.nalUnitType === 'pic_parameter_set_rbsp') {
|
||
track.newMetadata = true;
|
||
track.pps = [currentNal.data];
|
||
h264Frame.endNalUnit();
|
||
} else if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
|
||
if (h264Frame) {
|
||
this.finishFrame(tags, h264Frame);
|
||
}
|
||
h264Frame = new FlvTag(FlvTag.VIDEO_TAG);
|
||
h264Frame.pts = currentNal.pts;
|
||
h264Frame.dts = currentNal.dts;
|
||
} else {
|
||
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
|
||
// the current sample is a key frame
|
||
h264Frame.keyFrame = true;
|
||
}
|
||
h264Frame.endNalUnit();
|
||
}
|
||
h264Frame.startNalUnit();
|
||
h264Frame.writeBytes(currentNal.data);
|
||
}
|
||
if (h264Frame) {
|
||
this.finishFrame(tags, h264Frame);
|
||
}
|
||
|
||
this.trigger('data', {track: track, tags: tags.list});
|
||
|
||
// Continue with the flush process now
|
||
this.trigger('done', 'VideoSegmentStream');
|
||
};
|
||
};
|
||
|
||
VideoSegmentStream.prototype = new Stream();
|
||
|
||
/**
|
||
 * An object that incrementally transmuxes MPEG2 Transport Stream
|
||
* chunks into an FLV.
|
||
*/
|
||
Transmuxer = function(options) {
|
||
var
|
||
self = this,
|
||
|
||
packetStream, parseStream, elementaryStream,
|
||
videoTimestampRolloverStream, audioTimestampRolloverStream,
|
||
timedMetadataTimestampRolloverStream,
|
||
adtsStream, h264Stream,
|
||
videoSegmentStream, audioSegmentStream, captionStream,
|
||
coalesceStream;
|
||
|
||
Transmuxer.prototype.init.call(this);
|
||
|
||
options = options || {};
|
||
|
||
// expose the metadata stream
|
||
this.metadataStream = new m2ts.MetadataStream();
|
||
|
||
options.metadataStream = this.metadataStream;
|
||
|
||
// set up the parsing pipeline
|
||
packetStream = new m2ts.TransportPacketStream();
|
||
parseStream = new m2ts.TransportParseStream();
|
||
elementaryStream = new m2ts.ElementaryStream();
|
||
videoTimestampRolloverStream = new m2ts.TimestampRolloverStream('video');
|
||
audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');
|
||
timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');
|
||
|
||
adtsStream = new AdtsStream();
|
||
h264Stream = new H264Stream();
|
||
coalesceStream = new CoalesceStream(options);
|
||
|
||
// disassemble MPEG2-TS packets into elementary streams
|
||
packetStream
|
||
.pipe(parseStream)
|
||
.pipe(elementaryStream);
|
||
|
||
// !!THIS ORDER IS IMPORTANT!!
|
||
// demux the streams
|
||
elementaryStream
|
||
.pipe(videoTimestampRolloverStream)
|
||
.pipe(h264Stream);
|
||
elementaryStream
|
||
.pipe(audioTimestampRolloverStream)
|
||
.pipe(adtsStream);
|
||
|
||
elementaryStream
|
||
.pipe(timedMetadataTimestampRolloverStream)
|
||
.pipe(this.metadataStream)
|
||
.pipe(coalesceStream);
|
||
// if CEA-708 parsing is available, hook up a caption stream
|
||
captionStream = new m2ts.CaptionStream();
|
||
h264Stream.pipe(captionStream)
|
||
.pipe(coalesceStream);
|
||
|
||
// hook up the segment streams once track metadata is delivered
|
||
elementaryStream.on('data', function(data) {
|
||
var i, videoTrack, audioTrack;
|
||
|
||
if (data.type === 'metadata') {
|
||
i = data.tracks.length;
|
||
|
||
// scan the tracks listed in the metadata
|
||
while (i--) {
|
||
if (data.tracks[i].type === 'video') {
|
||
videoTrack = data.tracks[i];
|
||
} else if (data.tracks[i].type === 'audio') {
|
||
audioTrack = data.tracks[i];
|
||
}
|
||
}
|
||
|
||
// hook up the video segment stream to the first track with h264 data
|
||
if (videoTrack && !videoSegmentStream) {
|
||
coalesceStream.numberOfTracks++;
|
||
videoSegmentStream = new VideoSegmentStream(videoTrack);
|
||
|
||
// Set up the final part of the video pipeline
|
||
h264Stream
|
||
.pipe(videoSegmentStream)
|
||
.pipe(coalesceStream);
|
||
}
|
||
|
||
if (audioTrack && !audioSegmentStream) {
|
||
// hook up the audio segment stream to the first track with aac data
|
||
coalesceStream.numberOfTracks++;
|
||
audioSegmentStream = new AudioSegmentStream(audioTrack);
|
||
|
||
// Set up the final part of the audio pipeline
|
||
adtsStream
|
||
.pipe(audioSegmentStream)
|
||
.pipe(coalesceStream);
|
||
|
||
if (videoSegmentStream) {
|
||
videoSegmentStream.on('keyframe', audioSegmentStream.onVideoKeyFrame);
|
||
}
|
||
}
|
||
}
|
||
});
|
||
|
||
// feed incoming data to the front of the parsing pipeline
|
||
this.push = function(data) {
|
||
packetStream.push(data);
|
||
};
|
||
|
||
// flush any buffered data
|
||
this.flush = function() {
|
||
// Start at the top of the pipeline and flush all pending work
|
||
packetStream.flush();
|
||
};
|
||
|
||
// Caption data has to be reset when seeking outside buffered range
|
||
this.resetCaptions = function() {
|
||
captionStream.reset();
|
||
};
|
||
|
||
// Re-emit any data coming from the coalesce stream to the outside world
|
||
coalesceStream.on('data', function(event) {
|
||
self.trigger('data', event);
|
||
});
|
||
|
||
// Let the consumer know we have finished flushing the entire pipeline
|
||
coalesceStream.on('done', function() {
|
||
self.trigger('done');
|
||
});
|
||
};
|
||
Transmuxer.prototype = new Stream();
|
||
|
||
// forward compatibility
|
||
module.exports = Transmuxer;
|
||
|
||
},{"../codecs/adts.js":6,"../codecs/h264":7,"../m2ts/m2ts.js":19,"../utils/stream.js":33,"./coalesce-stream.js":10,"./flv-tag.js":12,"./tag-list.js":14}],16:[function(require,module,exports){
|
||
'use strict';

var muxjs = {
  codecs: require('./codecs'),
  mp4: require('./mp4'),
  flv: require('./flv'),
  mp2t: require('./m2ts')
};

// include all the tools when the full library is required
muxjs.mp4.tools = require('./tools/mp4-inspector');
muxjs.flv.tools = require('./tools/flv-inspector');
muxjs.mp2t.tools = require('./tools/ts-inspector');


module.exports = muxjs;

},{"./codecs":8,"./flv":13,"./m2ts":18,"./mp4":24,"./tools/flv-inspector":28,"./tools/mp4-inspector":29,"./tools/ts-inspector":30}],17:[function(require,module,exports){
/**
 * mux.js
 *
 * Copyright (c) 2015 Brightcove
 * All rights reserved.
 *
 * Reads in-band caption information from a video elementary
 * stream. Captions must follow the CEA-708 standard for injection
 * into an MPEG-2 transport stream.
 * @see https://en.wikipedia.org/wiki/CEA-708
 * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf
 */

'use strict';

// -----------------
// Link To Transport
// -----------------

// Supplemental enhancement information (SEI) NAL units have a
// payload type field to indicate how they are to be
// interpreted. CEA-708 caption content is always transmitted with
// payload type 0x04.
var USER_DATA_REGISTERED_ITU_T_T35 = 4,
    RBSP_TRAILING_BITS = 128,
    Stream = require('../utils/stream');

/**
 * Parse a supplemental enhancement information (SEI) NAL unit.
 * Stops parsing once a message of type ITU T T35 has been found.
 *
 * @param bytes {Uint8Array} the bytes of a SEI NAL unit
 * @return {object} the parsed SEI payload
 * @see Rec. ITU-T H.264, 7.3.2.3.1
 */
var parseSei = function(bytes) {
  var
    i = 0,
    result = {
      payloadType: -1,
      payloadSize: 0
    },
    payloadType = 0,
    payloadSize = 0;

  // go through the sei_rbsp parsing each individual sei_message
  while (i < bytes.byteLength) {
    // stop once we have hit the end of the sei_rbsp
    if (bytes[i] === RBSP_TRAILING_BITS) {
      break;
    }

    // Parse payload type
    while (bytes[i] === 0xFF) {
      payloadType += 255;
      i++;
    }
    payloadType += bytes[i++];

    // Parse payload size
    while (bytes[i] === 0xFF) {
      payloadSize += 255;
      i++;
    }
    payloadSize += bytes[i++];

    // this sei_message is a 608/708 caption so save it and break
    // there can only ever be one caption message in a frame's sei
    if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {
      result.payloadType = payloadType;
      result.payloadSize = payloadSize;
      result.payload = bytes.subarray(i, i + payloadSize);
      break;
    }

    // skip the payload and parse the next message
    i += payloadSize;
    payloadType = 0;
    payloadSize = 0;
  }

  return result;
};

// see ANSI/SCTE 128-1 (2013), section 8.1
var parseUserData = function(sei) {
  // itu_t_t35_country_code must be 181 (United States) for
  // captions
  if (sei.payload[0] !== 181) {
    return null;
  }

  // itu_t_t35_provider_code should be 49 (ATSC) for captions
  if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) {
    return null;
  }

  // the user_identifier should be "GA94" to indicate ATSC1 data
  if (String.fromCharCode(sei.payload[3],
                          sei.payload[4],
                          sei.payload[5],
                          sei.payload[6]) !== 'GA94') {
    return null;
  }

  // finally, user_data_type_code should be 0x03 for caption data
  if (sei.payload[7] !== 0x03) {
    return null;
  }

  // return the user_data_type_structure and strip the trailing
  // marker bits
  return sei.payload.subarray(8, sei.payload.length - 1);
};

// see CEA-708-D, section 4.4
var parseCaptionPackets = function(pts, userData) {
  var results = [], i, count, offset, data;

  // if this is just filler, return immediately
  if (!(userData[0] & 0x40)) {
    return results;
  }

  // parse out the cc_data_1 and cc_data_2 fields
  count = userData[0] & 0x1f;
  for (i = 0; i < count; i++) {
    offset = i * 3;
    data = {
      type: userData[offset + 2] & 0x03,
      pts: pts
    };

    // capture cc data when cc_valid is 1
    if (userData[offset + 2] & 0x04) {
      data.ccData = (userData[offset + 3] << 8) | userData[offset + 4];
      results.push(data);
    }
  }
  return results;
};

var CaptionStream = function() {

  CaptionStream.prototype.init.call(this);

  this.captionPackets_ = [];

  this.ccStreams_ = [
    new Cea608Stream(0, 0), // eslint-disable-line no-use-before-define
    new Cea608Stream(0, 1), // eslint-disable-line no-use-before-define
    new Cea608Stream(1, 0), // eslint-disable-line no-use-before-define
    new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define
  ];

  this.reset();

  // forward data and done events from CCs to this CaptionStream
  this.ccStreams_.forEach(function(cc) {
    cc.on('data', this.trigger.bind(this, 'data'));
    cc.on('done', this.trigger.bind(this, 'done'));
  }, this);

};

CaptionStream.prototype = new Stream();
CaptionStream.prototype.push = function(event) {
|
||
var sei, userData;
|
||
|
||
// only examine SEI NALs
|
||
if (event.nalUnitType !== 'sei_rbsp') {
|
||
return;
|
||
}
|
||
|
||
// parse the sei
|
||
sei = parseSei(event.escapedRBSP);
|
||
|
||
// ignore everything but user_data_registered_itu_t_t35
|
||
if (sei.payloadType !== USER_DATA_REGISTERED_ITU_T_T35) {
|
||
return;
|
||
}
|
||
|
||
// parse out the user data payload
|
||
userData = parseUserData(sei);
|
||
|
||
// ignore unrecognized userData
|
||
if (!userData) {
|
||
return;
|
||
}
|
||
|
||
// Sometimes, the same segment # will be downloaded twice. To stop the
|
||
// caption data from being processed twice, we track the latest dts we've
|
||
// received and ignore everything with a dts before that. However, since
|
||
// data for a specific dts can be split across 2 packets on either side of
|
||
// a segment boundary, we need to make sure we *don't* ignore the second
|
||
// dts packet we receive that has dts === this.latestDts_. And thus, the
|
||
// ignoreNextEqualDts_ flag was born.
|
||
if (event.dts < this.latestDts_) {
|
||
// We've started getting older data, so set the flag.
|
||
this.ignoreNextEqualDts_ = true;
|
||
return;
|
||
} else if ((event.dts === this.latestDts_) && (this.ignoreNextEqualDts_)) {
|
||
// We've received the last duplicate packet, time to start processing again
|
||
this.ignoreNextEqualDts_ = false;
|
||
return;
|
||
}
|
||
|
||
// parse out CC data packets and save them for later
|
||
this.captionPackets_ = this.captionPackets_.concat(parseCaptionPackets(event.pts, userData));
|
||
this.latestDts_ = event.dts;
|
||
};
|
||
|
||
CaptionStream.prototype.flush = function() {
|
||
// make sure we actually parsed captions before proceeding
|
||
if (!this.captionPackets_.length) {
|
||
this.ccStreams_.forEach(function(cc) {
|
||
cc.flush();
|
||
}, this);
|
||
return;
|
||
}
|
||
|
||
// In Chrome, the Array#sort function is not stable so add a
|
||
// presortIndex that we can use to ensure we get a stable-sort
|
||
this.captionPackets_.forEach(function(elem, idx) {
|
||
elem.presortIndex = idx;
|
||
});
|
||
|
||
// sort caption byte-pairs based on their PTS values
|
||
this.captionPackets_.sort(function(a, b) {
|
||
if (a.pts === b.pts) {
|
||
return a.presortIndex - b.presortIndex;
|
||
}
|
||
return a.pts - b.pts;
|
||
});
|
||
|
||
this.captionPackets_.forEach(function(packet) {
|
||
if (packet.type < 2) {
|
||
// Dispatch packet to the right Cea608Stream
|
||
this.dispatchCea608Packet(packet);
|
||
}
|
||
    // this is where an 'else' would go for dispatching packets
|
||
// to a theoretical Cea708Stream that handles SERVICEn data
|
||
}, this);
|
||
|
||
this.captionPackets_.length = 0;
|
||
this.ccStreams_.forEach(function(cc) {
|
||
cc.flush();
|
||
}, this);
|
||
return;
|
||
};
|
||
|
||
CaptionStream.prototype.reset = function() {
|
||
this.latestDts_ = null;
|
||
this.ignoreNextEqualDts_ = false;
|
||
this.activeCea608Channel_ = [null, null];
|
||
this.ccStreams_.forEach(function(ccStream) {
|
||
ccStream.reset();
|
||
});
|
||
};
|
||
|
||
CaptionStream.prototype.dispatchCea608Packet = function(packet) {
|
||
// NOTE: packet.type is the CEA608 field
|
||
if (this.setsChannel1Active(packet)) {
|
||
this.activeCea608Channel_[packet.type] = 0;
|
||
} else if (this.setsChannel2Active(packet)) {
|
||
this.activeCea608Channel_[packet.type] = 1;
|
||
}
|
||
if (this.activeCea608Channel_[packet.type] === null) {
|
||
// If we haven't received anything to set the active channel, discard the
|
||
// data; we don't want jumbled captions
|
||
return;
|
||
}
|
||
this.ccStreams_[(packet.type << 1) + this.activeCea608Channel_[packet.type]].push(packet);
|
||
};
|
||
|
||
CaptionStream.prototype.setsChannel1Active = function(packet) {
|
||
return ((packet.ccData & 0x7800) === 0x1000);
|
||
};
|
||
CaptionStream.prototype.setsChannel2Active = function(packet) {
|
||
return ((packet.ccData & 0x7800) === 0x1800);
|
||
};
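// For example, a control code of 0x1420 satisfies (0x1420 & 0x7800) === 0x1000
// and so selects channel 1, while 0x1c20 satisfies the 0x1800 test and selects
// channel 2.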
|
||
|
||
// ----------------------
|
||
// Session to Application
|
||
// ----------------------
|
||
|
||
var CHARACTER_TRANSLATION = {
|
||
0x2a: 0xe1, // á
|
||
0x5c: 0xe9, // é
|
||
0x5e: 0xed, // í
|
||
0x5f: 0xf3, // ó
|
||
0x60: 0xfa, // ú
|
||
0x7b: 0xe7, // ç
|
||
0x7c: 0xf7, // ÷
|
||
0x7d: 0xd1, // Ñ
|
||
0x7e: 0xf1, // ñ
|
||
0x7f: 0x2588, // █
|
||
0x0130: 0xae, // ®
|
||
0x0131: 0xb0, // °
|
||
0x0132: 0xbd, // ½
|
||
0x0133: 0xbf, // ¿
|
||
0x0134: 0x2122, // ™
|
||
0x0135: 0xa2, // ¢
|
||
0x0136: 0xa3, // £
|
||
0x0137: 0x266a, // ♪
|
||
0x0138: 0xe0, // à
|
||
0x0139: 0xa0, //
|
||
0x013a: 0xe8, // è
|
||
0x013b: 0xe2, // â
|
||
0x013c: 0xea, // ê
|
||
0x013d: 0xee, // î
|
||
0x013e: 0xf4, // ô
|
||
0x013f: 0xfb, // û
|
||
0x0220: 0xc1, // Á
|
||
0x0221: 0xc9, // É
|
||
0x0222: 0xd3, // Ó
|
||
0x0223: 0xda, // Ú
|
||
0x0224: 0xdc, // Ü
|
||
0x0225: 0xfc, // ü
|
||
0x0226: 0x2018, // ‘
|
||
0x0227: 0xa1, // ¡
|
||
0x0228: 0x2a, // *
|
||
0x0229: 0x27, // '
|
||
0x022a: 0x2014, // —
|
||
0x022b: 0xa9, // ©
|
||
0x022c: 0x2120, // ℠
|
||
0x022d: 0x2022, // •
|
||
0x022e: 0x201c, // “
|
||
0x022f: 0x201d, // ”
|
||
0x0230: 0xc0, // À
|
||
0x0231: 0xc2, // Â
|
||
0x0232: 0xc7, // Ç
|
||
0x0233: 0xc8, // È
|
||
0x0234: 0xca, // Ê
|
||
0x0235: 0xcb, // Ë
|
||
0x0236: 0xeb, // ë
|
||
0x0237: 0xce, // Î
|
||
0x0238: 0xcf, // Ï
|
||
0x0239: 0xef, // ï
|
||
0x023a: 0xd4, // Ô
|
||
0x023b: 0xd9, // Ù
|
||
0x023c: 0xf9, // ù
|
||
0x023d: 0xdb, // Û
|
||
0x023e: 0xab, // «
|
||
0x023f: 0xbb, // »
|
||
0x0320: 0xc3, // Ã
|
||
0x0321: 0xe3, // ã
|
||
0x0322: 0xcd, // Í
|
||
0x0323: 0xcc, // Ì
|
||
0x0324: 0xec, // ì
|
||
0x0325: 0xd2, // Ò
|
||
0x0326: 0xf2, // ò
|
||
0x0327: 0xd5, // Õ
|
||
0x0328: 0xf5, // õ
|
||
0x0329: 0x7b, // {
|
||
0x032a: 0x7d, // }
|
||
0x032b: 0x5c, // \
|
||
0x032c: 0x5e, // ^
|
||
0x032d: 0x5f, // _
|
||
0x032e: 0x7c, // |
|
||
0x032f: 0x7e, // ~
|
||
0x0330: 0xc4, // Ä
|
||
0x0331: 0xe4, // ä
|
||
0x0332: 0xd6, // Ö
|
||
0x0333: 0xf6, // ö
|
||
0x0334: 0xdf, // ß
|
||
0x0335: 0xa5, // ¥
|
||
0x0336: 0xa4, // ¤
|
||
0x0337: 0x2502, // │
|
||
0x0338: 0xc5, // Å
|
||
0x0339: 0xe5, // å
|
||
0x033a: 0xd8, // Ø
|
||
0x033b: 0xf8, // ø
|
||
0x033c: 0x250c, // ┌
|
||
0x033d: 0x2510, // ┐
|
||
0x033e: 0x2514, // └
|
||
0x033f: 0x2518 // ┘
|
||
};
|
||
|
||
var getCharFromCode = function(code) {
|
||
if (code === null) {
|
||
return '';
|
||
}
|
||
code = CHARACTER_TRANSLATION[code] || code;
|
||
return String.fromCharCode(code);
|
||
};
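// For example, getCharFromCode(0x2a) maps through CHARACTER_TRANSLATION to 'á',
// while getCharFromCode(0x41) falls through to String.fromCharCode and returns 'A'.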
|
||
|
||
// the index of the last row in a CEA-608 display buffer
|
||
var BOTTOM_ROW = 14;
|
||
|
||
// This array is used for mapping PACs -> row #, since there's no way of
|
||
// getting it through bit logic.
|
||
var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620,
|
||
0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420];
|
||
|
||
// CEA-608 captions are rendered onto a 34x15 matrix of character
|
||
// cells. The "bottom" row is the last element in the outer array.
|
||
var createDisplayBuffer = function() {
|
||
var result = [], i = BOTTOM_ROW + 1;
|
||
while (i--) {
|
||
result.push('');
|
||
}
|
||
return result;
|
||
};
|
||
|
||
var Cea608Stream = function(field, dataChannel) {
|
||
Cea608Stream.prototype.init.call(this);
|
||
|
||
this.field_ = field || 0;
|
||
this.dataChannel_ = dataChannel || 0;
|
||
|
||
this.name_ = 'CC' + (((this.field_ << 1) | this.dataChannel_) + 1);
|
||
|
||
this.setConstants();
|
||
this.reset();
|
||
|
||
this.push = function(packet) {
|
||
var data, swap, char0, char1, text;
|
||
// remove the parity bits
|
||
data = packet.ccData & 0x7f7f;
|
||
|
||
// ignore duplicate control codes; the spec demands they're sent twice
|
||
if (data === this.lastControlCode_) {
|
||
this.lastControlCode_ = null;
|
||
return;
|
||
}
|
||
|
||
// Store control codes
|
||
if ((data & 0xf000) === 0x1000) {
|
||
this.lastControlCode_ = data;
|
||
} else if (data !== this.PADDING_) {
|
||
this.lastControlCode_ = null;
|
||
}
|
||
|
||
char0 = data >>> 8;
|
||
char1 = data & 0xff;
|
||
|
||
if (data === this.PADDING_) {
|
||
return;
|
||
|
||
} else if (data === this.RESUME_CAPTION_LOADING_) {
|
||
this.mode_ = 'popOn';
|
||
|
||
} else if (data === this.END_OF_CAPTION_) {
|
||
this.clearFormatting(packet.pts);
|
||
// if a caption was being displayed, it's gone now
|
||
this.flushDisplayed(packet.pts);
|
||
|
||
// flip memory
|
||
swap = this.displayed_;
|
||
this.displayed_ = this.nonDisplayed_;
|
||
this.nonDisplayed_ = swap;
|
||
|
||
// start measuring the time to display the caption
|
||
this.startPts_ = packet.pts;
|
||
|
||
} else if (data === this.ROLL_UP_2_ROWS_) {
|
||
this.topRow_ = BOTTOM_ROW - 1;
|
||
this.mode_ = 'rollUp';
|
||
} else if (data === this.ROLL_UP_3_ROWS_) {
|
||
this.topRow_ = BOTTOM_ROW - 2;
|
||
this.mode_ = 'rollUp';
|
||
} else if (data === this.ROLL_UP_4_ROWS_) {
|
||
this.topRow_ = BOTTOM_ROW - 3;
|
||
this.mode_ = 'rollUp';
|
||
} else if (data === this.CARRIAGE_RETURN_) {
|
||
this.clearFormatting(packet.pts);
|
||
this.flushDisplayed(packet.pts);
|
||
this.shiftRowsUp_();
|
||
this.startPts_ = packet.pts;
|
||
|
||
} else if (data === this.BACKSPACE_) {
|
||
if (this.mode_ === 'popOn') {
|
||
this.nonDisplayed_[BOTTOM_ROW] = this.nonDisplayed_[BOTTOM_ROW].slice(0, -1);
|
||
} else {
|
||
this.displayed_[BOTTOM_ROW] = this.displayed_[BOTTOM_ROW].slice(0, -1);
|
||
}
|
||
} else if (data === this.ERASE_DISPLAYED_MEMORY_) {
|
||
this.flushDisplayed(packet.pts);
|
||
this.displayed_ = createDisplayBuffer();
|
||
} else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) {
|
||
this.nonDisplayed_ = createDisplayBuffer();
|
||
|
||
} else if (data === this.RESUME_DIRECT_CAPTIONING_) {
|
||
this.mode_ = 'paintOn';
|
||
|
||
// Append special characters to caption text
|
||
} else if (this.isSpecialCharacter(char0, char1)) {
|
||
// Bitmask char0 so that we can apply character transformations
|
||
// regardless of field and data channel.
|
||
// Then byte-shift to the left and OR with char1 so we can pass the
|
||
// entire character code to `getCharFromCode`.
|
||
char0 = (char0 & 0x03) << 8;
|
||
text = getCharFromCode(char0 | char1);
|
||
this[this.mode_](packet.pts, text);
|
||
this.column_++;
|
||
|
||
// Append extended characters to caption text
|
||
} else if (this.isExtCharacter(char0, char1)) {
|
||
// Extended characters always follow their "non-extended" equivalents.
|
||
      // I.e. if a "è" is desired, you'll always receive "eè"; non-compliant
|
||
// decoders are supposed to drop the "è", while compliant decoders
|
||
// backspace the "e" and insert "è".
|
||
|
||
// Delete the previous character
|
||
if (this.mode_ === 'popOn') {
|
||
this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1);
|
||
} else {
|
||
this.displayed_[BOTTOM_ROW] = this.displayed_[BOTTOM_ROW].slice(0, -1);
|
||
}
|
||
|
||
// Bitmask char0 so that we can apply character transformations
|
||
// regardless of field and data channel.
|
||
// Then byte-shift to the left and OR with char1 so we can pass the
|
||
// entire character code to `getCharFromCode`.
|
||
char0 = (char0 & 0x03) << 8;
|
||
text = getCharFromCode(char0 | char1);
|
||
this[this.mode_](packet.pts, text);
|
||
this.column_++;
|
||
|
||
// Process mid-row codes
|
||
} else if (this.isMidRowCode(char0, char1)) {
|
||
// Attributes are not additive, so clear all formatting
|
||
this.clearFormatting(packet.pts);
|
||
|
||
// According to the standard, mid-row codes
|
||
// should be replaced with spaces, so add one now
|
||
this[this.mode_](packet.pts, ' ');
|
||
this.column_++;
|
||
|
||
if ((char1 & 0xe) === 0xe) {
|
||
this.addFormatting(packet.pts, ['i']);
|
||
}
|
||
|
||
if ((char1 & 0x1) === 0x1) {
|
||
this.addFormatting(packet.pts, ['u']);
|
||
}
|
||
|
||
// Detect offset control codes and adjust cursor
|
||
} else if (this.isOffsetControlCode(char0, char1)) {
|
||
// Cursor position is set by indent PAC (see below) in 4-column
|
||
// increments, with an additional offset code of 1-3 to reach any
|
||
// of the 32 columns specified by CEA-608. So all we need to do
|
||
// here is increment the column cursor by the given offset.
|
||
this.column_ += (char1 & 0x03);
|
||
|
||
// Detect PACs (Preamble Address Codes)
|
||
} else if (this.isPAC(char0, char1)) {
|
||
|
||
// There's no logic for PAC -> row mapping, so we have to just
|
||
// find the row code in an array and use its index :(
|
||
var row = ROWS.indexOf(data & 0x1f20);
|
||
|
||
if (row !== this.row_) {
|
||
// formatting is only persistent for current row
|
||
this.clearFormatting(packet.pts);
|
||
this.row_ = row;
|
||
}
|
||
// All PACs can apply underline, so detect and apply
|
||
// (All odd-numbered second bytes set underline)
|
||
if ((char1 & 0x1) && (this.formatting_.indexOf('u') === -1)) {
|
||
this.addFormatting(packet.pts, ['u']);
|
||
}
|
||
|
||
if ((data & 0x10) === 0x10) {
|
||
// We've got an indent level code. Each successive even number
|
||
// increments the column cursor by 4, so we can get the desired
|
||
// column position by bit-shifting to the right (to get n/2)
|
||
// and multiplying by 4.
|
||
this.column_ = ((data & 0xe) >> 1) * 4;
|
||
}
|
||
|
||
if (this.isColorPAC(char1)) {
|
||
// it's a color code, though we only support white, which
|
||
// can be either normal or italicized. white italics can be
|
||
// either 0x4e or 0x6e depending on the row, so we just
|
||
// bitwise-and with 0xe to see if italics should be turned on
|
||
if ((char1 & 0xe) === 0xe) {
|
||
this.addFormatting(packet.pts, ['i']);
|
||
}
|
||
}
|
||
|
||
// We have a normal character in char0, and possibly one in char1
|
||
} else if (this.isNormalChar(char0)) {
|
||
if (char1 === 0x00) {
|
||
char1 = null;
|
||
}
|
||
text = getCharFromCode(char0);
|
||
text += getCharFromCode(char1);
|
||
this[this.mode_](packet.pts, text);
|
||
this.column_ += text.length;
|
||
|
||
} // finish data processing
|
||
|
||
};
|
||
};
|
||
Cea608Stream.prototype = new Stream();
|
||
// Trigger a cue point that captures the current state of the
|
||
// display buffer
|
||
Cea608Stream.prototype.flushDisplayed = function(pts) {
|
||
var content = this.displayed_
|
||
// remove spaces from the start and end of the string
|
||
.map(function(row) {
|
||
return row.trim();
|
||
})
|
||
// combine all text rows to display in one cue
|
||
.join('\n')
|
||
// and remove blank rows from the start and end, but not the middle
|
||
.replace(/^\n+|\n+$/g, '');
|
||
|
||
if (content.length) {
|
||
this.trigger('data', {
|
||
startPts: this.startPts_,
|
||
endPts: pts,
|
||
text: content,
|
||
stream: this.name_
|
||
});
|
||
}
|
||
};
|
||
|
||
/**
|
||
* Zero out the data, used for startup and on seek
|
||
*/
|
||
Cea608Stream.prototype.reset = function() {
|
||
this.mode_ = 'popOn';
|
||
// When in roll-up mode, the index of the last row that will
|
||
// actually display captions. If a caption is shifted to a row
|
||
// with a lower index than this, it is cleared from the display
|
||
// buffer
|
||
this.topRow_ = 0;
|
||
this.startPts_ = 0;
|
||
this.displayed_ = createDisplayBuffer();
|
||
this.nonDisplayed_ = createDisplayBuffer();
|
||
this.lastControlCode_ = null;
|
||
|
||
// Track row and column for proper line-breaking and spacing
|
||
this.column_ = 0;
|
||
this.row_ = BOTTOM_ROW;
|
||
|
||
// This variable holds currently-applied formatting
|
||
this.formatting_ = [];
|
||
};
|
||
|
||
/**
|
||
* Sets up control code and related constants for this instance
|
||
*/
|
||
Cea608Stream.prototype.setConstants = function() {
|
||
// The following attributes have these uses:
|
||
// ext_ : char0 for mid-row codes, and the base for extended
|
||
// chars (ext_+0, ext_+1, and ext_+2 are char0s for
|
||
// extended codes)
|
||
// control_: char0 for control codes, except byte-shifted to the
|
||
// left so that we can do this.control_ | CONTROL_CODE
|
||
// offset_: char0 for tab offset codes
|
||
//
|
||
// It's also worth noting that control codes, and _only_ control codes,
|
||
  // differ between field 1 and field 2. Field 2 control codes are always
|
||
// their field 1 value plus 1. That's why there's the "| field" on the
|
||
// control value.
|
||
if (this.dataChannel_ === 0) {
|
||
this.BASE_ = 0x10;
|
||
this.EXT_ = 0x11;
|
||
this.CONTROL_ = (0x14 | this.field_) << 8;
|
||
this.OFFSET_ = 0x17;
|
||
} else if (this.dataChannel_ === 1) {
|
||
this.BASE_ = 0x18;
|
||
this.EXT_ = 0x19;
|
||
this.CONTROL_ = (0x1c | this.field_) << 8;
|
||
this.OFFSET_ = 0x1f;
|
||
}
|
||
|
||
// Constants for the LSByte command codes recognized by Cea608Stream. This
|
||
// list is not exhaustive. For a more comprehensive listing and semantics see
|
||
// http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf
|
||
// Padding
|
||
this.PADDING_ = 0x0000;
|
||
// Pop-on Mode
|
||
this.RESUME_CAPTION_LOADING_ = this.CONTROL_ | 0x20;
|
||
this.END_OF_CAPTION_ = this.CONTROL_ | 0x2f;
|
||
// Roll-up Mode
|
||
this.ROLL_UP_2_ROWS_ = this.CONTROL_ | 0x25;
|
||
this.ROLL_UP_3_ROWS_ = this.CONTROL_ | 0x26;
|
||
this.ROLL_UP_4_ROWS_ = this.CONTROL_ | 0x27;
|
||
this.CARRIAGE_RETURN_ = this.CONTROL_ | 0x2d;
|
||
// paint-on mode (not supported)
|
||
this.RESUME_DIRECT_CAPTIONING_ = this.CONTROL_ | 0x29;
|
||
// Erasure
|
||
this.BACKSPACE_ = this.CONTROL_ | 0x21;
|
||
this.ERASE_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2c;
|
||
this.ERASE_NON_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2e;
|
||
};
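// For example, with field 0 and data channel 0 the control base is
// (0x14 | 0) << 8 === 0x1400, so RESUME_CAPTION_LOADING_ works out to 0x1420.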
|
||
|
||
/**
|
||
* Detects if the 2-byte packet data is a special character
|
||
*
|
||
* Special characters have a second byte in the range 0x30 to 0x3f,
|
||
* with the first byte being 0x11 (for data channel 1) or 0x19 (for
|
||
* data channel 2).
|
||
*
|
||
* @param {Integer} char0 The first byte
|
||
* @param {Integer} char1 The second byte
|
||
 * @return {Boolean} Whether the 2 bytes are a special character
|
||
*/
|
||
Cea608Stream.prototype.isSpecialCharacter = function(char0, char1) {
|
||
return (char0 === this.EXT_ && char1 >= 0x30 && char1 <= 0x3f);
|
||
};
|
||
|
||
/**
|
||
* Detects if the 2-byte packet data is an extended character
|
||
*
|
||
* Extended characters have a second byte in the range 0x20 to 0x3f,
|
||
* with the first byte being 0x12 or 0x13 (for data channel 1) or
|
||
* 0x1a or 0x1b (for data channel 2).
|
||
*
|
||
* @param {Integer} char0 The first byte
|
||
* @param {Integer} char1 The second byte
|
||
* @return {Boolean} Whether the 2 bytes are an extended character
|
||
*/
|
||
Cea608Stream.prototype.isExtCharacter = function(char0, char1) {
|
||
return ((char0 === (this.EXT_ + 1) || char0 === (this.EXT_ + 2)) &&
|
||
(char1 >= 0x20 && char1 <= 0x3f));
|
||
};
|
||
|
||
/**
|
||
* Detects if the 2-byte packet is a mid-row code
|
||
*
|
||
* Mid-row codes have a second byte in the range 0x20 to 0x2f, with
|
||
* the first byte being 0x11 (for data channel 1) or 0x19 (for data
|
||
* channel 2).
|
||
*
|
||
* @param {Integer} char0 The first byte
|
||
* @param {Integer} char1 The second byte
|
||
* @return {Boolean} Whether the 2 bytes are a mid-row code
|
||
*/
|
||
Cea608Stream.prototype.isMidRowCode = function(char0, char1) {
|
||
return (char0 === this.EXT_ && (char1 >= 0x20 && char1 <= 0x2f));
|
||
};
|
||
|
||
/**
|
||
* Detects if the 2-byte packet is an offset control code
|
||
*
|
||
* Offset control codes have a second byte in the range 0x21 to 0x23,
|
||
* with the first byte being 0x17 (for data channel 1) or 0x1f (for
|
||
* data channel 2).
|
||
*
|
||
* @param {Integer} char0 The first byte
|
||
* @param {Integer} char1 The second byte
|
||
* @return {Boolean} Whether the 2 bytes are an offset control code
|
||
*/
|
||
Cea608Stream.prototype.isOffsetControlCode = function(char0, char1) {
|
||
return (char0 === this.OFFSET_ && (char1 >= 0x21 && char1 <= 0x23));
|
||
};
|
||
|
||
/**
|
||
* Detects if the 2-byte packet is a Preamble Address Code
|
||
*
|
||
* PACs have a first byte in the range 0x10 to 0x17 (for data channel 1)
|
||
* or 0x18 to 0x1f (for data channel 2), with the second byte in the
|
||
* range 0x40 to 0x7f.
|
||
*
|
||
* @param {Integer} char0 The first byte
|
||
* @param {Integer} char1 The second byte
|
||
* @return {Boolean} Whether the 2 bytes are a PAC
|
||
*/
|
||
Cea608Stream.prototype.isPAC = function(char0, char1) {
|
||
return (char0 >= this.BASE_ && char0 < (this.BASE_ + 8) &&
|
||
(char1 >= 0x40 && char1 <= 0x7f));
|
||
};
|
||
|
||
/**
|
||
* Detects if a packet's second byte is in the range of a PAC color code
|
||
*
|
||
* PAC color codes have the second byte be in the range 0x40 to 0x4f, or
|
||
* 0x60 to 0x6f.
|
||
*
|
||
* @param {Integer} char1 The second byte
|
||
* @return {Boolean} Whether the byte is a color PAC
|
||
*/
|
||
Cea608Stream.prototype.isColorPAC = function(char1) {
|
||
return ((char1 >= 0x40 && char1 <= 0x4f) || (char1 >= 0x60 && char1 <= 0x7f));
|
||
};
|
||
|
||
/**
|
||
* Detects if a single byte is in the range of a normal character
|
||
*
|
||
* Normal text bytes are in the range 0x20 to 0x7f.
|
||
*
|
||
* @param {Integer} char The byte
|
||
* @return {Boolean} Whether the byte is a normal character
|
||
*/
|
||
Cea608Stream.prototype.isNormalChar = function(char) {
|
||
return (char >= 0x20 && char <= 0x7f);
|
||
};
|
||
|
||
// Adds the opening HTML tag for the passed character to the caption text,
|
||
// and keeps track of it for later closing
|
||
Cea608Stream.prototype.addFormatting = function(pts, format) {
|
||
this.formatting_ = this.formatting_.concat(format);
|
||
var text = format.reduce(function(text, format) {
|
||
return text + '<' + format + '>';
|
||
}, '');
|
||
this[this.mode_](pts, text);
|
||
};
|
||
|
||
// Adds HTML closing tags for current formatting to caption text and
|
||
// clears remembered formatting
|
||
Cea608Stream.prototype.clearFormatting = function(pts) {
|
||
if (!this.formatting_.length) {
|
||
return;
|
||
}
|
||
var text = this.formatting_.reverse().reduce(function(text, format) {
|
||
return text + '</' + format + '>';
|
||
}, '');
|
||
this.formatting_ = [];
|
||
this[this.mode_](pts, text);
|
||
};
|
||
|
||
// Mode Implementations
|
||
Cea608Stream.prototype.popOn = function(pts, text) {
|
||
var baseRow = this.nonDisplayed_[this.row_];
|
||
|
||
// buffer characters
|
||
baseRow += text;
|
||
this.nonDisplayed_[this.row_] = baseRow;
|
||
};
|
||
|
||
Cea608Stream.prototype.rollUp = function(pts, text) {
|
||
var baseRow = this.displayed_[BOTTOM_ROW];
|
||
|
||
baseRow += text;
|
||
this.displayed_[BOTTOM_ROW] = baseRow;
|
||
|
||
};
|
||
|
||
Cea608Stream.prototype.shiftRowsUp_ = function() {
|
||
var i;
|
||
// clear out inactive rows
|
||
for (i = 0; i < this.topRow_; i++) {
|
||
this.displayed_[i] = '';
|
||
}
|
||
// shift displayed rows up
|
||
for (i = this.topRow_; i < BOTTOM_ROW; i++) {
|
||
this.displayed_[i] = this.displayed_[i + 1];
|
||
}
|
||
// clear out the bottom row
|
||
this.displayed_[BOTTOM_ROW] = '';
|
||
};
|
||
|
||
// paintOn mode is not implemented
|
||
Cea608Stream.prototype.paintOn = function() {};
|
||
|
||
// exports
|
||
module.exports = {
|
||
CaptionStream: CaptionStream,
|
||
Cea608Stream: Cea608Stream
|
||
};
|
||
|
||
},{"../utils/stream":33}],18:[function(require,module,exports){
|
||
module.exports = require('./m2ts');
|
||
|
||
},{"./m2ts":19}],19:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* A stream-based mp2t to mp4 converter. This utility can be used to
|
||
* deliver mp4s to a SourceBuffer on platforms that support native
|
||
* Media Source Extensions.
|
||
*/
|
||
'use strict';
|
||
var Stream = require('../utils/stream.js'),
|
||
CaptionStream = require('./caption-stream'),
|
||
StreamTypes = require('./stream-types'),
|
||
TimestampRolloverStream = require('./timestamp-rollover-stream').TimestampRolloverStream;
|
||
|
||
var m2tsStreamTypes = require('./stream-types.js');
|
||
|
||
// object types
|
||
var TransportPacketStream, TransportParseStream, ElementaryStream;
|
||
|
||
// constants
|
||
var
|
||
MP2T_PACKET_LENGTH = 188, // bytes
|
||
SYNC_BYTE = 0x47;
|
||
|
||
/**
|
||
* Splits an incoming stream of binary data into MPEG-2 Transport
|
||
* Stream packets.
|
||
*/
|
||
TransportPacketStream = function() {
|
||
var
|
||
buffer = new Uint8Array(MP2T_PACKET_LENGTH),
|
||
bytesInBuffer = 0;
|
||
|
||
TransportPacketStream.prototype.init.call(this);
|
||
|
||
// Deliver new bytes to the stream.
|
||
|
||
this.push = function(bytes) {
|
||
var
|
||
startIndex = 0,
|
||
endIndex = MP2T_PACKET_LENGTH,
|
||
everything;
|
||
|
||
// If there are bytes remaining from the last segment, prepend them to the
|
||
// bytes that were pushed in
|
||
if (bytesInBuffer) {
|
||
everything = new Uint8Array(bytes.byteLength + bytesInBuffer);
|
||
everything.set(buffer.subarray(0, bytesInBuffer));
|
||
everything.set(bytes, bytesInBuffer);
|
||
bytesInBuffer = 0;
|
||
} else {
|
||
everything = bytes;
|
||
}
|
||
|
||
// While we have enough data for a packet
|
||
while (endIndex < everything.byteLength) {
|
||
      // Look for a pair of start and end sync bytes in the data.
|
||
if (everything[startIndex] === SYNC_BYTE && everything[endIndex] === SYNC_BYTE) {
|
||
// We found a packet so emit it and jump one whole packet forward in
|
||
// the stream
|
||
this.trigger('data', everything.subarray(startIndex, endIndex));
|
||
startIndex += MP2T_PACKET_LENGTH;
|
||
endIndex += MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex++;
|
||
endIndex++;
|
||
}
|
||
|
||
// If there was some data left over at the end of the segment that couldn't
|
||
// possibly be a whole packet, keep it because it might be the start of a packet
|
||
// that continues in the next segment
|
||
if (startIndex < everything.byteLength) {
|
||
buffer.set(everything.subarray(startIndex), 0);
|
||
bytesInBuffer = everything.byteLength - startIndex;
|
||
}
|
||
};
|
||
|
||
this.flush = function() {
|
||
// If the buffer contains a whole packet when we are being flushed, emit it
|
||
// and empty the buffer. Otherwise hold onto the data because it may be
|
||
// important for decoding the next segment
|
||
if (bytesInBuffer === MP2T_PACKET_LENGTH && buffer[0] === SYNC_BYTE) {
|
||
this.trigger('data', buffer);
|
||
bytesInBuffer = 0;
|
||
}
|
||
this.trigger('done');
|
||
};
|
||
};
|
||
TransportPacketStream.prototype = new Stream();
|
||
|
||
/**
|
||
* Accepts an MP2T TransportPacketStream and emits data events with parsed
|
||
* forms of the individual transport stream packets.
|
||
*/
|
||
TransportParseStream = function() {
|
||
var parsePsi, parsePat, parsePmt, self;
|
||
TransportParseStream.prototype.init.call(this);
|
||
self = this;
|
||
|
||
this.packetsWaitingForPmt = [];
|
||
this.programMapTable = undefined;
|
||
|
||
parsePsi = function(payload, psi) {
|
||
var offset = 0;
|
||
|
||
// PSI packets may be split into multiple sections and those
|
||
// sections may be split into multiple packets. If a PSI
|
||
// section starts in this packet, the payload_unit_start_indicator
|
||
// will be true and the first byte of the payload will indicate
|
||
// the offset from the current position to the start of the
|
||
// section.
|
||
if (psi.payloadUnitStartIndicator) {
|
||
offset += payload[offset] + 1;
|
||
}
|
||
|
||
if (psi.type === 'pat') {
|
||
parsePat(payload.subarray(offset), psi);
|
||
} else {
|
||
parsePmt(payload.subarray(offset), psi);
|
||
}
|
||
};
|
||
|
||
parsePat = function(payload, pat) {
|
||
pat.section_number = payload[7]; // eslint-disable-line camelcase
|
||
pat.last_section_number = payload[8]; // eslint-disable-line camelcase
|
||
|
||
// skip the PSI header and parse the first PMT entry
|
||
self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];
|
||
pat.pmtPid = self.pmtPid;
|
||
};
|
||
|
||
/**
|
||
* Parse out the relevant fields of a Program Map Table (PMT).
|
||
* @param payload {Uint8Array} the PMT-specific portion of an MP2T
|
||
* packet. The first byte in this array should be the table_id
|
||
* field.
|
||
* @param pmt {object} the object that should be decorated with
|
||
* fields parsed from the PMT.
|
||
*/
|
||
parsePmt = function(payload, pmt) {
|
||
var sectionLength, tableEnd, programInfoLength, offset;
|
||
|
||
// PMTs can be sent ahead of the time when they should actually
|
||
// take effect. We don't believe this should ever be the case
|
||
// for HLS but we'll ignore "forward" PMT declarations if we see
|
||
// them. Future PMT declarations have the current_next_indicator
|
||
// set to zero.
|
||
if (!(payload[5] & 0x01)) {
|
||
return;
|
||
}
|
||
|
||
// overwrite any existing program map table
|
||
self.programMapTable = {
|
||
video: null,
|
||
audio: null,
|
||
'timed-metadata': {}
|
||
};
|
||
|
||
// the mapping table ends at the end of the current section
|
||
sectionLength = (payload[1] & 0x0f) << 8 | payload[2];
|
||
tableEnd = 3 + sectionLength - 4;
|
||
|
||
// to determine where the table is, we have to figure out how
|
||
// long the program info descriptors are
|
||
programInfoLength = (payload[10] & 0x0f) << 8 | payload[11];
|
||
|
||
// advance the offset to the first entry in the mapping table
|
||
offset = 12 + programInfoLength;
|
||
while (offset < tableEnd) {
|
||
var streamType = payload[offset];
|
||
var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2];
|
||
|
||
// only map a single elementary_pid for audio and video stream types
|
||
// TODO: should this be done for metadata too? for now maintain behavior of
|
||
// multiple metadata streams
|
||
if (streamType === StreamTypes.H264_STREAM_TYPE &&
|
||
self.programMapTable.video === null) {
|
||
self.programMapTable.video = pid;
|
||
} else if (streamType === StreamTypes.ADTS_STREAM_TYPE &&
|
||
self.programMapTable.audio === null) {
|
||
self.programMapTable.audio = pid;
|
||
} else if (streamType === StreamTypes.METADATA_STREAM_TYPE) {
|
||
// map pid to stream type for metadata streams
|
||
self.programMapTable['timed-metadata'][pid] = streamType;
|
||
}
|
||
|
||
// move to the next table entry
|
||
// skip past the elementary stream descriptors, if present
|
||
offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;
|
||
}
|
||
|
||
// record the map on the packet as well
|
||
pmt.programMapTable = self.programMapTable;
|
||
};
|
||
|
||
/**
|
||
* Deliver a new MP2T packet to the stream.
|
||
*/
|
||
this.push = function(packet) {
|
||
var
|
||
result = {},
|
||
offset = 4;
|
||
|
||
result.payloadUnitStartIndicator = !!(packet[1] & 0x40);
|
||
|
||
// pid is a 13-bit field starting at the last bit of packet[1]
|
||
result.pid = packet[1] & 0x1f;
|
||
result.pid <<= 8;
|
||
result.pid |= packet[2];
|
||
|
||
    // if an adaptation field is present, its length is specified by the
|
||
// fifth byte of the TS packet header. The adaptation field is
|
||
// used to add stuffing to PES packets that don't fill a complete
|
||
// TS packet, and to specify some forms of timing and control data
|
||
// that we do not currently use.
|
||
if (((packet[3] & 0x30) >>> 4) > 0x01) {
|
||
offset += packet[offset] + 1;
|
||
}
|
||
|
||
// parse the rest of the packet based on the type
|
||
if (result.pid === 0) {
|
||
result.type = 'pat';
|
||
parsePsi(packet.subarray(offset), result);
|
||
this.trigger('data', result);
|
||
} else if (result.pid === this.pmtPid) {
|
||
result.type = 'pmt';
|
||
parsePsi(packet.subarray(offset), result);
|
||
this.trigger('data', result);
|
||
|
||
// if there are any packets waiting for a PMT to be found, process them now
|
||
while (this.packetsWaitingForPmt.length) {
|
||
this.processPes_.apply(this, this.packetsWaitingForPmt.shift());
|
||
}
|
||
} else if (this.programMapTable === undefined) {
|
||
// When we have not seen a PMT yet, defer further processing of
|
||
// PES packets until one has been parsed
|
||
this.packetsWaitingForPmt.push([packet, offset, result]);
|
||
} else {
|
||
this.processPes_(packet, offset, result);
|
||
}
|
||
};
|
||
|
||
this.processPes_ = function(packet, offset, result) {
|
||
// set the appropriate stream type
|
||
if (result.pid === this.programMapTable.video) {
|
||
result.streamType = StreamTypes.H264_STREAM_TYPE;
|
||
} else if (result.pid === this.programMapTable.audio) {
|
||
result.streamType = StreamTypes.ADTS_STREAM_TYPE;
|
||
} else {
|
||
// if not video or audio, it is timed-metadata or unknown
|
||
// if unknown, streamType will be undefined
|
||
result.streamType = this.programMapTable['timed-metadata'][result.pid];
|
||
}
|
||
|
||
result.type = 'pes';
|
||
result.data = packet.subarray(offset);
|
||
|
||
this.trigger('data', result);
|
||
};
|
||
|
||
};
|
||
TransportParseStream.prototype = new Stream();
|
||
TransportParseStream.STREAM_TYPES = {
|
||
h264: 0x1b,
|
||
adts: 0x0f
|
||
};
|
||
|
||
/**
|
||
 * Reconstitutes program elementary stream (PES) packets from parsed
|
||
* transport stream packets. That is, if you pipe an
|
||
* mp2t.TransportParseStream into a mp2t.ElementaryStream, the output
|
||
* events will be events which capture the bytes for individual PES
|
||
* packets plus relevant metadata that has been extracted from the
|
||
* container.
|
||
*/
|
||
ElementaryStream = function() {
|
||
var
|
||
self = this,
|
||
// PES packet fragments
|
||
video = {
|
||
data: [],
|
||
size: 0
|
||
},
|
||
audio = {
|
||
data: [],
|
||
size: 0
|
||
},
|
||
timedMetadata = {
|
||
data: [],
|
||
size: 0
|
||
},
|
||
parsePes = function(payload, pes) {
|
||
var ptsDtsFlags;
|
||
|
||
// get the packet length, this will be 0 for video
|
||
pes.packetLength = 6 + ((payload[4] << 8) | payload[5]);
|
||
|
||
      // find out if this packet starts a new keyframe
|
||
pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0;
|
||
// PES packets may be annotated with a PTS value, or a PTS value
|
||
// and a DTS value. Determine what combination of values is
|
||
// available to work with.
|
||
ptsDtsFlags = payload[7];
|
||
|
||
// PTS and DTS are normally stored as a 33-bit number. Javascript
|
||
// performs all bitwise operations on 32-bit integers but javascript
|
||
      // supports a much greater range (52 bits) of integers using standard
|
||
// mathematical operations.
|
||
// We construct a 31-bit value using bitwise operators over the 31
|
||
// most significant bits and then multiply by 4 (equal to a left-shift
|
||
// of 2) before we add the final 2 least significant bits of the
|
||
// timestamp (equal to an OR.)
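      // For example, the largest 33-bit timestamp, 0x1FFFFFFFF, is rebuilt as
      // 0x7FFFFFFF * 4 + 3 = 8589934591, i.e. 2^33 - 1.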
|
||
if (ptsDtsFlags & 0xC0) {
|
||
// the PTS and DTS are not written out directly. For information
|
||
// on how they are encoded, see
|
||
// http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
|
||
pes.pts = (payload[9] & 0x0E) << 27 |
|
||
(payload[10] & 0xFF) << 20 |
|
||
(payload[11] & 0xFE) << 12 |
|
||
(payload[12] & 0xFF) << 5 |
|
||
(payload[13] & 0xFE) >>> 3;
|
||
pes.pts *= 4; // Left shift by 2
|
||
pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs
|
||
pes.dts = pes.pts;
|
||
if (ptsDtsFlags & 0x40) {
|
||
pes.dts = (payload[14] & 0x0E) << 27 |
|
||
(payload[15] & 0xFF) << 20 |
|
||
(payload[16] & 0xFE) << 12 |
|
||
(payload[17] & 0xFF) << 5 |
|
||
(payload[18] & 0xFE) >>> 3;
|
||
pes.dts *= 4; // Left shift by 2
|
||
pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs
|
||
}
|
||
}
|
||
// the data section starts immediately after the PES header.
|
||
// pes_header_data_length specifies the number of header bytes
|
||
// that follow the last byte of the field.
|
||
pes.data = payload.subarray(9 + payload[8]);
|
||
},
|
||
flushStream = function(stream, type, forceFlush) {
|
||
var
|
||
packetData = new Uint8Array(stream.size),
|
||
event = {
|
||
type: type
|
||
},
|
||
i = 0,
|
||
offset = 0,
|
||
packetFlushable = false,
|
||
fragment;
|
||
|
||
// do nothing if there is not enough buffered data for a complete
|
||
// PES header
|
||
if (!stream.data.length || stream.size < 9) {
|
||
return;
|
||
}
|
||
event.trackId = stream.data[0].pid;
|
||
|
||
// reassemble the packet
|
||
for (i = 0; i < stream.data.length; i++) {
|
||
fragment = stream.data[i];
|
||
|
||
packetData.set(fragment.data, offset);
|
||
offset += fragment.data.byteLength;
|
||
}
|
||
|
||
// parse assembled packet's PES header
|
||
parsePes(packetData, event);
|
||
|
||
// non-video PES packets MUST have a non-zero PES_packet_length
|
||
// check that there is enough stream data to fill the packet
|
||
packetFlushable = type === 'video' || event.packetLength <= stream.size;
|
||
|
||
// flush pending packets if the conditions are right
|
||
if (forceFlush || packetFlushable) {
|
||
stream.size = 0;
|
||
stream.data.length = 0;
|
||
}
|
||
|
||
// only emit packets that are complete. this is to avoid assembling
|
||
// incomplete PES packets due to poor segmentation
|
||
if (packetFlushable) {
|
||
self.trigger('data', event);
|
||
}
|
||
};
|
||
|
||
ElementaryStream.prototype.init.call(this);
|
||
|
||
this.push = function(data) {
|
||
({
|
||
pat: function() {
|
||
// we have to wait for the PMT to arrive as well before we
|
||
// have any meaningful metadata
|
||
},
|
||
pes: function() {
|
||
var stream, streamType;
|
||
|
||
switch (data.streamType) {
|
||
case StreamTypes.H264_STREAM_TYPE:
|
||
case m2tsStreamTypes.H264_STREAM_TYPE:
|
||
stream = video;
|
||
streamType = 'video';
|
||
break;
|
||
case StreamTypes.ADTS_STREAM_TYPE:
|
||
stream = audio;
|
||
streamType = 'audio';
|
||
break;
|
||
case StreamTypes.METADATA_STREAM_TYPE:
|
||
stream = timedMetadata;
|
||
streamType = 'timed-metadata';
|
||
break;
|
||
default:
|
||
// ignore unknown stream types
|
||
return;
|
||
}
|
||
|
||
// if a new packet is starting, we can flush the completed
|
||
// packet
|
||
if (data.payloadUnitStartIndicator) {
|
||
flushStream(stream, streamType, true);
|
||
}
|
||
|
||
// buffer this fragment until we are sure we've received the
|
||
// complete payload
|
||
stream.data.push(data);
|
||
stream.size += data.data.byteLength;
|
||
},
|
||
pmt: function() {
|
||
var
|
||
event = {
|
||
type: 'metadata',
|
||
tracks: []
|
||
},
|
||
programMapTable = data.programMapTable;
|
||
|
||
// translate audio and video streams to tracks
|
||
if (programMapTable.video !== null) {
|
||
event.tracks.push({
|
||
timelineStartInfo: {
|
||
baseMediaDecodeTime: 0
|
||
},
|
||
id: +programMapTable.video,
|
||
codec: 'avc',
|
||
type: 'video'
|
||
});
|
||
}
|
||
if (programMapTable.audio !== null) {
|
||
event.tracks.push({
|
||
timelineStartInfo: {
|
||
baseMediaDecodeTime: 0
|
||
},
|
||
id: +programMapTable.audio,
|
||
codec: 'adts',
|
||
type: 'audio'
|
||
});
|
||
}
|
||
|
||
self.trigger('data', event);
|
||
}
|
||
})[data.type]();
|
||
};
|
||
|
||
/**
|
||
* Flush any remaining input. Video PES packets may be of variable
|
||
* length. Normally, the start of a new video packet can trigger the
|
||
* finalization of the previous packet. That is not possible if no
|
||
* more video is forthcoming, however. In that case, some other
|
||
* mechanism (like the end of the file) has to be employed. When it is
|
||
* clear that no additional data is forthcoming, calling this method
|
||
* will flush the buffered packets.
|
||
*/
|
||
this.flush = function() {
|
||
// !!THIS ORDER IS IMPORTANT!!
|
||
// video first then audio
|
||
flushStream(video, 'video');
|
||
flushStream(audio, 'audio');
|
||
flushStream(timedMetadata, 'timed-metadata');
|
||
this.trigger('done');
|
||
};
|
||
};
|
||
ElementaryStream.prototype = new Stream();
|
||
|
||
var m2ts = {
|
||
PAT_PID: 0x0000,
|
||
MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH,
|
||
TransportPacketStream: TransportPacketStream,
|
||
TransportParseStream: TransportParseStream,
|
||
ElementaryStream: ElementaryStream,
|
||
TimestampRolloverStream: TimestampRolloverStream,
|
||
CaptionStream: CaptionStream.CaptionStream,
|
||
Cea608Stream: CaptionStream.Cea608Stream,
|
||
MetadataStream: require('./metadata-stream')
|
||
};
|
||
|
||
for (var type in StreamTypes) {
|
||
if (StreamTypes.hasOwnProperty(type)) {
|
||
m2ts[type] = StreamTypes[type];
|
||
}
|
||
}
|
||
|
||
module.exports = m2ts;
|
||
|
||
},{"../utils/stream.js":33,"./caption-stream":17,"./metadata-stream":20,"./stream-types":22,"./stream-types.js":22,"./timestamp-rollover-stream":23}],20:[function(require,module,exports){
|
||
/**
|
||
* Accepts program elementary stream (PES) data events and parses out
|
||
* ID3 metadata from them, if present.
|
||
* @see http://id3.org/id3v2.3.0
|
||
*/
|
||
'use strict';
|
||
var
|
||
Stream = require('../utils/stream'),
|
||
StreamTypes = require('./stream-types'),
|
||
// return a percent-encoded representation of the specified byte range
|
||
// @see http://en.wikipedia.org/wiki/Percent-encoding
|
||
percentEncode = function(bytes, start, end) {
|
||
var i, result = '';
|
||
for (i = start; i < end; i++) {
|
||
result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
|
||
}
|
||
return result;
|
||
},
|
||
// return the string representation of the specified byte range,
|
||
  // interpreted as UTF-8.
|
||
parseUtf8 = function(bytes, start, end) {
|
||
return decodeURIComponent(percentEncode(bytes, start, end));
|
||
},
|
||
// return the string representation of the specified byte range,
|
||
// interpreted as ISO-8859-1.
|
||
parseIso88591 = function(bytes, start, end) {
|
||
return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
|
||
},
|
||
parseSyncSafeInteger = function(data) {
|
||
return (data[0] << 21) |
|
||
(data[1] << 14) |
|
||
(data[2] << 7) |
|
||
(data[3]);
|
||
},
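  // For example, the sync-safe bytes [0x00, 0x00, 0x02, 0x01] decode to
  // (2 << 7) | 1 = 257.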
|
||
tagParsers = {
|
||
TXXX: function(tag) {
|
||
var i;
|
||
if (tag.data[0] !== 3) {
|
||
// ignore frames with unrecognized character encodings
|
||
return;
|
||
}
|
||
|
||
for (i = 1; i < tag.data.length; i++) {
|
||
if (tag.data[i] === 0) {
|
||
// parse the text fields
|
||
tag.description = parseUtf8(tag.data, 1, i);
|
||
// do not include the null terminator in the tag value
|
||
tag.value = parseUtf8(tag.data, i + 1, tag.data.length).replace(/\0*$/, '');
|
||
break;
|
||
}
|
||
}
|
||
tag.data = tag.value;
|
||
},
|
||
WXXX: function(tag) {
|
||
var i;
|
||
if (tag.data[0] !== 3) {
|
||
// ignore frames with unrecognized character encodings
|
||
return;
|
||
}
|
||
|
||
for (i = 1; i < tag.data.length; i++) {
|
||
if (tag.data[i] === 0) {
|
||
// parse the description and URL fields
|
||
tag.description = parseUtf8(tag.data, 1, i);
|
||
tag.url = parseUtf8(tag.data, i + 1, tag.data.length);
|
||
break;
|
||
}
|
||
}
|
||
},
|
||
PRIV: function(tag) {
|
||
var i;
|
||
|
||
for (i = 0; i < tag.data.length; i++) {
|
||
if (tag.data[i] === 0) {
|
||
          // parse out the owner field
|
||
tag.owner = parseIso88591(tag.data, 0, i);
|
||
break;
|
||
}
|
||
}
|
||
tag.privateData = tag.data.subarray(i + 1);
|
||
tag.data = tag.privateData;
|
||
}
|
||
},
|
||
MetadataStream;
|
||
|
||
MetadataStream = function(options) {
|
||
var
|
||
settings = {
|
||
debug: !!(options && options.debug),
|
||
|
||
// the bytes of the program-level descriptor field in MP2T
|
||
// see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
|
||
// program element descriptors"
|
||
descriptor: options && options.descriptor
|
||
},
|
||
// the total size in bytes of the ID3 tag being parsed
|
||
tagSize = 0,
|
||
// tag data that is not complete enough to be parsed
|
||
buffer = [],
|
||
// the total number of bytes currently in the buffer
|
||
bufferSize = 0,
|
||
i;
|
||
|
||
MetadataStream.prototype.init.call(this);
|
||
|
||
// calculate the text track in-band metadata track dispatch type
|
||
// https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
|
||
this.dispatchType = StreamTypes.METADATA_STREAM_TYPE.toString(16);
|
||
if (settings.descriptor) {
|
||
for (i = 0; i < settings.descriptor.length; i++) {
|
||
this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
|
||
}
|
||
}
|
||
|
||
this.push = function(chunk) {
|
||
var tag, frameStart, frameSize, frame, i, frameHeader;
|
||
if (chunk.type !== 'timed-metadata') {
|
||
return;
|
||
}
|
||
|
||
// if data_alignment_indicator is set in the PES header,
|
||
// we must have the start of a new ID3 tag. Assume anything
|
||
// remaining in the buffer was malformed and throw it out
|
||
if (chunk.dataAlignmentIndicator) {
|
||
bufferSize = 0;
|
||
buffer.length = 0;
|
||
}
|
||
|
||
// ignore events that don't look like ID3 data
|
||
if (buffer.length === 0 &&
|
||
(chunk.data.length < 10 ||
|
||
chunk.data[0] !== 'I'.charCodeAt(0) ||
|
||
chunk.data[1] !== 'D'.charCodeAt(0) ||
|
||
chunk.data[2] !== '3'.charCodeAt(0))) {
|
||
if (settings.debug) {
|
||
// eslint-disable-next-line no-console
|
||
console.log('Skipping unrecognized metadata packet');
|
||
}
|
||
return;
|
||
}
|
||
|
||
// add this chunk to the data we've collected so far
|
||
|
||
buffer.push(chunk);
|
||
bufferSize += chunk.data.byteLength;
|
||
|
||
// grab the size of the entire frame from the ID3 header
|
||
if (buffer.length === 1) {
|
||
// the frame size is transmitted as a 28-bit integer in the
|
||
// last four bytes of the ID3 header.
|
||
// The most significant bit of each byte is dropped and the
|
||
// results concatenated to recover the actual value.
|
||
      tagSize = parseSyncSafeInteger(chunk.data.subarray(6, 10));

      // ID3 reports the tag size excluding the header but it's more
      // convenient for our comparisons to include it
      tagSize += 10;
    }

    // if the entire frame has not arrived, wait for more data
    if (bufferSize < tagSize) {
      return;
    }

    // collect the entire frame so it can be parsed
    tag = {
      data: new Uint8Array(tagSize),
      frames: [],
      pts: buffer[0].pts,
      dts: buffer[0].dts
    };
    for (i = 0; i < tagSize;) {
      tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);
      i += buffer[0].data.byteLength;
      bufferSize -= buffer[0].data.byteLength;
      buffer.shift();
    }

    // find the start of the first frame and the end of the tag
    frameStart = 10;
    if (tag.data[5] & 0x40) {
      // advance the frame start past the extended header
      frameStart += 4; // header size field
      frameStart += parseSyncSafeInteger(tag.data.subarray(10, 14));

      // clip any padding off the end
      tagSize -= parseSyncSafeInteger(tag.data.subarray(16, 20));
    }

    // parse one or more ID3 frames
    // http://id3.org/id3v2.3.0#ID3v2_frame_overview
    do {
      // determine the number of bytes in this frame
      frameSize = parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));
      if (frameSize < 1) {
        // eslint-disable-next-line no-console
        return console.log('Malformed ID3 frame encountered. Skipping metadata parsing.');
      }
      frameHeader = String.fromCharCode(tag.data[frameStart],
                                        tag.data[frameStart + 1],
                                        tag.data[frameStart + 2],
                                        tag.data[frameStart + 3]);

      frame = {
        id: frameHeader,
        data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
      };
      frame.key = frame.id;
      if (tagParsers[frame.id]) {
        tagParsers[frame.id](frame);

        // handle the special PRIV frame used to indicate the start
        // time for raw AAC data
        if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {
          var
            d = frame.data,
            size = ((d[3] & 0x01) << 30) |
                   (d[4] << 22) |
                   (d[5] << 14) |
                   (d[6] << 6) |
                   (d[7] >>> 2);

          size *= 4;
          size += d[7] & 0x03;
          frame.timeStamp = size;
          // in raw AAC, all subsequent data will be timestamped based
          // on the value of this frame
          // we couldn't have known the appropriate pts and dts before
          // parsing this ID3 tag so set those values now
          if (tag.pts === undefined && tag.dts === undefined) {
            tag.pts = frame.timeStamp;
            tag.dts = frame.timeStamp;
          }
          this.trigger('timestamp', frame);
        }
      }
      tag.frames.push(frame);

      frameStart += 10; // advance past the frame header
      frameStart += frameSize; // advance past the frame body
    } while (frameStart < tagSize);
    this.trigger('data', tag);
  };
};
MetadataStream.prototype = new Stream();

module.exports = MetadataStream;

},{"../utils/stream":33,"./stream-types":22}],21:[function(require,module,exports){
/**
 * mux.js
 *
 * Copyright (c) 2016 Brightcove
 * All rights reserved.
 *
 * Utilities to detect basic properties and metadata about TS Segments.
 */
'use strict';

var StreamTypes = require('./stream-types.js');

var parsePid = function(packet) {
  var pid = packet[1] & 0x1f;
  pid <<= 8;
  pid |= packet[2];
  return pid;
};

var parsePayloadUnitStartIndicator = function(packet) {
  return !!(packet[1] & 0x40);
};

var parseAdaptionField = function(packet) {
  var offset = 0;
  // if an adaption field is present, its length is specified by the
  // fifth byte of the TS packet header. The adaptation field is
  // used to add stuffing to PES packets that don't fill a complete
  // TS packet, and to specify some forms of timing and control data
  // that we do not currently use.
  if (((packet[3] & 0x30) >>> 4) > 0x01) {
    offset += packet[4] + 1;
  }
  return offset;
};

var parseType = function(packet, pmtPid) {
  var pid = parsePid(packet);
  if (pid === 0) {
    return 'pat';
  } else if (pid === pmtPid) {
    return 'pmt';
  } else if (pmtPid) {
    return 'pes';
  }
  return null;
};

var parsePat = function(packet) {
  var pusi = parsePayloadUnitStartIndicator(packet);
  var offset = 4 + parseAdaptionField(packet);

  if (pusi) {
    offset += packet[offset] + 1;
  }

  return (packet[offset + 10] & 0x1f) << 8 | packet[offset + 11];
};

var parsePmt = function(packet) {
  var programMapTable = {};
  var pusi = parsePayloadUnitStartIndicator(packet);
  var payloadOffset = 4 + parseAdaptionField(packet);

  if (pusi) {
    payloadOffset += packet[payloadOffset] + 1;
  }

  // PMTs can be sent ahead of the time when they should actually
  // take effect. We don't believe this should ever be the case
  // for HLS but we'll ignore "forward" PMT declarations if we see
  // them. Future PMT declarations have the current_next_indicator
  // set to zero.
  if (!(packet[payloadOffset + 5] & 0x01)) {
    return;
  }

  var sectionLength, tableEnd, programInfoLength;
  // the mapping table ends at the end of the current section
  sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];
  tableEnd = 3 + sectionLength - 4;

  // to determine where the table is, we have to figure out how
  // long the program info descriptors are
  programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11];

  // advance the offset to the first entry in the mapping table
  var offset = 12 + programInfoLength;
  while (offset < tableEnd) {
    var i = payloadOffset + offset;
    // add an entry that maps the elementary_pid to the stream_type
    programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i];

    // move to the next table entry
    // skip past the elementary stream descriptors, if present
    offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;
  }
  return programMapTable;
};

var parsePesType = function(packet, programMapTable) {
  var pid = parsePid(packet);
  var type = programMapTable[pid];
  switch (type) {
    case StreamTypes.H264_STREAM_TYPE:
      return 'video';
    case StreamTypes.ADTS_STREAM_TYPE:
      return 'audio';
    case StreamTypes.METADATA_STREAM_TYPE:
      return 'timed-metadata';
    default:
      return null;
  }
};

var parsePesTime = function(packet) {
  var pusi = parsePayloadUnitStartIndicator(packet);
  if (!pusi) {
    return null;
  }

  var offset = 4 + parseAdaptionField(packet);

  if (offset >= packet.byteLength) {
    // From the H 222.0 MPEG-TS spec
    // "For transport stream packets carrying PES packets, stuffing is needed when there
    //  is insufficient PES packet data to completely fill the transport stream packet
    //  payload bytes. Stuffing is accomplished by defining an adaptation field longer than
    //  the sum of the lengths of the data elements in it, so that the payload bytes
    //  remaining after the adaptation field exactly accommodates the available PES packet
    //  data."
    //
    // If the offset is >= the length of the packet, then the packet contains no data
    // and instead is just adaption field stuffing bytes
    return null;
  }

  var pes = null;
  var ptsDtsFlags;

  // PES packets may be annotated with a PTS value, or a PTS value
  // and a DTS value. Determine what combination of values is
  // available to work with.
  ptsDtsFlags = packet[offset + 7];

  // PTS and DTS are normally stored as a 33-bit number. Javascript
  // performs all bitwise operations on 32-bit integers but javascript
  // supports a much greater range (52-bits) of integer using standard
  // mathematical operations.
  // We construct a 31-bit value using bitwise operators over the 31
  // most significant bits and then multiply by 4 (equal to a left-shift
  // of 2) before we add the final 2 least significant bits of the
  // timestamp (equal to an OR.)
  if (ptsDtsFlags & 0xC0) {
    pes = {};
    // the PTS and DTS are not written out directly. For information
    // on how they are encoded, see
    // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
    pes.pts = (packet[offset + 9] & 0x0E) << 27 |
      (packet[offset + 10] & 0xFF) << 20 |
      (packet[offset + 11] & 0xFE) << 12 |
      (packet[offset + 12] & 0xFF) << 5 |
      (packet[offset + 13] & 0xFE) >>> 3;
    pes.pts *= 4; // Left shift by 2
    pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs
    pes.dts = pes.pts;
    if (ptsDtsFlags & 0x40) {
      pes.dts = (packet[offset + 14] & 0x0E) << 27 |
        (packet[offset + 15] & 0xFF) << 20 |
        (packet[offset + 16] & 0xFE) << 12 |
        (packet[offset + 17] & 0xFF) << 5 |
        (packet[offset + 18] & 0xFE) >>> 3;
      pes.dts *= 4; // Left shift by 2
      pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs
    }
  }
  return pes;
};

var parseNalUnitType = function(type) {
  switch (type) {
    case 0x05:
      return 'slice_layer_without_partitioning_rbsp_idr';
    case 0x06:
      return 'sei_rbsp';
    case 0x07:
      return 'seq_parameter_set_rbsp';
    case 0x08:
      return 'pic_parameter_set_rbsp';
    case 0x09:
      return 'access_unit_delimiter_rbsp';
    default:
      return null;
  }
};

var videoPacketContainsKeyFrame = function(packet) {
  var offset = 4 + parseAdaptionField(packet);
  var frameBuffer = packet.subarray(offset);
  var frameI = 0;
  var frameSyncPoint = 0;
  var foundKeyFrame = false;
  var nalType;

  // advance the sync point to a NAL start, if necessary
  for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {
    if (frameBuffer[frameSyncPoint + 2] === 1) {
      // the sync point is properly aligned
      frameI = frameSyncPoint + 5;
      break;
    }
  }

  while (frameI < frameBuffer.byteLength) {
    // look at the current byte to determine if we've hit the end of
    // a NAL unit boundary
    switch (frameBuffer[frameI]) {
      case 0:
        // skip past non-sync sequences
        if (frameBuffer[frameI - 1] !== 0) {
          frameI += 2;
          break;
        } else if (frameBuffer[frameI - 2] !== 0) {
          frameI++;
          break;
        }

        if (frameSyncPoint + 3 !== frameI - 2) {
          nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
          if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
            foundKeyFrame = true;
          }
        }

        // drop trailing zeroes
        do {
          frameI++;
        } while (frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);
        frameSyncPoint = frameI - 2;
        frameI += 3;
        break;
      case 1:
        // skip past non-sync sequences
        if (frameBuffer[frameI - 1] !== 0 ||
            frameBuffer[frameI - 2] !== 0) {
          frameI += 3;
          break;
        }

        nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
        if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
          foundKeyFrame = true;
        }
        frameSyncPoint = frameI - 2;
        frameI += 3;
        break;
      default:
        // the current byte isn't a one or zero, so it cannot be part
        // of a sync sequence
        frameI += 3;
        break;
    }
  }
  frameBuffer = frameBuffer.subarray(frameSyncPoint);
  frameI -= frameSyncPoint;
  frameSyncPoint = 0;
  // parse the final nal
  if (frameBuffer && frameBuffer.byteLength > 3) {
    nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);
    if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
      foundKeyFrame = true;
    }
  }

  return foundKeyFrame;
};

module.exports = {
  parseType: parseType,
  parsePat: parsePat,
  parsePmt: parsePmt,
  parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,
  parsePesType: parsePesType,
  parsePesTime: parsePesTime,
  videoPacketContainsKeyFrame: videoPacketContainsKeyFrame
};

},{"./stream-types.js":22}],22:[function(require,module,exports){
'use strict';

module.exports = {
  H264_STREAM_TYPE: 0x1B,
  ADTS_STREAM_TYPE: 0x0F,
  METADATA_STREAM_TYPE: 0x15
};

},{}],23:[function(require,module,exports){
/**
 * mux.js
 *
 * Copyright (c) 2016 Brightcove
 * All rights reserved.
 *
 * Accepts program elementary stream (PES) data events and corrects
 * decode and presentation time stamps to account for a rollover
 * of the 33 bit value.
 */

'use strict';

var Stream = require('../utils/stream');

var MAX_TS = 8589934592;

var RO_THRESH = 4294967296;

var handleRollover = function(value, reference) {
  var direction = 1;

  if (value > reference) {
    // If the current timestamp value is greater than our reference timestamp and we detect a
    // timestamp rollover, this means the roll over is happening in the opposite direction.
    // Example scenario: Enter a long stream/video just after a rollover occurred. The reference
    // point will be set to a small number, e.g. 1. The user then seeks backwards over the
    // rollover point. In loading this segment, the timestamp values will be very large,
    // e.g. 2^33 - 1. Since this comes before the data we loaded previously, we want to adjust
    // the time stamp to be `value - 2^33`.
    direction = -1;
  }

  // Note: A seek forwards or back that is greater than the RO_THRESH (2^32, ~13 hours) will
  // cause an incorrect adjustment.
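  // Illustrative example (not in the original source): with reference = 10 and
  // value = 2^33 - 100 (a timestamp captured just before the 33-bit clock wrapped),
  // the gap exceeds RO_THRESH, so a single pass of the loop below subtracts 2^33
  // and yields -100, i.e. one hundred ticks before the reference point.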
  while (Math.abs(reference - value) > RO_THRESH) {
    value += (direction * MAX_TS);
  }

  return value;
};

var TimestampRolloverStream = function(type) {
  var lastDTS, referenceDTS;

  TimestampRolloverStream.prototype.init.call(this);

  this.type_ = type;

  this.push = function(data) {
    if (data.type !== this.type_) {
      return;
    }

    if (referenceDTS === undefined) {
      referenceDTS = data.dts;
    }

    data.dts = handleRollover(data.dts, referenceDTS);
    data.pts = handleRollover(data.pts, referenceDTS);

    lastDTS = data.dts;

    this.trigger('data', data);
  };

  this.flush = function() {
    referenceDTS = lastDTS;
    this.trigger('done');
  };

  this.discontinuity = function() {
    referenceDTS = void 0;
    lastDTS = void 0;
  };

};

TimestampRolloverStream.prototype = new Stream();

module.exports = {
  TimestampRolloverStream: TimestampRolloverStream,
  handleRollover: handleRollover
};

},{"../utils/stream":33}],24:[function(require,module,exports){
module.exports = {
  generator: require('./mp4-generator'),
  Transmuxer: require('./transmuxer').Transmuxer,
  AudioSegmentStream: require('./transmuxer').AudioSegmentStream,
  VideoSegmentStream: require('./transmuxer').VideoSegmentStream
};

},{"./mp4-generator":25,"./transmuxer":27}],25:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Functions that generate fragmented MP4s suitable for use with Media
|
||
* Source Extensions.
|
||
*/
|
||
'use strict';
|
||
|
||
var UINT32_MAX = Math.pow(2, 32) - 1;
|
||
|
||
var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd,
|
||
trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex,
|
||
trun, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR,
|
||
AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS;
|
||
|
||
// pre-calculate constants
|
||
(function() {
|
||
var i;
|
||
types = {
|
||
avc1: [], // codingname
|
||
avcC: [],
|
||
btrt: [],
|
||
dinf: [],
|
||
dref: [],
|
||
esds: [],
|
||
ftyp: [],
|
||
hdlr: [],
|
||
mdat: [],
|
||
mdhd: [],
|
||
mdia: [],
|
||
mfhd: [],
|
||
minf: [],
|
||
moof: [],
|
||
moov: [],
|
||
mp4a: [], // codingname
|
||
mvex: [],
|
||
mvhd: [],
|
||
sdtp: [],
|
||
smhd: [],
|
||
stbl: [],
|
||
stco: [],
|
||
stsc: [],
|
||
stsd: [],
|
||
stsz: [],
|
||
stts: [],
|
||
styp: [],
|
||
tfdt: [],
|
||
tfhd: [],
|
||
traf: [],
|
||
trak: [],
|
||
trun: [],
|
||
trex: [],
|
||
tkhd: [],
|
||
vmhd: []
|
||
};
|
||
|
||
// In environments where Uint8Array is undefined (e.g., IE8), skip setup so that we
|
||
// don't throw an error
|
||
if (typeof Uint8Array === 'undefined') {
|
||
return;
|
||
}
|
||
|
||
for (i in types) {
|
||
if (types.hasOwnProperty(i)) {
|
||
types[i] = [
|
||
i.charCodeAt(0),
|
||
i.charCodeAt(1),
|
||
i.charCodeAt(2),
|
||
i.charCodeAt(3)
|
||
];
|
||
}
|
||
}
|
||
|
||
MAJOR_BRAND = new Uint8Array([
|
||
'i'.charCodeAt(0),
|
||
's'.charCodeAt(0),
|
||
'o'.charCodeAt(0),
|
||
'm'.charCodeAt(0)
|
||
]);
|
||
AVC1_BRAND = new Uint8Array([
|
||
'a'.charCodeAt(0),
|
||
'v'.charCodeAt(0),
|
||
'c'.charCodeAt(0),
|
||
'1'.charCodeAt(0)
|
||
]);
|
||
MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);
|
||
VIDEO_HDLR = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x00, // pre_defined
|
||
0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x56, 0x69, 0x64, 0x65,
|
||
0x6f, 0x48, 0x61, 0x6e,
|
||
0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'
|
||
]);
|
||
AUDIO_HDLR = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x00, // pre_defined
|
||
0x73, 0x6f, 0x75, 0x6e, // handler_type: 'soun'
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x53, 0x6f, 0x75, 0x6e,
|
||
0x64, 0x48, 0x61, 0x6e,
|
||
0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'
|
||
]);
|
||
HDLR_TYPES = {
|
||
video: VIDEO_HDLR,
|
||
audio: AUDIO_HDLR
|
||
};
|
||
DREF = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x01, // entry_count
|
||
0x00, 0x00, 0x00, 0x0c, // entry_size
|
||
0x75, 0x72, 0x6c, 0x20, // 'url' type
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x01 // entry_flags
|
||
]);
|
||
SMHD = new Uint8Array([
|
||
0x00, // version
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, // balance, 0 means centered
|
||
0x00, 0x00 // reserved
|
||
]);
|
||
STCO = new Uint8Array([
|
||
0x00, // version
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x00 // entry_count
|
||
]);
|
||
STSC = STCO;
|
||
STSZ = new Uint8Array([
|
||
0x00, // version
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x00, // sample_size
|
||
0x00, 0x00, 0x00, 0x00 // sample_count
|
||
]);
|
||
STTS = STCO;
|
||
VMHD = new Uint8Array([
|
||
0x00, // version
|
||
0x00, 0x00, 0x01, // flags
|
||
0x00, 0x00, // graphicsmode
|
||
0x00, 0x00,
|
||
0x00, 0x00,
|
||
0x00, 0x00 // opcolor
|
||
]);
|
||
}());
|
||
|
||
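// box() assembles a single ISO BMFF box: a 32-bit big-endian size, the four-byte
// type code from `types`, then any payload byte arrays passed as extra arguments.
// Boxes nest by passing the result of one box() call as the payload of another,
// e.g. box(types.dinf, box(types.dref, DREF)).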
box = function(type) {
|
||
var
|
||
payload = [],
|
||
size = 0,
|
||
i,
|
||
result,
|
||
view;
|
||
|
||
for (i = 1; i < arguments.length; i++) {
|
||
payload.push(arguments[i]);
|
||
}
|
||
|
||
i = payload.length;
|
||
|
||
// calculate the total size we need to allocate
|
||
while (i--) {
|
||
size += payload[i].byteLength;
|
||
}
|
||
result = new Uint8Array(size + 8);
|
||
view = new DataView(result.buffer, result.byteOffset, result.byteLength);
|
||
view.setUint32(0, result.byteLength);
|
||
result.set(type, 4);
|
||
|
||
// copy the payload into the result
|
||
for (i = 0, size = 8; i < payload.length; i++) {
|
||
result.set(payload[i], size);
|
||
size += payload[i].byteLength;
|
||
}
|
||
return result;
|
||
};
|
||
|
||
dinf = function() {
|
||
return box(types.dinf, box(types.dref, DREF));
|
||
};
|
||
|
||
esds = function(track) {
|
||
return box(types.esds, new Uint8Array([
|
||
0x00, // version
|
||
0x00, 0x00, 0x00, // flags
|
||
|
||
// ES_Descriptor
|
||
0x03, // tag, ES_DescrTag
|
||
0x19, // length
|
||
0x00, 0x00, // ES_ID
|
||
0x00, // streamDependenceFlag, URL_flag, reserved, streamPriority
|
||
|
||
// DecoderConfigDescriptor
|
||
0x04, // tag, DecoderConfigDescrTag
|
||
0x11, // length
|
||
0x40, // object type
|
||
0x15, // streamType
|
||
0x00, 0x06, 0x00, // bufferSizeDB
|
||
0x00, 0x00, 0xda, 0xc0, // maxBitrate
|
||
0x00, 0x00, 0xda, 0xc0, // avgBitrate
|
||
|
||
// DecoderSpecificInfo
|
||
0x05, // tag, DecoderSpecificInfoTag
|
||
0x02, // length
|
||
// ISO/IEC 14496-3, AudioSpecificConfig
|
||
// for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35
|
||
(track.audioobjecttype << 3) | (track.samplingfrequencyindex >>> 1),
|
||
(track.samplingfrequencyindex << 7) | (track.channelcount << 3),
|
||
0x06, 0x01, 0x02 // GASpecificConfig
|
||
]));
|
||
};
|
||
|
||
ftyp = function() {
|
||
return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);
|
||
};
|
||
|
||
hdlr = function(type) {
|
||
return box(types.hdlr, HDLR_TYPES[type]);
|
||
};
|
||
mdat = function(data) {
|
||
return box(types.mdat, data);
|
||
};
|
||
mdhd = function(track) {
|
||
var result = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x02, // creation_time
|
||
0x00, 0x00, 0x00, 0x03, // modification_time
|
||
0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
|
||
|
||
(track.duration >>> 24) & 0xFF,
|
||
(track.duration >>> 16) & 0xFF,
|
||
(track.duration >>> 8) & 0xFF,
|
||
track.duration & 0xFF, // duration
|
||
0x55, 0xc4, // 'und' language (undetermined)
|
||
0x00, 0x00
|
||
]);
|
||
|
||
// Use the sample rate from the track metadata, when it is
|
||
// defined. The sample rate can be parsed out of an ADTS header, for
|
||
// instance.
|
||
if (track.samplerate) {
|
||
result[12] = (track.samplerate >>> 24) & 0xFF;
|
||
result[13] = (track.samplerate >>> 16) & 0xFF;
|
||
result[14] = (track.samplerate >>> 8) & 0xFF;
|
||
result[15] = (track.samplerate) & 0xFF;
|
||
}
|
||
|
||
return box(types.mdhd, result);
|
||
};
|
||
mdia = function(track) {
|
||
return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));
|
||
};
|
||
mfhd = function(sequenceNumber) {
|
||
return box(types.mfhd, new Uint8Array([
|
||
0x00,
|
||
0x00, 0x00, 0x00, // flags
|
||
(sequenceNumber & 0xFF000000) >> 24,
|
||
(sequenceNumber & 0xFF0000) >> 16,
|
||
(sequenceNumber & 0xFF00) >> 8,
|
||
sequenceNumber & 0xFF // sequence_number
|
||
]));
|
||
};
|
||
minf = function(track) {
|
||
return box(types.minf,
|
||
track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD),
|
||
dinf(),
|
||
stbl(track));
|
||
};
|
||
moof = function(sequenceNumber, tracks) {
|
||
var
|
||
trackFragments = [],
|
||
i = tracks.length;
|
||
// build traf boxes for each track fragment
|
||
while (i--) {
|
||
trackFragments[i] = traf(tracks[i]);
|
||
}
|
||
return box.apply(null, [
|
||
types.moof,
|
||
mfhd(sequenceNumber)
|
||
].concat(trackFragments));
|
||
};
|
||
/**
|
||
* Returns a movie box.
|
||
* @param tracks {array} the tracks associated with this movie
|
||
* @see ISO/IEC 14496-12:2012(E), section 8.2.1
|
||
*/
|
||
moov = function(tracks) {
|
||
var
|
||
i = tracks.length,
|
||
boxes = [];
|
||
|
||
while (i--) {
|
||
boxes[i] = trak(tracks[i]);
|
||
}
|
||
|
||
return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));
|
||
};
|
||
mvex = function(tracks) {
|
||
var
|
||
i = tracks.length,
|
||
boxes = [];
|
||
|
||
while (i--) {
|
||
boxes[i] = trex(tracks[i]);
|
||
}
|
||
return box.apply(null, [types.mvex].concat(boxes));
|
||
};
|
||
mvhd = function(duration) {
|
||
var
|
||
bytes = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x01, // creation_time
|
||
0x00, 0x00, 0x00, 0x02, // modification_time
|
||
0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
|
||
(duration & 0xFF000000) >> 24,
|
||
(duration & 0xFF0000) >> 16,
|
||
(duration & 0xFF00) >> 8,
|
||
duration & 0xFF, // duration
|
||
0x00, 0x01, 0x00, 0x00, // 1.0 rate
|
||
0x01, 0x00, // 1.0 volume
|
||
0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x01, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x01, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00, // pre_defined
|
||
0xff, 0xff, 0xff, 0xff // next_track_ID
|
||
]);
|
||
return box(types.mvhd, bytes);
|
||
};
|
||
|
||
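// Build the sample dependency table (sdtp) box for a video track fragment; each
// entry packs the sample's dependsOn, isDependedOn and hasRedundancy flags into
// a single byte.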
sdtp = function(track) {
|
||
var
|
||
samples = track.samples || [],
|
||
bytes = new Uint8Array(4 + samples.length),
|
||
flags,
|
||
i;
|
||
|
||
// leave the full box header (4 bytes) all zero
|
||
|
||
// write the sample table
|
||
for (i = 0; i < samples.length; i++) {
|
||
flags = samples[i].flags;
|
||
|
||
bytes[i + 4] = (flags.dependsOn << 4) |
|
||
(flags.isDependedOn << 2) |
|
||
(flags.hasRedundancy);
|
||
}
|
||
|
||
return box(types.sdtp,
|
||
bytes);
|
||
};
|
||
|
||
stbl = function(track) {
|
||
return box(types.stbl,
|
||
stsd(track),
|
||
box(types.stts, STTS),
|
||
box(types.stsc, STSC),
|
||
box(types.stsz, STSZ),
|
||
box(types.stco, STCO));
|
||
};
|
||
|
||
(function() {
|
||
var videoSample, audioSample;
|
||
|
||
stsd = function(track) {
|
||
|
||
return box(types.stsd, new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
0x00, 0x00, 0x00, 0x01
|
||
]), track.type === 'video' ? videoSample(track) : audioSample(track));
|
||
};
|
||
|
||
videoSample = function(track) {
|
||
var
|
||
sps = track.sps || [],
|
||
pps = track.pps || [],
|
||
sequenceParameterSets = [],
|
||
pictureParameterSets = [],
|
||
i;
|
||
|
||
// assemble the SPSs
|
||
for (i = 0; i < sps.length; i++) {
|
||
sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);
|
||
sequenceParameterSets.push((sps[i].byteLength & 0xFF)); // sequenceParameterSetLength
|
||
sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS
|
||
}
|
||
|
||
// assemble the PPSs
|
||
for (i = 0; i < pps.length; i++) {
|
||
pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);
|
||
pictureParameterSets.push((pps[i].byteLength & 0xFF));
|
||
pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));
|
||
}
|
||
|
||
return box(types.avc1, new Uint8Array([
|
||
0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x01, // data_reference_index
|
||
0x00, 0x00, // pre_defined
|
||
0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00, // pre_defined
|
||
(track.width & 0xff00) >> 8,
|
||
track.width & 0xff, // width
|
||
(track.height & 0xff00) >> 8,
|
||
track.height & 0xff, // height
|
||
0x00, 0x48, 0x00, 0x00, // horizresolution
|
||
0x00, 0x48, 0x00, 0x00, // vertresolution
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x01, // frame_count
|
||
0x13,
|
||
0x76, 0x69, 0x64, 0x65,
|
||
0x6f, 0x6a, 0x73, 0x2d,
|
||
0x63, 0x6f, 0x6e, 0x74,
|
||
0x72, 0x69, 0x62, 0x2d,
|
||
0x68, 0x6c, 0x73, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, // compressorname
|
||
0x00, 0x18, // depth = 24
|
||
0x11, 0x11 // pre_defined = -1
|
||
]), box(types.avcC, new Uint8Array([
|
||
0x01, // configurationVersion
|
||
track.profileIdc, // AVCProfileIndication
|
||
track.profileCompatibility, // profile_compatibility
|
||
track.levelIdc, // AVCLevelIndication
|
||
0xff // lengthSizeMinusOne, hard-coded to 4 bytes
|
||
].concat([
|
||
sps.length // numOfSequenceParameterSets
|
||
]).concat(sequenceParameterSets).concat([
|
||
pps.length // numOfPictureParameterSets
|
||
]).concat(pictureParameterSets))), // "PPS"
|
||
box(types.btrt, new Uint8Array([
|
||
0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB
|
||
0x00, 0x2d, 0xc6, 0xc0, // maxBitrate
|
||
0x00, 0x2d, 0xc6, 0xc0
|
||
])) // avgBitrate
|
||
);
|
||
};
|
||
|
||
audioSample = function(track) {
|
||
return box(types.mp4a, new Uint8Array([
|
||
|
||
// SampleEntry, ISO/IEC 14496-12
|
||
0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x01, // data_reference_index
|
||
|
||
// AudioSampleEntry, ISO/IEC 14496-12
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
(track.channelcount & 0xff00) >> 8,
|
||
(track.channelcount & 0xff), // channelcount
|
||
|
||
(track.samplesize & 0xff00) >> 8,
|
||
(track.samplesize & 0xff), // samplesize
|
||
0x00, 0x00, // pre_defined
|
||
0x00, 0x00, // reserved
|
||
|
||
(track.samplerate & 0xff00) >> 8,
|
||
(track.samplerate & 0xff),
|
||
0x00, 0x00 // samplerate, 16.16
|
||
|
||
// MP4AudioSampleEntry, ISO/IEC 14496-14
|
||
]), esds(track));
|
||
};
|
||
}());
|
||
|
||
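// Generate a track header (tkhd) box for a single track.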
tkhd = function(track) {
|
||
var result = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x07, // flags
|
||
0x00, 0x00, 0x00, 0x00, // creation_time
|
||
0x00, 0x00, 0x00, 0x00, // modification_time
|
||
(track.id & 0xFF000000) >> 24,
|
||
(track.id & 0xFF0000) >> 16,
|
||
(track.id & 0xFF00) >> 8,
|
||
track.id & 0xFF, // track_ID
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
(track.duration & 0xFF000000) >> 24,
|
||
(track.duration & 0xFF0000) >> 16,
|
||
(track.duration & 0xFF00) >> 8,
|
||
track.duration & 0xFF, // duration
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00, // reserved
|
||
0x00, 0x00, // layer
|
||
0x00, 0x00, // alternate_group
|
||
0x01, 0x00, // non-audio track volume
|
||
0x00, 0x00, // reserved
|
||
0x00, 0x01, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x01, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x00, 0x00, 0x00, 0x00,
|
||
0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
|
||
(track.width & 0xFF00) >> 8,
|
||
track.width & 0xFF,
|
||
0x00, 0x00, // width
|
||
(track.height & 0xFF00) >> 8,
|
||
track.height & 0xFF,
|
||
0x00, 0x00 // height
|
||
]);
|
||
|
||
return box(types.tkhd, result);
|
||
};
|
||
|
||
/**
|
||
* Generate a track fragment (traf) box. A traf box collects metadata
|
||
* about tracks in a movie fragment (moof) box.
|
||
*/
|
||
traf = function(track) {
|
||
var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun,
|
||
sampleDependencyTable, dataOffset,
|
||
upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;
|
||
|
||
trackFragmentHeader = box(types.tfhd, new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x3a, // flags
|
||
(track.id & 0xFF000000) >> 24,
|
||
(track.id & 0xFF0000) >> 16,
|
||
(track.id & 0xFF00) >> 8,
|
||
(track.id & 0xFF), // track_ID
|
||
0x00, 0x00, 0x00, 0x01, // sample_description_index
|
||
0x00, 0x00, 0x00, 0x00, // default_sample_duration
|
||
0x00, 0x00, 0x00, 0x00, // default_sample_size
|
||
0x00, 0x00, 0x00, 0x00 // default_sample_flags
|
||
]));
|
||
|
||
upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / (UINT32_MAX + 1));
|
||
lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % (UINT32_MAX + 1));
|
||
|
||
trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([
|
||
0x01, // version 1
|
||
0x00, 0x00, 0x00, // flags
|
||
// baseMediaDecodeTime
|
||
(upperWordBaseMediaDecodeTime >>> 24) & 0xFF,
|
||
(upperWordBaseMediaDecodeTime >>> 16) & 0xFF,
|
||
(upperWordBaseMediaDecodeTime >>> 8) & 0xFF,
|
||
upperWordBaseMediaDecodeTime & 0xFF,
|
||
(lowerWordBaseMediaDecodeTime >>> 24) & 0xFF,
|
||
(lowerWordBaseMediaDecodeTime >>> 16) & 0xFF,
|
||
(lowerWordBaseMediaDecodeTime >>> 8) & 0xFF,
|
||
lowerWordBaseMediaDecodeTime & 0xFF
|
||
]));
|
||
|
||
// the data offset specifies the number of bytes from the start of
|
||
// the containing moof to the first payload byte of the associated
|
||
// mdat
|
||
dataOffset = (32 + // tfhd
|
||
20 + // tfdt
|
||
8 + // traf header
|
||
16 + // mfhd
|
||
8 + // moof header
|
||
8); // mdat header
|
||
|
||
// audio tracks require less metadata
|
||
if (track.type === 'audio') {
|
||
trackFragmentRun = trun(track, dataOffset);
|
||
return box(types.traf,
|
||
trackFragmentHeader,
|
||
trackFragmentDecodeTime,
|
||
trackFragmentRun);
|
||
}
|
||
|
||
// video tracks should contain an independent and disposable samples
|
||
// box (sdtp)
|
||
// generate one and adjust offsets to match
|
||
sampleDependencyTable = sdtp(track);
|
||
trackFragmentRun = trun(track,
|
||
sampleDependencyTable.length + dataOffset);
|
||
return box(types.traf,
|
||
trackFragmentHeader,
|
||
trackFragmentDecodeTime,
|
||
trackFragmentRun,
|
||
sampleDependencyTable);
|
||
};
|
||
|
||
/**
|
||
* Generate a track box.
|
||
* @param track {object} a track definition
|
||
* @return {Uint8Array} the track box
|
||
*/
|
||
trak = function(track) {
|
||
track.duration = track.duration || 0xffffffff;
|
||
return box(types.trak,
|
||
tkhd(track),
|
||
mdia(track));
|
||
};
|
||
|
||
trex = function(track) {
|
||
var result = new Uint8Array([
|
||
0x00, // version 0
|
||
0x00, 0x00, 0x00, // flags
|
||
(track.id & 0xFF000000) >> 24,
|
||
(track.id & 0xFF0000) >> 16,
|
||
(track.id & 0xFF00) >> 8,
|
||
(track.id & 0xFF), // track_ID
|
||
0x00, 0x00, 0x00, 0x01, // default_sample_description_index
|
||
0x00, 0x00, 0x00, 0x00, // default_sample_duration
|
||
0x00, 0x00, 0x00, 0x00, // default_sample_size
|
||
0x00, 0x01, 0x00, 0x01 // default_sample_flags
|
||
]);
|
||
// the last two bytes of default_sample_flags is the sample
|
||
// degradation priority, a hint about the importance of this sample
|
||
// relative to others. Lower the degradation priority for all sample
|
||
// types other than video.
|
||
if (track.type !== 'video') {
|
||
result[result.length - 1] = 0x00;
|
||
}
|
||
|
||
return box(types.trex, result);
|
||
};
|
||
|
||
(function() {
|
||
var audioTrun, videoTrun, trunHeader;
|
||
|
||
// This method assumes all samples are uniform. That is, if a
|
||
// duration is present for the first sample, it will be present for
|
||
// all subsequent samples.
|
||
// see ISO/IEC 14496-12:2012, Section 8.8.8.1
|
||
trunHeader = function(samples, offset) {
|
||
var durationPresent = 0, sizePresent = 0,
|
||
flagsPresent = 0, compositionTimeOffset = 0;
|
||
|
||
// trun flag constants
|
||
if (samples.length) {
|
||
if (samples[0].duration !== undefined) {
|
||
durationPresent = 0x1;
|
||
}
|
||
if (samples[0].size !== undefined) {
|
||
sizePresent = 0x2;
|
||
}
|
||
if (samples[0].flags !== undefined) {
|
||
flagsPresent = 0x4;
|
||
}
|
||
if (samples[0].compositionTimeOffset !== undefined) {
|
||
compositionTimeOffset = 0x8;
|
||
}
|
||
}
|
||
|
||
return [
|
||
0x00, // version 0
|
||
0x00,
|
||
durationPresent | sizePresent | flagsPresent | compositionTimeOffset,
|
||
0x01, // flags
|
||
(samples.length & 0xFF000000) >>> 24,
|
||
(samples.length & 0xFF0000) >>> 16,
|
||
(samples.length & 0xFF00) >>> 8,
|
||
samples.length & 0xFF, // sample_count
|
||
(offset & 0xFF000000) >>> 24,
|
||
(offset & 0xFF0000) >>> 16,
|
||
(offset & 0xFF00) >>> 8,
|
||
offset & 0xFF // data_offset
|
||
];
|
||
};
|
||
|
||
videoTrun = function(track, offset) {
|
||
var bytes, samples, sample, i;
|
||
|
||
samples = track.samples || [];
|
||
offset += 8 + 12 + (16 * samples.length);
|
||
|
||
bytes = trunHeader(samples, offset);
|
||
|
||
for (i = 0; i < samples.length; i++) {
|
||
sample = samples[i];
|
||
bytes = bytes.concat([
|
||
(sample.duration & 0xFF000000) >>> 24,
|
||
(sample.duration & 0xFF0000) >>> 16,
|
||
(sample.duration & 0xFF00) >>> 8,
|
||
sample.duration & 0xFF, // sample_duration
|
||
(sample.size & 0xFF000000) >>> 24,
|
||
(sample.size & 0xFF0000) >>> 16,
|
||
(sample.size & 0xFF00) >>> 8,
|
||
sample.size & 0xFF, // sample_size
|
||
(sample.flags.isLeading << 2) | sample.flags.dependsOn,
|
||
(sample.flags.isDependedOn << 6) |
|
||
(sample.flags.hasRedundancy << 4) |
|
||
(sample.flags.paddingValue << 1) |
|
||
sample.flags.isNonSyncSample,
|
||
sample.flags.degradationPriority & 0xF0 << 8,
|
||
sample.flags.degradationPriority & 0x0F, // sample_flags
|
||
(sample.compositionTimeOffset & 0xFF000000) >>> 24,
|
||
(sample.compositionTimeOffset & 0xFF0000) >>> 16,
|
||
(sample.compositionTimeOffset & 0xFF00) >>> 8,
|
||
sample.compositionTimeOffset & 0xFF // sample_composition_time_offset
|
||
]);
|
||
}
|
||
return box(types.trun, new Uint8Array(bytes));
|
||
};
|
||
|
||
audioTrun = function(track, offset) {
|
||
var bytes, samples, sample, i;
|
||
|
||
samples = track.samples || [];
|
||
offset += 8 + 12 + (8 * samples.length);
|
||
|
||
bytes = trunHeader(samples, offset);
|
||
|
||
for (i = 0; i < samples.length; i++) {
|
||
sample = samples[i];
|
||
bytes = bytes.concat([
|
||
(sample.duration & 0xFF000000) >>> 24,
|
||
(sample.duration & 0xFF0000) >>> 16,
|
||
(sample.duration & 0xFF00) >>> 8,
|
||
sample.duration & 0xFF, // sample_duration
|
||
(sample.size & 0xFF000000) >>> 24,
|
||
(sample.size & 0xFF0000) >>> 16,
|
||
(sample.size & 0xFF00) >>> 8,
|
||
sample.size & 0xFF]); // sample_size
|
||
}
|
||
|
||
return box(types.trun, new Uint8Array(bytes));
|
||
};
|
||
|
||
trun = function(track, offset) {
|
||
if (track.type === 'audio') {
|
||
return audioTrun(track, offset);
|
||
}
|
||
|
||
return videoTrun(track, offset);
|
||
};
|
||
}());
|
||
|
||
module.exports = {
|
||
ftyp: ftyp,
|
||
mdat: mdat,
|
||
moof: moof,
|
||
moov: moov,
|
||
initSegment: function(tracks) {
|
||
var
|
||
fileType = ftyp(),
|
||
movie = moov(tracks),
|
||
result;
|
||
|
||
result = new Uint8Array(fileType.byteLength + movie.byteLength);
|
||
result.set(fileType);
|
||
result.set(movie, fileType.byteLength);
|
||
return result;
|
||
}
|
||
};
|
||
|
||
},{}],26:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Utilities to detect basic properties and metadata about MP4s.
|
||
*/
|
||
'use strict';
|
||
|
||
var findBox, parseType, timescale, startTime;
|
||
|
||
// Find the data for a box specified by its path
|
||
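// findBox returns an array containing the payload (the bytes after the 8-byte
// size/type header) of every box matching the path, searching recursively
// through nested boxes.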
findBox = function(data, path) {
|
||
var results = [],
|
||
i, size, type, end, subresults;
|
||
|
||
if (!path.length) {
|
||
// short-circuit the search for empty paths
|
||
return null;
|
||
}
|
||
|
||
for (i = 0; i < data.byteLength;) {
|
||
size = data[i] << 24;
|
||
size |= data[i + 1] << 16;
|
||
size |= data[i + 2] << 8;
|
||
size |= data[i + 3];
|
||
|
||
type = parseType(data.subarray(i + 4, i + 8));
|
||
|
||
end = size > 1 ? i + size : data.byteLength;
|
||
|
||
if (type === path[0]) {
|
||
if (path.length === 1) {
|
||
// this is the end of the path and we've found the box we were
|
||
// looking for
|
||
results.push(data.subarray(i + 8, end));
|
||
} else {
|
||
// recursively search for the next box along the path
|
||
subresults = findBox(data.subarray(i + 8, end), path.slice(1));
|
||
if (subresults.length) {
|
||
results = results.concat(subresults);
|
||
}
|
||
}
|
||
}
|
||
i = end;
|
||
}
|
||
|
||
// we've finished searching all of data
|
||
return results;
|
||
};
|
||
|
||
/**
|
||
* Returns the string representation of an ASCII encoded four byte buffer.
|
||
* @param buffer {Uint8Array} a four-byte buffer to translate
|
||
* @return {string} the corresponding string
|
||
*/
|
||
parseType = function(buffer) {
|
||
var result = '';
|
||
result += String.fromCharCode(buffer[0]);
|
||
result += String.fromCharCode(buffer[1]);
|
||
result += String.fromCharCode(buffer[2]);
|
||
result += String.fromCharCode(buffer[3]);
|
||
return result;
|
||
};
|
||
|
||
/**
|
||
* Parses an MP4 initialization segment and extracts the timescale
|
||
* values for any declared tracks. Timescale values indicate the
|
||
* number of clock ticks per second to assume for time-based values
|
||
* elsewhere in the MP4.
|
||
*
|
||
* To determine the start time of an MP4, you need two pieces of
|
||
* information: the timescale unit and the earliest base media decode
|
||
* time. Multiple timescales can be specified within an MP4 but the
|
||
* base media decode time is always expressed in the timescale from
|
||
* the media header box for the track:
|
||
* ```
|
||
* moov > trak > mdia > mdhd.timescale
|
||
* ```
|
||
* @param init {Uint8Array} the bytes of the init segment
|
||
* @return {object} a hash of track ids to timescale values or null if
|
||
* the init segment is malformed.
|
||
*/
|
||
timescale = function(init) {
|
||
var
|
||
result = {},
|
||
traks = findBox(init, ['moov', 'trak']);
|
||
|
||
// mdhd timescale
|
||
return traks.reduce(function(result, trak) {
|
||
var tkhd, version, index, id, mdhd;
|
||
|
||
tkhd = findBox(trak, ['tkhd'])[0];
|
||
if (!tkhd) {
|
||
return null;
|
||
}
|
||
version = tkhd[0];
|
||
index = version === 0 ? 12 : 20;
|
||
id = tkhd[index] << 24 |
|
||
tkhd[index + 1] << 16 |
|
||
tkhd[index + 2] << 8 |
|
||
tkhd[index + 3];
|
||
|
||
mdhd = findBox(trak, ['mdia', 'mdhd'])[0];
|
||
if (!mdhd) {
|
||
return null;
|
||
}
|
||
version = mdhd[0];
|
||
index = version === 0 ? 12 : 20;
|
||
result[id] = mdhd[index] << 24 |
|
||
mdhd[index + 1] << 16 |
|
||
mdhd[index + 2] << 8 |
|
||
mdhd[index + 3];
|
||
return result;
|
||
}, result);
|
||
};
|
||
|
||
/**
|
||
* Determine the base media decode start time, in seconds, for an MP4
|
||
* fragment. If multiple fragments are specified, the earliest time is
|
||
* returned.
|
||
*
|
||
* The base media decode time can be parsed from track fragment
|
||
* metadata:
|
||
* ```
|
||
* moof > traf > tfdt.baseMediaDecodeTime
|
||
* ```
|
||
* It requires the timescale value from the mdhd to interpret.
|
||
*
|
||
* @param timescale {object} a hash of track ids to timescale values.
|
||
* @return {number} the earliest base media decode start time for the
|
||
* fragment, in seconds
|
||
*/
|
||
startTime = function(timescale, fragment) {
|
||
var trafs, baseTimes, result;
|
||
|
||
// we need info from two children of each track fragment box
|
||
trafs = findBox(fragment, ['moof', 'traf']);
|
||
|
||
// determine the start times for each track
|
||
baseTimes = [].concat.apply([], trafs.map(function(traf) {
|
||
return findBox(traf, ['tfhd']).map(function(tfhd) {
|
||
var id, scale, baseTime;
|
||
|
||
// get the track id from the tfhd
|
||
id = tfhd[4] << 24 |
|
||
tfhd[5] << 16 |
|
||
tfhd[6] << 8 |
|
||
tfhd[7];
|
||
// assume a 90kHz clock if no timescale was specified
|
||
scale = timescale[id] || 90e3;
|
||
|
||
// get the base media decode time from the tfdt
|
||
baseTime = findBox(traf, ['tfdt']).map(function(tfdt) {
|
||
var version, result;
|
||
|
||
version = tfdt[0];
|
||
result = tfdt[4] << 24 |
|
||
tfdt[5] << 16 |
|
||
tfdt[6] << 8 |
|
||
tfdt[7];
|
||
if (version === 1) {
|
||
result *= Math.pow(2, 32);
|
||
result += tfdt[8] << 24 |
|
||
tfdt[9] << 16 |
|
||
tfdt[10] << 8 |
|
||
tfdt[11];
|
||
}
|
||
return result;
|
||
})[0];
|
||
baseTime = baseTime || Infinity;
|
||
|
||
// convert base time to seconds
|
||
return baseTime / scale;
|
||
});
|
||
}));
|
||
|
||
// return the minimum
|
||
result = Math.min.apply(null, baseTimes);
|
||
return isFinite(result) ? result : 0;
|
||
};
|
||
|
||
module.exports = {
|
||
parseType: parseType,
|
||
timescale: timescale,
|
||
startTime: startTime
|
||
};
|
||
|
||
},{}],27:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* A stream-based mp2t to mp4 converter. This utility can be used to
|
||
* deliver mp4s to a SourceBuffer on platforms that support native
|
||
* Media Source Extensions.
|
||
*/
|
||
'use strict';
|
||
|
||
var Stream = require('../utils/stream.js');
|
||
var mp4 = require('./mp4-generator.js');
|
||
var m2ts = require('../m2ts/m2ts.js');
|
||
var AdtsStream = require('../codecs/adts.js');
|
||
var H264Stream = require('../codecs/h264').H264Stream;
|
||
var AacStream = require('../aac');
|
||
var coneOfSilence = require('../data/silence');
|
||
var clock = require('../utils/clock');
|
||
|
||
// constants
|
||
var AUDIO_PROPERTIES = [
|
||
'audioobjecttype',
|
||
'channelcount',
|
||
'samplerate',
|
||
'samplingfrequencyindex',
|
||
'samplesize'
|
||
];
|
||
|
||
var VIDEO_PROPERTIES = [
|
||
'width',
|
||
'height',
|
||
'profileIdc',
|
||
'levelIdc',
|
||
'profileCompatibility'
|
||
];
|
||
|
||
var ONE_SECOND_IN_TS = 90000; // 90kHz clock
|
||
|
||
// object types
|
||
var VideoSegmentStream, AudioSegmentStream, Transmuxer, CoalesceStream;
|
||
|
||
// Helper functions
|
||
var
|
||
createDefaultSample,
|
||
isLikelyAacData,
|
||
collectDtsInfo,
|
||
clearDtsInfo,
|
||
calculateTrackBaseMediaDecodeTime,
|
||
arrayEquals,
|
||
sumFrameByteLengths;
|
||
|
||
/**
|
||
* Default sample object
|
||
* see ISO/IEC 14496-12:2012, section 8.6.4.3
|
||
*/
|
||
createDefaultSample = function() {
|
||
return {
|
||
size: 0,
|
||
flags: {
|
||
isLeading: 0,
|
||
dependsOn: 1,
|
||
isDependedOn: 0,
|
||
hasRedundancy: 0,
|
||
degradationPriority: 0
|
||
}
|
||
};
|
||
};
|
||
|
||
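// Segments that begin with an ID3 tag are assumed to be raw AAC (audio-only HLS
// renditions) rather than MPEG2-TS.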
isLikelyAacData = function(data) {
|
||
if ((data[0] === 'I'.charCodeAt(0)) &&
|
||
(data[1] === 'D'.charCodeAt(0)) &&
|
||
(data[2] === '3'.charCodeAt(0))) {
|
||
return true;
|
||
}
|
||
return false;
|
||
};
|
||
|
||
/**
|
||
* Compare two arrays (even typed) for same-ness
|
||
*/
|
||
arrayEquals = function(a, b) {
|
||
var
|
||
i;
|
||
|
||
if (a.length !== b.length) {
|
||
return false;
|
||
}
|
||
|
||
// compare the value of each element in the array
|
||
for (i = 0; i < a.length; i++) {
|
||
if (a[i] !== b[i]) {
|
||
return false;
|
||
}
|
||
}
|
||
|
||
return true;
|
||
};
|
||
|
||
/**
|
||
* Sum the `byteLength` properties of the data in each AAC frame
|
||
*/
|
||
sumFrameByteLengths = function(array) {
|
||
var
|
||
i,
|
||
currentObj,
|
||
sum = 0;
|
||
|
||
// sum the byteLength of each frame's data
|
||
for (i = 0; i < array.length; i++) {
|
||
currentObj = array[i];
|
||
sum += currentObj.data.byteLength;
|
||
}
|
||
|
||
return sum;
|
||
};
|
||
|
||
/**
|
||
* Constructs a single-track, ISO BMFF media segment from AAC data
|
||
* events. The output of this stream can be fed to a SourceBuffer
|
||
* configured with a suitable initialization segment.
|
||
*/
|
||
AudioSegmentStream = function(track) {
|
||
var
|
||
adtsFrames = [],
|
||
sequenceNumber = 0,
|
||
earliestAllowedDts = 0,
|
||
audioAppendStartTs = 0,
|
||
videoBaseMediaDecodeTime = Infinity;
|
||
|
||
AudioSegmentStream.prototype.init.call(this);
|
||
|
||
this.push = function(data) {
|
||
collectDtsInfo(track, data);
|
||
|
||
if (track) {
|
||
AUDIO_PROPERTIES.forEach(function(prop) {
|
||
track[prop] = data[prop];
|
||
});
|
||
}
|
||
|
||
// buffer audio data until end() is called
|
||
adtsFrames.push(data);
|
||
};
|
||
|
||
this.setEarliestDts = function(earliestDts) {
|
||
earliestAllowedDts = earliestDts - track.timelineStartInfo.baseMediaDecodeTime;
|
||
};
|
||
|
||
this.setVideoBaseMediaDecodeTime = function(baseMediaDecodeTime) {
|
||
videoBaseMediaDecodeTime = baseMediaDecodeTime;
|
||
};
|
||
|
||
this.setAudioAppendStart = function(timestamp) {
|
||
audioAppendStartTs = timestamp;
|
||
};
|
||
|
||
this.flush = function() {
|
||
var
|
||
frames,
|
||
moof,
|
||
mdat,
|
||
boxes;
|
||
|
||
// return early if no audio data has been observed
|
||
if (adtsFrames.length === 0) {
|
||
this.trigger('done', 'AudioSegmentStream');
|
||
return;
|
||
}
|
||
|
||
frames = this.trimAdtsFramesByEarliestDts_(adtsFrames);
|
||
track.baseMediaDecodeTime = calculateTrackBaseMediaDecodeTime(track);
|
||
|
||
this.prefixWithSilence_(track, frames);
|
||
|
||
// we have to build the index from byte locations to
|
||
// samples (that is, adts frames) in the audio data
|
||
track.samples = this.generateSampleTable_(frames);
|
||
|
||
// concatenate the audio data to construct the mdat
|
||
mdat = mp4.mdat(this.concatenateFrameData_(frames));
|
||
|
||
adtsFrames = [];
|
||
|
||
moof = mp4.moof(sequenceNumber, [track]);
|
||
boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
|
||
|
||
// bump the sequence number for next time
|
||
sequenceNumber++;
|
||
|
||
boxes.set(moof);
|
||
boxes.set(mdat, moof.byteLength);
|
||
|
||
clearDtsInfo(track);
|
||
|
||
this.trigger('data', {track: track, boxes: boxes});
|
||
this.trigger('done', 'AudioSegmentStream');
|
||
};
|
||
|
||
// Possibly pad (prefix) the audio track with silence if appending this track
|
||
// would lead to the introduction of a gap in the audio buffer
|
||
this.prefixWithSilence_ = function(track, frames) {
|
||
var
|
||
baseMediaDecodeTimeTs,
|
||
frameDuration = 0,
|
||
audioGapDuration = 0,
|
||
audioFillFrameCount = 0,
|
||
audioFillDuration = 0,
|
||
silentFrame,
|
||
i;
|
||
|
||
if (!frames.length) {
|
||
return;
|
||
}
|
||
|
||
baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate);
|
||
// determine frame clock duration based on sample rate, round up to avoid overfills
|
||
frameDuration = Math.ceil(ONE_SECOND_IN_TS / (track.samplerate / 1024));
|
||
|
||
if (audioAppendStartTs && videoBaseMediaDecodeTime) {
|
||
// insert the shortest possible amount (audio gap or audio to video gap)
|
||
audioGapDuration =
|
||
baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime);
|
||
// number of full frames in the audio gap
|
||
audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);
|
||
audioFillDuration = audioFillFrameCount * frameDuration;
|
||
}
|
||
|
||
// don't attempt to fill gaps smaller than a single frame or larger
|
||
// than a half second
|
||
if (audioFillFrameCount < 1 || audioFillDuration > ONE_SECOND_IN_TS / 2) {
|
||
return;
|
||
}
|
||
|
||
silentFrame = coneOfSilence[track.samplerate];
|
||
|
||
if (!silentFrame) {
|
||
// we don't have a silent frame pregenerated for the sample rate, so use a frame
|
||
// from the content instead
|
||
silentFrame = frames[0].data;
|
||
}
|
||
|
||
for (i = 0; i < audioFillFrameCount; i++) {
|
||
frames.splice(i, 0, {
|
||
data: silentFrame
|
||
});
|
||
}
|
||
|
||
track.baseMediaDecodeTime -=
|
||
Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));
|
||
};
|
||
|
||
// If the audio segment extends before the earliest allowed dts
|
||
// value, remove AAC frames until it starts at or after the earliest
|
||
// allowed DTS so that we don't end up with a negative baseMedia-
|
||
// DecodeTime for the audio track
|
||
this.trimAdtsFramesByEarliestDts_ = function(adtsFrames) {
|
||
if (track.minSegmentDts >= earliestAllowedDts) {
|
||
return adtsFrames;
|
||
}
|
||
|
||
// We will need to recalculate the earliest segment Dts
|
||
track.minSegmentDts = Infinity;
|
||
|
||
return adtsFrames.filter(function(currentFrame) {
|
||
// If this is an allowed frame, keep it and record its DTS
|
||
if (currentFrame.dts >= earliestAllowedDts) {
|
||
track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);
|
||
track.minSegmentPts = track.minSegmentDts;
|
||
return true;
|
||
}
|
||
// Otherwise, discard it
|
||
return false;
|
||
});
|
||
};
|
||
|
||
// generate the track's sample table from an array of frames
|
||
this.generateSampleTable_ = function(frames) {
|
||
var
|
||
i,
|
||
currentFrame,
|
||
samples = [];
|
||
|
||
for (i = 0; i < frames.length; i++) {
|
||
currentFrame = frames[i];
|
||
samples.push({
|
||
size: currentFrame.data.byteLength,
|
||
duration: 1024 // For AAC audio, each frame contains 1024 samples
|
||
});
|
||
}
|
||
return samples;
|
||
};
|
||
|
||
// generate the track's raw mdat data from an array of frames
|
||
this.concatenateFrameData_ = function(frames) {
|
||
var
|
||
i,
|
||
currentFrame,
|
||
dataOffset = 0,
|
||
data = new Uint8Array(sumFrameByteLengths(frames));
|
||
|
||
for (i = 0; i < frames.length; i++) {
|
||
currentFrame = frames[i];
|
||
|
||
data.set(currentFrame.data, dataOffset);
|
||
dataOffset += currentFrame.data.byteLength;
|
||
}
|
||
return data;
|
||
};
|
||
};
|
||
|
||
AudioSegmentStream.prototype = new Stream();
|
||
|
||
/**
|
||
* Constructs a single-track, ISO BMFF media segment from H264 data
|
||
* events. The output of this stream can be fed to a SourceBuffer
|
||
* configured with a suitable initialization segment.
|
||
* @param track {object} track metadata configuration
|
||
* @param options {object} transmuxer options object
|
||
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the
|
||
* gopsToAlignWith list when attempting to align gop pts
|
||
*/
|
||
VideoSegmentStream = function(track, options) {
|
||
var
|
||
sequenceNumber = 0,
|
||
nalUnits = [],
|
||
gopsToAlignWith = [],
|
||
config,
|
||
pps;
|
||
|
||
options = options || {};
|
||
|
||
VideoSegmentStream.prototype.init.call(this);
|
||
|
||
delete track.minPTS;
|
||
|
||
this.gopCache_ = [];
|
||
|
||
this.push = function(nalUnit) {
|
||
collectDtsInfo(track, nalUnit);
|
||
|
||
// record the track config
|
||
if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
|
||
config = nalUnit.config;
|
||
track.sps = [nalUnit.data];
|
||
|
||
VIDEO_PROPERTIES.forEach(function(prop) {
|
||
track[prop] = config[prop];
|
||
}, this);
|
||
}
|
||
|
||
if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' &&
|
||
!pps) {
|
||
pps = nalUnit.data;
|
||
track.pps = [nalUnit.data];
|
||
}
|
||
|
||
// buffer video until flush() is called
|
||
nalUnits.push(nalUnit);
|
||
};
|
||
|
||
this.flush = function() {
|
||
var
|
||
frames,
|
||
gopForFusion,
|
||
gops,
|
||
moof,
|
||
mdat,
|
||
boxes;
|
||
|
||
// Throw away nalUnits at the start of the byte stream until
|
||
// we find the first AUD
|
||
while (nalUnits.length) {
|
||
if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
|
||
break;
|
||
}
|
||
nalUnits.shift();
|
||
}
|
||
|
||
// Return early if no video data has been observed
|
||
if (nalUnits.length === 0) {
|
||
this.resetStream_();
|
||
this.trigger('done', 'VideoSegmentStream');
|
||
return;
|
||
}
|
||
|
||
// Organize the raw nal-units into arrays that represent
|
||
// higher-level constructs such as frames and gops
|
||
// (group-of-pictures)
|
||
frames = this.groupNalsIntoFrames_(nalUnits);
|
||
gops = this.groupFramesIntoGops_(frames);
|
||
|
||
// If the first frame of this fragment is not a keyframe we have
|
||
// a problem since MSE (on Chrome) requires a leading keyframe.
|
||
//
|
||
// We have two approaches to repairing this situation:
|
||
// 1) GOP-FUSION:
|
||
// This is where we keep track of the GOPS (group-of-pictures)
|
||
// from previous fragments and attempt to find one that we can
|
||
// prepend to the current fragment in order to create a valid
|
||
// fragment.
|
||
// 2) KEYFRAME-PULLING:
|
||
// Here we search for the first keyframe in the fragment and
|
||
// throw away all the frames between the start of the fragment
|
||
// and that keyframe. We then extend the duration and pull the
|
||
// PTS of the keyframe forward so that it covers the time range
|
||
// of the frames that were disposed of.
|
||
//
|
||
// #1 is far preferable to #2, which can cause "stuttering", but
|
||
// requires more things to be just right.
|
||
if (!gops[0][0].keyFrame) {
|
||
// Search for a gop for fusion from our gopCache
|
||
gopForFusion = this.getGopForFusion_(nalUnits[0], track);
|
||
|
||
if (gopForFusion) {
|
||
gops.unshift(gopForFusion);
|
||
// Adjust Gops' metadata to account for the inclusion of the
|
||
// new gop at the beginning
|
||
gops.byteLength += gopForFusion.byteLength;
|
||
gops.nalCount += gopForFusion.nalCount;
|
||
gops.pts = gopForFusion.pts;
|
||
gops.dts = gopForFusion.dts;
|
||
gops.duration += gopForFusion.duration;
|
||
} else {
|
||
// If we didn't find a candidate gop, fall back to keyframe-pulling
|
||
gops = this.extendFirstKeyFrame_(gops);
|
||
}
|
||
}
|
||
|
||
// Trim gops to align with gopsToAlignWith
|
||
if (gopsToAlignWith.length) {
|
||
var alignedGops;
|
||
|
||
if (options.alignGopsAtEnd) {
|
||
alignedGops = this.alignGopsAtEnd_(gops);
|
||
} else {
|
||
alignedGops = this.alignGopsAtStart_(gops);
|
||
}
|
||
|
||
if (!alignedGops) {
|
||
// save all the nals in the last GOP into the gop cache
|
||
this.gopCache_.unshift({
|
||
gop: gops.pop(),
|
||
pps: track.pps,
|
||
sps: track.sps
|
||
});
|
||
|
||
// Keep a maximum of 6 GOPs in the cache
|
||
this.gopCache_.length = Math.min(6, this.gopCache_.length);
|
||
|
||
// Clear nalUnits
|
||
nalUnits = [];
|
||
|
||
// return early; no gops can be aligned with the desired gopsToAlignWith
|
||
this.resetStream_();
|
||
this.trigger('done', 'VideoSegmentStream');
|
||
return;
|
||
}
|
||
|
||
// Some gops were trimmed. clear dts info so minSegmentDts and pts are correct
|
||
// when recalculated before sending off to CoalesceStream
|
||
clearDtsInfo(track);
|
||
|
||
gops = alignedGops;
|
||
}
|
||
|
||
collectDtsInfo(track, gops);
|
||
|
||
// First, we have to build the index from byte locations to
|
||
// samples (that is, frames) in the video data
|
||
track.samples = this.generateSampleTable_(gops);
|
||
|
||
// Concatenate the video data and construct the mdat
|
||
mdat = mp4.mdat(this.concatenateNalData_(gops));
|
||
|
||
track.baseMediaDecodeTime = calculateTrackBaseMediaDecodeTime(track);
|
||
|
||
this.trigger('processedGopsInfo', gops.map(function(gop) {
|
||
return {
|
||
pts: gop.pts,
|
||
dts: gop.dts,
|
||
byteLength: gop.byteLength
|
||
};
|
||
}));
|
||
|
||
// save all the nals in the last GOP into the gop cache
|
||
this.gopCache_.unshift({
|
||
gop: gops.pop(),
|
||
pps: track.pps,
|
||
sps: track.sps
|
||
});
|
||
|
||
// Keep a maximum of 6 GOPs in the cache
|
||
this.gopCache_.length = Math.min(6, this.gopCache_.length);
|
||
|
||
// Clear nalUnits
|
||
nalUnits = [];
|
||
|
||
this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);
|
||
this.trigger('timelineStartInfo', track.timelineStartInfo);
|
||
|
||
moof = mp4.moof(sequenceNumber, [track]);
|
||
|
||
// it would be great to allocate this array up front instead of
|
||
// throwing away hundreds of media segment fragments
|
||
boxes = new Uint8Array(moof.byteLength + mdat.byteLength);
|
||
|
||
// Bump the sequence number for next time
|
||
sequenceNumber++;
|
||
|
||
boxes.set(moof);
|
||
boxes.set(mdat, moof.byteLength);
|
||
|
||
this.trigger('data', {track: track, boxes: boxes});
|
||
|
||
this.resetStream_();
|
||
|
||
// Continue with the flush process now
|
||
this.trigger('done', 'VideoSegmentStream');
|
||
};
|
||
|
||
  this.resetStream_ = function() {
    clearDtsInfo(track);

    // reset config and pps because they may differ across segments
    // for instance, when we are rendition switching
    config = undefined;
    pps = undefined;
  };

  // Search for a candidate Gop for gop-fusion from the gop cache and
  // return it or return null if no good candidate was found
  this.getGopForFusion_ = function(nalUnit) {
    var
      halfSecond = 45000, // Half-a-second in a 90khz clock
      allowableOverlap = 10000, // About 3 frames @ 30fps
      nearestDistance = Infinity,
      dtsDistance,
      nearestGopObj,
      currentGop,
      currentGopObj,
      i;

    // Search for the GOP nearest to the beginning of this nal unit
    for (i = 0; i < this.gopCache_.length; i++) {
      currentGopObj = this.gopCache_[i];
      currentGop = currentGopObj.gop;

      // Reject Gops with different SPS or PPS
      if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) ||
          !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {
        continue;
      }

      // Reject Gops that would require a negative baseMediaDecodeTime
      if (currentGop.dts < track.timelineStartInfo.dts) {
        continue;
      }

      // The distance between the end of the gop and the start of the nalUnit
      dtsDistance = (nalUnit.dts - currentGop.dts) - currentGop.duration;

      // Only consider GOPS that start before the nal unit and end within
      // a half-second of the nal unit
      if (dtsDistance >= -allowableOverlap &&
          dtsDistance <= halfSecond) {

        // Always use the closest GOP we found if there is more than
        // one candidate
        if (!nearestGopObj ||
            nearestDistance > dtsDistance) {
          nearestGopObj = currentGopObj;
          nearestDistance = dtsDistance;
        }
      }
    }

    if (nearestGopObj) {
      return nearestGopObj.gop;
    }
    return null;
  };

  this.extendFirstKeyFrame_ = function(gops) {
    var currentGop;

    if (!gops[0][0].keyFrame && gops.length > 1) {
      // Remove the first GOP
      currentGop = gops.shift();

      gops.byteLength -= currentGop.byteLength;
      gops.nalCount -= currentGop.nalCount;

      // Extend the first frame of what is now the
      // first gop to cover the time period of the
      // frames we just removed
      gops[0][0].dts = currentGop.dts;
      gops[0][0].pts = currentGop.pts;
      gops[0][0].duration += currentGop.duration;
    }

    return gops;
  };

  // Convert an array of nal units into an array of frames with each frame being
  // composed of the nal units that make up that frame
  // Also keep track of cumulative data about the frame from the nal units such
  // as the frame duration, starting pts, etc.
  this.groupNalsIntoFrames_ = function(nalUnits) {
    var
      i,
      currentNal,
      currentFrame = [],
      frames = [];

    currentFrame.byteLength = 0;

    for (i = 0; i < nalUnits.length; i++) {
      currentNal = nalUnits[i];

      // Split on 'aud'-type nal units
      if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {
        // Since the very first nal unit is expected to be an AUD,
        // only push to the frames array when currentFrame is not empty
        if (currentFrame.length) {
          currentFrame.duration = currentNal.dts - currentFrame.dts;
          frames.push(currentFrame);
        }
        currentFrame = [currentNal];
        currentFrame.byteLength = currentNal.data.byteLength;
        currentFrame.pts = currentNal.pts;
        currentFrame.dts = currentNal.dts;
      } else {
        // Specifically flag key frames for ease of use later
        if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
          currentFrame.keyFrame = true;
        }
        currentFrame.duration = currentNal.dts - currentFrame.dts;
        currentFrame.byteLength += currentNal.data.byteLength;
        currentFrame.push(currentNal);
      }
    }

    // For the last frame, use the duration of the previous frame if we
    // have nothing better to go on
    if (frames.length &&
        (!currentFrame.duration ||
         currentFrame.duration <= 0)) {
      currentFrame.duration = frames[frames.length - 1].duration;
    }

    // Push the final frame
    frames.push(currentFrame);
    return frames;
  };

  // Convert an array of frames into an array of Gop with each Gop being composed
  // of the frames that make up that Gop
  // Also keep track of cumulative data about the Gop from the frames such as the
  // Gop duration, starting pts, etc.
  this.groupFramesIntoGops_ = function(frames) {
    var
      i,
      currentFrame,
      currentGop = [],
      gops = [];

    // We must pre-set some of the values on the Gop since we
    // keep running totals of these values
    currentGop.byteLength = 0;
    currentGop.nalCount = 0;
    currentGop.duration = 0;
    currentGop.pts = frames[0].pts;
    currentGop.dts = frames[0].dts;

    // store some metadata about all the Gops
    gops.byteLength = 0;
    gops.nalCount = 0;
    gops.duration = 0;
    gops.pts = frames[0].pts;
    gops.dts = frames[0].dts;

    for (i = 0; i < frames.length; i++) {
      currentFrame = frames[i];

      if (currentFrame.keyFrame) {
        // Since the very first frame is expected to be a keyframe,
        // only push to the gops array when currentGop is not empty
        if (currentGop.length) {
          gops.push(currentGop);
          gops.byteLength += currentGop.byteLength;
          gops.nalCount += currentGop.nalCount;
          gops.duration += currentGop.duration;
        }

        currentGop = [currentFrame];
        currentGop.nalCount = currentFrame.length;
        currentGop.byteLength = currentFrame.byteLength;
        currentGop.pts = currentFrame.pts;
        currentGop.dts = currentFrame.dts;
        currentGop.duration = currentFrame.duration;
      } else {
        currentGop.duration += currentFrame.duration;
        currentGop.nalCount += currentFrame.length;
        currentGop.byteLength += currentFrame.byteLength;
        currentGop.push(currentFrame);
      }
    }

    if (gops.length && currentGop.duration <= 0) {
      currentGop.duration = gops[gops.length - 1].duration;
    }
    gops.byteLength += currentGop.byteLength;
    gops.nalCount += currentGop.nalCount;
    gops.duration += currentGop.duration;

    // push the final Gop
    gops.push(currentGop);
    return gops;
  };

  // generate the track's sample table from an array of gops
  this.generateSampleTable_ = function(gops, baseDataOffset) {
    var
      h, i,
      sample,
      currentGop,
      currentFrame,
      dataOffset = baseDataOffset || 0,
      samples = [];

    for (h = 0; h < gops.length; h++) {
      currentGop = gops[h];

      for (i = 0; i < currentGop.length; i++) {
        currentFrame = currentGop[i];

        sample = createDefaultSample();

        sample.dataOffset = dataOffset;
        sample.compositionTimeOffset = currentFrame.pts - currentFrame.dts;
        sample.duration = currentFrame.duration;
        sample.size = 4 * currentFrame.length; // Space for nal unit size
        sample.size += currentFrame.byteLength;

        if (currentFrame.keyFrame) {
          sample.flags.dependsOn = 2;
        }

        dataOffset += sample.size;

        samples.push(sample);
      }
    }
    return samples;
  };

  // generate the track's raw mdat data from an array of gops
  this.concatenateNalData_ = function(gops) {
    var
      h, i, j,
      currentGop,
      currentFrame,
      currentNal,
      dataOffset = 0,
      nalsByteLength = gops.byteLength,
      numberOfNals = gops.nalCount,
      totalByteLength = nalsByteLength + 4 * numberOfNals,
      data = new Uint8Array(totalByteLength),
      view = new DataView(data.buffer);

    // For each Gop..
    for (h = 0; h < gops.length; h++) {
      currentGop = gops[h];

      // For each Frame..
      for (i = 0; i < currentGop.length; i++) {
        currentFrame = currentGop[i];

        // For each NAL..
        for (j = 0; j < currentFrame.length; j++) {
          currentNal = currentFrame[j];

          view.setUint32(dataOffset, currentNal.data.byteLength);
          dataOffset += 4;
          data.set(currentNal.data, dataOffset);
          dataOffset += currentNal.data.byteLength;
        }
      }
    }
    return data;
  };

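  // Editor's note (illustrative, not part of the original source): the two
  // helpers above produce the length-prefixed (AVCC-style) payload that ends
  // up in the mdat. Each NAL unit is written as a 4-byte big-endian length
  // followed by the NAL bytes, which is why generateSampleTable_ sizes every
  // sample as 4 * currentFrame.length + currentFrame.byteLength. For example,
  // a frame made of two NAL units of 10 and 20 bytes occupies
  // 4 + 10 + 4 + 20 = 38 bytes in the mdat.
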
  // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the START of the list
  this.alignGopsAtStart_ = function(gops) {
    var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;

    byteLength = gops.byteLength;
    nalCount = gops.nalCount;
    duration = gops.duration;
    alignIndex = gopIndex = 0;

    while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        break;
      }

      if (gop.pts > align.pts) {
        // this current gop starts after the current gop we want to align on, so increment
        // align index
        alignIndex++;
        continue;
      }

      // current gop starts before the current gop we want to align on, so increment gop
      // index
      gopIndex++;
      byteLength -= gop.byteLength;
      nalCount -= gop.nalCount;
      duration -= gop.duration;
    }

    if (gopIndex === 0) {
      // no gops to trim
      return gops;
    }

    if (gopIndex === gops.length) {
      // all gops trimmed, skip appending all gops
      return null;
    }

    alignedGops = gops.slice(gopIndex);
    alignedGops.byteLength = byteLength;
    alignedGops.duration = duration;
    alignedGops.nalCount = nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;

    return alignedGops;
  };

  // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the END of the list
  this.alignGopsAtEnd_ = function(gops) {
    var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;

    alignIndex = gopsToAlignWith.length - 1;
    gopIndex = gops.length - 1;
    alignEndIndex = null;
    matchFound = false;

    while (alignIndex >= 0 && gopIndex >= 0) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        matchFound = true;
        break;
      }

      if (align.pts > gop.pts) {
        alignIndex--;
        continue;
      }

      if (alignIndex === gopsToAlignWith.length - 1) {
        // gop.pts is greater than the last alignment candidate. If no match is found
        // by the end of this loop, we still want to append gops that come after this
        // point
        alignEndIndex = gopIndex;
      }

      gopIndex--;
    }

    if (!matchFound && alignEndIndex === null) {
      return null;
    }

    var trimIndex;

    if (matchFound) {
      trimIndex = gopIndex;
    } else {
      trimIndex = alignEndIndex;
    }

    if (trimIndex === 0) {
      return gops;
    }

    var alignedGops = gops.slice(trimIndex);
    var metadata = alignedGops.reduce(function(total, gop) {
      total.byteLength += gop.byteLength;
      total.duration += gop.duration;
      total.nalCount += gop.nalCount;
      return total;
    }, { byteLength: 0, duration: 0, nalCount: 0 });

    alignedGops.byteLength = metadata.byteLength;
    alignedGops.duration = metadata.duration;
    alignedGops.nalCount = metadata.nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;

    return alignedGops;
  };

  this.alignGopsWith = function(newGopsToAlignWith) {
    gopsToAlignWith = newGopsToAlignWith;
  };
};

VideoSegmentStream.prototype = new Stream();

/**
 * Store information about the start and end of the track and the
 * duration for each frame/sample we process in order to calculate
 * the baseMediaDecodeTime
 */
collectDtsInfo = function(track, data) {
  if (typeof data.pts === 'number') {
    if (track.timelineStartInfo.pts === undefined) {
      track.timelineStartInfo.pts = data.pts;
    }

    if (track.minSegmentPts === undefined) {
      track.minSegmentPts = data.pts;
    } else {
      track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);
    }

    if (track.maxSegmentPts === undefined) {
      track.maxSegmentPts = data.pts;
    } else {
      track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);
    }
  }

  if (typeof data.dts === 'number') {
    if (track.timelineStartInfo.dts === undefined) {
      track.timelineStartInfo.dts = data.dts;
    }

    if (track.minSegmentDts === undefined) {
      track.minSegmentDts = data.dts;
    } else {
      track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);
    }

    if (track.maxSegmentDts === undefined) {
      track.maxSegmentDts = data.dts;
    } else {
      track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);
    }
  }
};

/**
 * Clear values used to calculate the baseMediaDecodeTime between
 * tracks
 */
clearDtsInfo = function(track) {
  delete track.minSegmentDts;
  delete track.maxSegmentDts;
  delete track.minSegmentPts;
  delete track.maxSegmentPts;
};

/**
 * Calculate the track's baseMediaDecodeTime based on the earliest
 * DTS the transmuxer has ever seen and the minimum DTS for the
 * current track
 */
calculateTrackBaseMediaDecodeTime = function(track) {
  var
    baseMediaDecodeTime,
    scale,
    // Calculate the distance, in time, that this segment starts from the start
    // of the timeline (earliest time seen since the transmuxer initialized)
    timeSinceStartOfTimeline = track.minSegmentDts - track.timelineStartInfo.dts;

  // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where
  // we want the start of the first segment to be placed
  baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime;

  // Add to that the distance this segment is from the very first
  baseMediaDecodeTime += timeSinceStartOfTimeline;

  // baseMediaDecodeTime must not become negative
  baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);

  if (track.type === 'audio') {
    // Audio has a different clock equal to the sampling_rate so we need to
    // scale the PTS values into the clock rate of the track
    scale = track.samplerate / ONE_SECOND_IN_TS;
    baseMediaDecodeTime *= scale;
    baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);
  }

  return baseMediaDecodeTime;
};

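// Editor's note (illustrative, not part of the original source): for audio,
// the scaling above converts from the 90kHz MPEG-2 TS clock used throughout
// this transmuxer (ONE_SECOND_IN_TS) into the track's own timescale, its
// sample rate. For example, with a 48kHz AAC track, a baseMediaDecodeTime of
// 90000 ticks (one second) becomes Math.floor(90000 * (48000 / 90000)) = 48000
// ticks in the audio track's timescale.
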
/**
 * A Stream that can combine multiple streams (i.e. audio & video)
 * into a single output segment for MSE. Also supports audio-only
 * and video-only streams.
 */
CoalesceStream = function(options, metadataStream) {
  // Number of Tracks per output segment
  // If greater than 1, we combine multiple
  // tracks into a single segment
  this.numberOfTracks = 0;
  this.metadataStream = metadataStream;

  if (typeof options.remux !== 'undefined') {
    this.remuxTracks = !!options.remux;
  } else {
    this.remuxTracks = true;
  }

  this.pendingTracks = [];
  this.videoTrack = null;
  this.pendingBoxes = [];
  this.pendingCaptions = [];
  this.pendingMetadata = [];
  this.pendingBytes = 0;
  this.emittedTracks = 0;

  CoalesceStream.prototype.init.call(this);

  // Take output from multiple streams
  this.push = function(output) {
    // buffer incoming captions until the associated video segment
    // finishes
    if (output.text) {
      return this.pendingCaptions.push(output);
    }
    // buffer incoming id3 tags until the final flush
    if (output.frames) {
      return this.pendingMetadata.push(output);
    }

    // Add this track to the list of pending tracks and store
    // important information required for the construction of
    // the final segment
    this.pendingTracks.push(output.track);
    this.pendingBoxes.push(output.boxes);
    this.pendingBytes += output.boxes.byteLength;

    if (output.track.type === 'video') {
      this.videoTrack = output.track;
    }
    if (output.track.type === 'audio') {
      this.audioTrack = output.track;
    }
  };
};

CoalesceStream.prototype = new Stream();
CoalesceStream.prototype.flush = function(flushSource) {
  var
    offset = 0,
    event = {
      captions: [],
      captionStreams: {},
      metadata: [],
      info: {}
    },
    caption,
    id3,
    initSegment,
    timelineStartPts = 0,
    i;

  if (this.pendingTracks.length < this.numberOfTracks) {
    if (flushSource !== 'VideoSegmentStream' &&
        flushSource !== 'AudioSegmentStream') {
      // Return because we haven't received a flush from a data-generating
      // portion of the segment (meaning that we have only received metadata
      // or captions.)
      return;
    } else if (this.remuxTracks) {
      // Return until we have enough tracks from the pipeline to remux (if we
      // are remuxing audio and video into a single MP4)
      return;
    } else if (this.pendingTracks.length === 0) {
      // In the case where we receive a flush without any data having been
      // received we consider it an emitted track for the purposes of coalescing
      // `done` events.
      // We do this for the case where there is an audio and video track in the
      // segment but no audio data. (seen in several playlists with alternate
      // audio tracks and no audio present in the main TS segments.)
      this.emittedTracks++;

      if (this.emittedTracks >= this.numberOfTracks) {
        this.trigger('done');
        this.emittedTracks = 0;
      }
      return;
    }
  }

  if (this.videoTrack) {
    timelineStartPts = this.videoTrack.timelineStartInfo.pts;
    VIDEO_PROPERTIES.forEach(function(prop) {
      event.info[prop] = this.videoTrack[prop];
    }, this);
  } else if (this.audioTrack) {
    timelineStartPts = this.audioTrack.timelineStartInfo.pts;
    AUDIO_PROPERTIES.forEach(function(prop) {
      event.info[prop] = this.audioTrack[prop];
    }, this);
  }

  if (this.pendingTracks.length === 1) {
    event.type = this.pendingTracks[0].type;
  } else {
    event.type = 'combined';
  }

  this.emittedTracks += this.pendingTracks.length;

  initSegment = mp4.initSegment(this.pendingTracks);

  // Create a new typed array to hold the init segment
  event.initSegment = new Uint8Array(initSegment.byteLength);

  // Create an init segment containing a moov
  // and track definitions
  event.initSegment.set(initSegment);

  // Create a new typed array to hold the moof+mdats
  event.data = new Uint8Array(this.pendingBytes);

  // Append each moof+mdat (one per track) together
  for (i = 0; i < this.pendingBoxes.length; i++) {
    event.data.set(this.pendingBoxes[i], offset);
    offset += this.pendingBoxes[i].byteLength;
  }

  // Translate caption PTS times into second offsets into the
  // video timeline for the segment, and add track info
  for (i = 0; i < this.pendingCaptions.length; i++) {
    caption = this.pendingCaptions[i];
    caption.startTime = (caption.startPts - timelineStartPts);
    caption.startTime /= 90e3;
    caption.endTime = (caption.endPts - timelineStartPts);
    caption.endTime /= 90e3;
    event.captionStreams[caption.stream] = true;
    event.captions.push(caption);
  }

  // Translate ID3 frame PTS times into second offsets into the
  // video timeline for the segment
  for (i = 0; i < this.pendingMetadata.length; i++) {
    id3 = this.pendingMetadata[i];
    id3.cueTime = (id3.pts - timelineStartPts);
    id3.cueTime /= 90e3;
    event.metadata.push(id3);
  }
  // We add this to every single emitted segment even though we only need
  // it for the first
  event.metadata.dispatchType = this.metadataStream.dispatchType;

  // Reset stream state
  this.pendingTracks.length = 0;
  this.videoTrack = null;
  this.pendingBoxes.length = 0;
  this.pendingCaptions.length = 0;
  this.pendingBytes = 0;
  this.pendingMetadata.length = 0;

  // Emit the built segment
  this.trigger('data', event);

  // Only emit `done` if all tracks have been flushed and emitted
  if (this.emittedTracks >= this.numberOfTracks) {
    this.trigger('done');
    this.emittedTracks = 0;
  }
};
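
// Editor's note (illustrative, not part of the original source): the flush
// above converts caption and ID3 cue times from 90kHz PTS ticks into seconds
// relative to the segment's timeline start. For example, a caption with
// startPts = 900090 in a segment whose timelineStartPts = 0 is exposed as
// startTime = 900090 / 90e3 = 10.001 seconds.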
/**
 * A Stream that expects MP2T binary data as input and produces
 * corresponding media segments, suitable for use with Media Source
 * Extension (MSE) implementations that support the ISO BMFF byte
 * stream format, like Chrome.
 */
Transmuxer = function(options) {
  var
    self = this,
    hasFlushed = true,
    videoTrack,
    audioTrack;

  Transmuxer.prototype.init.call(this);

  options = options || {};
  this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
  this.transmuxPipeline_ = {};

  this.setupAacPipeline = function() {
    var pipeline = {};
    this.transmuxPipeline_ = pipeline;

    pipeline.type = 'aac';
    pipeline.metadataStream = new m2ts.MetadataStream();

    // set up the parsing pipeline
    pipeline.aacStream = new AacStream();
    pipeline.audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');
    pipeline.timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');
    pipeline.adtsStream = new AdtsStream();
    pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);
    pipeline.headOfPipeline = pipeline.aacStream;

    pipeline.aacStream
      .pipe(pipeline.audioTimestampRolloverStream)
      .pipe(pipeline.adtsStream);
    pipeline.aacStream
      .pipe(pipeline.timedMetadataTimestampRolloverStream)
      .pipe(pipeline.metadataStream)
      .pipe(pipeline.coalesceStream);

    pipeline.metadataStream.on('timestamp', function(frame) {
      pipeline.aacStream.setTimestamp(frame.timeStamp);
    });

    pipeline.aacStream.on('data', function(data) {
      if (data.type === 'timed-metadata' && !pipeline.audioSegmentStream) {
        audioTrack = audioTrack || {
          timelineStartInfo: {
            baseMediaDecodeTime: self.baseMediaDecodeTime
          },
          codec: 'adts',
          type: 'audio'
        };
        // hook up the audio segment stream to the first track with aac data
        pipeline.coalesceStream.numberOfTracks++;
        pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack);
        // Set up the final part of the audio pipeline
        pipeline.adtsStream
          .pipe(pipeline.audioSegmentStream)
          .pipe(pipeline.coalesceStream);
      }
    });

    // Re-emit any data coming from the coalesce stream to the outside world
    pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data'));
    // Let the consumer know we have finished flushing the entire pipeline
    pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));
  };

  this.setupTsPipeline = function() {
    var pipeline = {};
    this.transmuxPipeline_ = pipeline;

    pipeline.type = 'ts';
    pipeline.metadataStream = new m2ts.MetadataStream();

    // set up the parsing pipeline
    pipeline.packetStream = new m2ts.TransportPacketStream();
    pipeline.parseStream = new m2ts.TransportParseStream();
    pipeline.elementaryStream = new m2ts.ElementaryStream();
    pipeline.videoTimestampRolloverStream = new m2ts.TimestampRolloverStream('video');
    pipeline.audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');
    pipeline.timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');
    pipeline.adtsStream = new AdtsStream();
    pipeline.h264Stream = new H264Stream();
    pipeline.captionStream = new m2ts.CaptionStream();
    pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);
    pipeline.headOfPipeline = pipeline.packetStream;

    // disassemble MPEG2-TS packets into elementary streams
    pipeline.packetStream
      .pipe(pipeline.parseStream)
      .pipe(pipeline.elementaryStream);

    // !!THIS ORDER IS IMPORTANT!!
    // demux the streams
    pipeline.elementaryStream
      .pipe(pipeline.videoTimestampRolloverStream)
      .pipe(pipeline.h264Stream);
    pipeline.elementaryStream
      .pipe(pipeline.audioTimestampRolloverStream)
      .pipe(pipeline.adtsStream);

    pipeline.elementaryStream
      .pipe(pipeline.timedMetadataTimestampRolloverStream)
      .pipe(pipeline.metadataStream)
      .pipe(pipeline.coalesceStream);

    // Hook up CEA-608/708 caption stream
    pipeline.h264Stream.pipe(pipeline.captionStream)
      .pipe(pipeline.coalesceStream);

    pipeline.elementaryStream.on('data', function(data) {
      var i;

      if (data.type === 'metadata') {
        i = data.tracks.length;

        // scan the tracks listed in the metadata
        while (i--) {
          if (!videoTrack && data.tracks[i].type === 'video') {
            videoTrack = data.tracks[i];
            videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
          } else if (!audioTrack && data.tracks[i].type === 'audio') {
            audioTrack = data.tracks[i];
            audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
          }
        }

        // hook up the video segment stream to the first track with h264 data
        if (videoTrack && !pipeline.videoSegmentStream) {
          pipeline.coalesceStream.numberOfTracks++;
          pipeline.videoSegmentStream = new VideoSegmentStream(videoTrack, options);

          pipeline.videoSegmentStream.on('timelineStartInfo', function(timelineStartInfo) {
            // When video emits timelineStartInfo data after a flush, we forward that
            // info to the AudioSegmentStream, if it exists, because video timeline
            // data takes precedence.
            if (audioTrack) {
              audioTrack.timelineStartInfo = timelineStartInfo;
              // On the first segment we trim AAC frames that exist before the
              // very earliest DTS we have seen in video because Chrome will
              // interpret any video track with a baseMediaDecodeTime that is
              // non-zero as a gap.
              pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts);
            }
          });

          pipeline.videoSegmentStream.on('processedGopsInfo',
            self.trigger.bind(self, 'gopInfo'));

          pipeline.videoSegmentStream.on('baseMediaDecodeTime', function(baseMediaDecodeTime) {
            if (audioTrack) {
              pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);
            }
          });

          // Set up the final part of the video pipeline
          pipeline.h264Stream
            .pipe(pipeline.videoSegmentStream)
            .pipe(pipeline.coalesceStream);
        }

        if (audioTrack && !pipeline.audioSegmentStream) {
          // hook up the audio segment stream to the first track with aac data
          pipeline.coalesceStream.numberOfTracks++;
          pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack);

          // Set up the final part of the audio pipeline
          pipeline.adtsStream
            .pipe(pipeline.audioSegmentStream)
            .pipe(pipeline.coalesceStream);
        }
      }
    });

    // Re-emit any data coming from the coalesce stream to the outside world
    pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data'));
    // Let the consumer know we have finished flushing the entire pipeline
    pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));
  };

  // hook up the segment streams once track metadata is delivered
  this.setBaseMediaDecodeTime = function(baseMediaDecodeTime) {
    var pipeline = this.transmuxPipeline_;

    this.baseMediaDecodeTime = baseMediaDecodeTime;
    if (audioTrack) {
      audioTrack.timelineStartInfo.dts = undefined;
      audioTrack.timelineStartInfo.pts = undefined;
      clearDtsInfo(audioTrack);
      audioTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
      if (pipeline.audioTimestampRolloverStream) {
        pipeline.audioTimestampRolloverStream.discontinuity();
      }
    }
    if (videoTrack) {
      if (pipeline.videoSegmentStream) {
        pipeline.videoSegmentStream.gopCache_ = [];
        pipeline.videoTimestampRolloverStream.discontinuity();
      }
      videoTrack.timelineStartInfo.dts = undefined;
      videoTrack.timelineStartInfo.pts = undefined;
      clearDtsInfo(videoTrack);
      pipeline.captionStream.reset();
      videoTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
    }

    if (pipeline.timedMetadataTimestampRolloverStream) {
      pipeline.timedMetadataTimestampRolloverStream.discontinuity();
    }
  };

  this.setAudioAppendStart = function(timestamp) {
    if (audioTrack) {
      this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);
    }
  };

  this.alignGopsWith = function(gopsToAlignWith) {
    if (videoTrack && this.transmuxPipeline_.videoSegmentStream) {
      this.transmuxPipeline_.videoSegmentStream.alignGopsWith(gopsToAlignWith);
    }
  };

  // feed incoming data to the front of the parsing pipeline
  this.push = function(data) {
    if (hasFlushed) {
      var isAac = isLikelyAacData(data);

      if (isAac && this.transmuxPipeline_.type !== 'aac') {
        this.setupAacPipeline();
      } else if (!isAac && this.transmuxPipeline_.type !== 'ts') {
        this.setupTsPipeline();
      }
      hasFlushed = false;
    }
    this.transmuxPipeline_.headOfPipeline.push(data);
  };

  // flush any buffered data
  this.flush = function() {
    hasFlushed = true;
    // Start at the top of the pipeline and flush all pending work
    this.transmuxPipeline_.headOfPipeline.flush();
  };

  // Caption data has to be reset when seeking outside buffered range
  this.resetCaptions = function() {
    if (this.transmuxPipeline_.captionStream) {
      this.transmuxPipeline_.captionStream.reset();
    }
  };

};
Transmuxer.prototype = new Stream();

module.exports = {
  Transmuxer: Transmuxer,
  VideoSegmentStream: VideoSegmentStream,
  AudioSegmentStream: AudioSegmentStream,
  AUDIO_PROPERTIES: AUDIO_PROPERTIES,
  VIDEO_PROPERTIES: VIDEO_PROPERTIES
};

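// Editor's note: a minimal usage sketch (not part of the original source).
// The `remux` option and the push/flush/'data' event used below are defined
// earlier in this module; the `muxjs.mp4` namespace is an assumption about
// how this bundle is exposed to page code.
//
//   var transmuxer = new muxjs.mp4.Transmuxer({ remux: true });
//   transmuxer.on('data', function(segment) {
//     // segment.initSegment and segment.data are Uint8Arrays suitable for
//     // appending to a Media Source Extensions SourceBuffer
//   });
//   transmuxer.push(tsBytes); // one or more Uint8Arrays of MPEG2-TS data
//   transmuxer.flush();       // emits the assembled fragmented MP4 segment
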
},{"../aac":4,"../codecs/adts.js":6,"../codecs/h264":7,"../data/silence":9,"../m2ts/m2ts.js":19,"../utils/clock":31,"../utils/stream.js":33,"./mp4-generator.js":25}],28:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var
|
||
tagTypes = {
|
||
0x08: 'audio',
|
||
0x09: 'video',
|
||
0x12: 'metadata'
|
||
},
|
||
hex = function(val) {
|
||
return '0x' + ('00' + val.toString(16)).slice(-2).toUpperCase();
|
||
},
|
||
hexStringList = function(data) {
|
||
var arr = [], i;
|
||
|
||
while (data.byteLength > 0) {
|
||
i = 0;
|
||
arr.push(hex(data[i++]));
|
||
data = data.subarray(i);
|
||
}
|
||
return arr.join(' ');
|
||
},
|
||
parseAVCTag = function(tag, obj) {
|
||
var
|
||
avcPacketTypes = [
|
||
'AVC Sequence Header',
|
||
'AVC NALU',
|
||
'AVC End-of-Sequence'
|
||
],
|
||
compositionTime = (tag[1] & parseInt('01111111', 2) << 16) | (tag[2] << 8) | tag[3];
|
||
|
||
obj = obj || {};
|
||
|
||
obj.avcPacketType = avcPacketTypes[tag[0]];
|
||
obj.CompositionTime = (tag[1] & parseInt('10000000', 2)) ? -compositionTime : compositionTime;
|
||
|
||
if (tag[0] === 1) {
|
||
obj.nalUnitTypeRaw = hexStringList(tag.subarray(4, 100));
|
||
} else {
|
||
obj.data = hexStringList(tag.subarray(4));
|
||
}
|
||
|
||
return obj;
|
||
},
|
||
parseVideoTag = function(tag, obj) {
|
||
var
|
||
frameTypes = [
|
||
'Unknown',
|
||
'Keyframe (for AVC, a seekable frame)',
|
||
'Inter frame (for AVC, a nonseekable frame)',
|
||
'Disposable inter frame (H.263 only)',
|
||
'Generated keyframe (reserved for server use only)',
|
||
'Video info/command frame'
|
||
],
|
||
codecID = tag[0] & parseInt('00001111', 2);
|
||
|
||
obj = obj || {};
|
||
|
||
obj.frameType = frameTypes[(tag[0] & parseInt('11110000', 2)) >>> 4];
|
||
obj.codecID = codecID;
|
||
|
||
if (codecID === 7) {
|
||
return parseAVCTag(tag.subarray(1), obj);
|
||
}
|
||
return obj;
|
||
},
|
||
parseAACTag = function(tag, obj) {
|
||
var packetTypes = [
|
||
'AAC Sequence Header',
|
||
'AAC Raw'
|
||
];
|
||
|
||
obj = obj || {};
|
||
|
||
obj.aacPacketType = packetTypes[tag[0]];
|
||
obj.data = hexStringList(tag.subarray(1));
|
||
|
||
return obj;
|
||
},
|
||
parseAudioTag = function(tag, obj) {
|
||
var
|
||
formatTable = [
|
||
'Linear PCM, platform endian',
|
||
'ADPCM',
|
||
'MP3',
|
||
'Linear PCM, little endian',
|
||
'Nellymoser 16-kHz mono',
|
||
'Nellymoser 8-kHz mono',
|
||
'Nellymoser',
|
||
'G.711 A-law logarithmic PCM',
|
||
'G.711 mu-law logarithmic PCM',
|
||
'reserved',
|
||
'AAC',
|
||
'Speex',
|
||
'MP3 8-Khz',
|
||
'Device-specific sound'
|
||
],
|
||
samplingRateTable = [
|
||
'5.5-kHz',
|
||
'11-kHz',
|
||
'22-kHz',
|
||
'44-kHz'
|
||
],
|
||
soundFormat = (tag[0] & parseInt('11110000', 2)) >>> 4;
|
||
|
||
obj = obj || {};
|
||
|
||
obj.soundFormat = formatTable[soundFormat];
|
||
obj.soundRate = samplingRateTable[(tag[0] & parseInt('00001100', 2)) >>> 2];
|
||
obj.soundSize = ((tag[0] & parseInt('00000010', 2)) >>> 1) ? '16-bit' : '8-bit';
|
||
obj.soundType = (tag[0] & parseInt('00000001', 2)) ? 'Stereo' : 'Mono';
|
||
|
||
if (soundFormat === 10) {
|
||
return parseAACTag(tag.subarray(1), obj);
|
||
}
|
||
return obj;
|
||
},
|
||
parseGenericTag = function(tag) {
|
||
return {
|
||
tagType: tagTypes[tag[0]],
|
||
dataSize: (tag[1] << 16) | (tag[2] << 8) | tag[3],
|
||
timestamp: (tag[7] << 24) | (tag[4] << 16) | (tag[5] << 8) | tag[6],
|
||
streamID: (tag[8] << 16) | (tag[9] << 8) | tag[10]
|
||
};
|
||
},
|
||
inspectFlvTag = function(tag) {
|
||
var header = parseGenericTag(tag);
|
||
switch (tag[0]) {
|
||
case 0x08:
|
||
parseAudioTag(tag.subarray(11), header);
|
||
break;
|
||
case 0x09:
|
||
parseVideoTag(tag.subarray(11), header);
|
||
break;
|
||
case 0x12:
|
||
}
|
||
return header;
|
||
},
|
||
inspectFlv = function(bytes) {
|
||
var i = 9, // header
|
||
dataSize,
|
||
parsedResults = [],
|
||
tag;
|
||
|
||
// traverse the tags
|
||
i += 4; // skip previous tag size
|
||
while (i < bytes.byteLength) {
|
||
dataSize = bytes[i + 1] << 16;
|
||
dataSize |= bytes[i + 2] << 8;
|
||
dataSize |= bytes[i + 3];
|
||
dataSize += 11;
|
||
|
||
tag = bytes.subarray(i, i + dataSize);
|
||
parsedResults.push(inspectFlvTag(tag));
|
||
i += dataSize + 4;
|
||
}
|
||
return parsedResults;
|
||
},
|
||
textifyFlv = function(flvTagArray) {
|
||
return JSON.stringify(flvTagArray, null, 2);
|
||
};
|
||
|
||
module.exports = {
|
||
inspectTag: inspectFlvTag,
|
||
inspect: inspectFlv,
|
||
textify: textifyFlv
|
||
};
|
||
|
||
},{}],29:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Parse the internal MP4 structure into an equivalent javascript
|
||
* object.
|
||
*/
|
||
'use strict';
|
||
|
||
var
|
||
inspectMp4,
|
||
textifyMp4,
|
||
|
||
parseType = require('../mp4/probe').parseType,
|
||
parseMp4Date = function(seconds) {
|
||
return new Date(seconds * 1000 - 2082844800000);
|
||
},
|
||
parseSampleFlags = function(flags) {
|
||
return {
|
||
isLeading: (flags[0] & 0x0c) >>> 2,
|
||
dependsOn: flags[0] & 0x03,
|
||
isDependedOn: (flags[1] & 0xc0) >>> 6,
|
||
hasRedundancy: (flags[1] & 0x30) >>> 4,
|
||
paddingValue: (flags[1] & 0x0e) >>> 1,
|
||
isNonSyncSample: flags[1] & 0x01,
|
||
degradationPriority: (flags[2] << 8) | flags[3]
|
||
};
|
||
},
|
||
nalParse = function(avcStream) {
|
||
var
|
||
avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
|
||
result = [],
|
||
i,
|
||
length;
|
||
for (i = 0; i + 4 < avcStream.length; i += length) {
|
||
length = avcView.getUint32(i);
|
||
i += 4;
|
||
|
||
// bail if this doesn't appear to be an H264 stream
|
||
if (length <= 0) {
|
||
result.push('<span style=\'color:red;\'>MALFORMED DATA</span>');
|
||
continue;
|
||
}
|
||
|
||
switch (avcStream[i] & 0x1F) {
|
||
case 0x01:
|
||
result.push('slice_layer_without_partitioning_rbsp');
|
||
break;
|
||
case 0x05:
|
||
result.push('slice_layer_without_partitioning_rbsp_idr');
|
||
break;
|
||
case 0x06:
|
||
result.push('sei_rbsp');
|
||
break;
|
||
case 0x07:
|
||
result.push('seq_parameter_set_rbsp');
|
||
break;
|
||
case 0x08:
|
||
result.push('pic_parameter_set_rbsp');
|
||
break;
|
||
case 0x09:
|
||
result.push('access_unit_delimiter_rbsp');
|
||
break;
|
||
default:
|
||
        result.push('UNKNOWN NAL - ' + (avcStream[i] & 0x1F));
|
||
break;
|
||
}
|
||
}
|
||
return result;
|
||
},
|
||
|
||
// registry of handlers for individual mp4 box types
|
||
parse = {
|
||
// codingname, not a first-class box type. stsd entries share the
|
||
// same format as real boxes so the parsing infrastructure can be
|
||
// shared
|
||
avc1: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
|
||
return {
|
||
dataReferenceIndex: view.getUint16(6),
|
||
width: view.getUint16(24),
|
||
height: view.getUint16(26),
|
||
horizresolution: view.getUint16(28) + (view.getUint16(30) / 16),
|
||
vertresolution: view.getUint16(32) + (view.getUint16(34) / 16),
|
||
frameCount: view.getUint16(40),
|
||
depth: view.getUint16(74),
|
||
config: inspectMp4(data.subarray(78, data.byteLength))
|
||
};
|
||
},
|
||
avcC: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
configurationVersion: data[0],
|
||
avcProfileIndication: data[1],
|
||
profileCompatibility: data[2],
|
||
avcLevelIndication: data[3],
|
||
lengthSizeMinusOne: data[4] & 0x03,
|
||
sps: [],
|
||
pps: []
|
||
},
|
||
numOfSequenceParameterSets = data[5] & 0x1f,
|
||
numOfPictureParameterSets,
|
||
nalSize,
|
||
offset,
|
||
i;
|
||
|
||
// iterate past any SPSs
|
||
offset = 6;
|
||
for (i = 0; i < numOfSequenceParameterSets; i++) {
|
||
nalSize = view.getUint16(offset);
|
||
offset += 2;
|
||
result.sps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
|
||
offset += nalSize;
|
||
}
|
||
// iterate past any PPSs
|
||
numOfPictureParameterSets = data[offset];
|
||
offset++;
|
||
for (i = 0; i < numOfPictureParameterSets; i++) {
|
||
nalSize = view.getUint16(offset);
|
||
offset += 2;
|
||
result.pps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
|
||
offset += nalSize;
|
||
}
|
||
return result;
|
||
},
|
||
btrt: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
|
||
return {
|
||
bufferSizeDB: view.getUint32(0),
|
||
maxBitrate: view.getUint32(4),
|
||
avgBitrate: view.getUint32(8)
|
||
};
|
||
},
|
||
esds: function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
esId: (data[6] << 8) | data[7],
|
||
streamPriority: data[8] & 0x1f,
|
||
decoderConfig: {
|
||
objectProfileIndication: data[11],
|
||
streamType: (data[12] >>> 2) & 0x3f,
|
||
bufferSize: (data[13] << 16) | (data[14] << 8) | data[15],
|
||
maxBitrate: (data[16] << 24) |
|
||
(data[17] << 16) |
|
||
(data[18] << 8) |
|
||
data[19],
|
||
avgBitrate: (data[20] << 24) |
|
||
(data[21] << 16) |
|
||
(data[22] << 8) |
|
||
data[23],
|
||
decoderConfigDescriptor: {
|
||
tag: data[24],
|
||
length: data[25],
|
||
audioObjectType: (data[26] >>> 3) & 0x1f,
|
||
samplingFrequencyIndex: ((data[26] & 0x07) << 1) |
|
||
((data[27] >>> 7) & 0x01),
|
||
channelConfiguration: (data[27] >>> 3) & 0x0f
|
||
}
|
||
}
|
||
};
|
||
},
|
||
ftyp: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
majorBrand: parseType(data.subarray(0, 4)),
|
||
minorVersion: view.getUint32(4),
|
||
compatibleBrands: []
|
||
},
|
||
i = 8;
|
||
while (i < data.byteLength) {
|
||
result.compatibleBrands.push(parseType(data.subarray(i, i + 4)));
|
||
i += 4;
|
||
}
|
||
return result;
|
||
},
|
||
dinf: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
dref: function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
dataReferences: inspectMp4(data.subarray(8))
|
||
};
|
||
},
|
||
hdlr: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: view.getUint8(0),
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
handlerType: parseType(data.subarray(8, 12)),
|
||
name: ''
|
||
},
|
||
i = 8;
|
||
|
||
// parse out the name field
|
||
for (i = 24; i < data.byteLength; i++) {
|
||
if (data[i] === 0x00) {
|
||
// the name field is null-terminated
|
||
i++;
|
||
break;
|
||
}
|
||
result.name += String.fromCharCode(data[i]);
|
||
}
|
||
// decode UTF-8 to javascript's internal representation
|
||
// see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html
|
||
result.name = decodeURIComponent(global.escape(result.name));
|
||
|
||
return result;
|
||
},
|
||
mdat: function(data) {
|
||
return {
|
||
byteLength: data.byteLength,
|
||
nals: nalParse(data)
|
||
};
|
||
},
|
||
mdhd: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
i = 4,
|
||
language,
|
||
result = {
|
||
version: view.getUint8(0),
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
language: ''
|
||
};
|
||
if (result.version === 1) {
|
||
i += 4;
|
||
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 8;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 4;
|
||
result.timescale = view.getUint32(i);
|
||
i += 8;
|
||
result.duration = view.getUint32(i); // truncating top 4 bytes
|
||
} else {
|
||
result.creationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.timescale = view.getUint32(i);
|
||
i += 4;
|
||
result.duration = view.getUint32(i);
|
||
}
|
||
i += 4;
|
||
// language is stored as an ISO-639-2/T code in an array of three 5-bit fields
|
||
// each field is the packed difference between its ASCII value and 0x60
|
||
language = view.getUint16(i);
|
||
result.language += String.fromCharCode((language >> 10) + 0x60);
|
||
result.language += String.fromCharCode(((language & 0x03c0) >> 5) + 0x60);
|
||
result.language += String.fromCharCode((language & 0x1f) + 0x60);
|
||
|
||
return result;
|
||
},
|
||
mdia: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
mfhd: function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
sequenceNumber: (data[4] << 24) |
|
||
(data[5] << 16) |
|
||
(data[6] << 8) |
|
||
(data[7])
|
||
};
|
||
},
|
||
minf: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
// codingname, not a first-class box type. stsd entries share the
|
||
// same format as real boxes so the parsing infrastructure can be
|
||
// shared
|
||
mp4a: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
// 6 bytes reserved
|
||
dataReferenceIndex: view.getUint16(6),
|
||
// 4 + 4 bytes reserved
|
||
channelcount: view.getUint16(16),
|
||
samplesize: view.getUint16(18),
|
||
// 2 bytes pre_defined
|
||
// 2 bytes reserved
|
||
samplerate: view.getUint16(24) + (view.getUint16(26) / 65536)
|
||
};
|
||
|
||
// if there are more bytes to process, assume this is an ISO/IEC
|
||
// 14496-14 MP4AudioSampleEntry and parse the ESDBox
|
||
if (data.byteLength > 28) {
|
||
result.streamDescriptor = inspectMp4(data.subarray(28))[0];
|
||
}
|
||
return result;
|
||
},
|
||
moof: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
moov: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
mvex: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
mvhd: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
i = 4,
|
||
result = {
|
||
version: view.getUint8(0),
|
||
flags: new Uint8Array(data.subarray(1, 4))
|
||
};
|
||
|
||
if (result.version === 1) {
|
||
i += 4;
|
||
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 8;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 4;
|
||
result.timescale = view.getUint32(i);
|
||
i += 8;
|
||
result.duration = view.getUint32(i); // truncating top 4 bytes
|
||
} else {
|
||
result.creationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.timescale = view.getUint32(i);
|
||
i += 4;
|
||
result.duration = view.getUint32(i);
|
||
}
|
||
i += 4;
|
||
|
||
// convert fixed-point, base 16 back to a number
|
||
result.rate = view.getUint16(i) + (view.getUint16(i + 2) / 16);
|
||
i += 4;
|
||
result.volume = view.getUint8(i) + (view.getUint8(i + 1) / 8);
|
||
i += 2;
|
||
i += 2;
|
||
i += 2 * 4;
|
||
result.matrix = new Uint32Array(data.subarray(i, i + (9 * 4)));
|
||
i += 9 * 4;
|
||
i += 6 * 4;
|
||
result.nextTrackId = view.getUint32(i);
|
||
return result;
|
||
},
|
||
pdin: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
|
||
return {
|
||
version: view.getUint8(0),
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
rate: view.getUint32(4),
|
||
initialDelay: view.getUint32(8)
|
||
};
|
||
},
|
||
sdtp: function(data) {
|
||
var
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
samples: []
|
||
}, i;
|
||
|
||
for (i = 4; i < data.byteLength; i++) {
|
||
result.samples.push({
|
||
dependsOn: (data[i] & 0x30) >> 4,
|
||
isDependedOn: (data[i] & 0x0c) >> 2,
|
||
hasRedundancy: data[i] & 0x03
|
||
});
|
||
}
|
||
return result;
|
||
},
|
||
sidx: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
references: [],
|
||
referenceId: view.getUint32(4),
|
||
timescale: view.getUint32(8),
|
||
earliestPresentationTime: view.getUint32(12),
|
||
firstOffset: view.getUint32(16)
|
||
},
|
||
referenceCount = view.getUint16(22),
|
||
i;
|
||
|
||
for (i = 24; referenceCount; i += 12, referenceCount--) {
|
||
result.references.push({
|
||
referenceType: (data[i] & 0x80) >>> 7,
|
||
referencedSize: view.getUint32(i) & 0x7FFFFFFF,
|
||
subsegmentDuration: view.getUint32(i + 4),
|
||
startsWithSap: !!(data[i + 8] & 0x80),
|
||
sapType: (data[i + 8] & 0x70) >>> 4,
|
||
sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF
|
||
});
|
||
}
|
||
|
||
return result;
|
||
},
|
||
smhd: function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
balance: data[4] + (data[5] / 256)
|
||
};
|
||
},
|
||
stbl: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
stco: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
chunkOffsets: []
|
||
},
|
||
entryCount = view.getUint32(4),
|
||
i;
|
||
for (i = 8; entryCount; i += 4, entryCount--) {
|
||
result.chunkOffsets.push(view.getUint32(i));
|
||
}
|
||
return result;
|
||
},
|
||
stsc: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
entryCount = view.getUint32(4),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
sampleToChunks: []
|
||
},
|
||
i;
|
||
for (i = 8; entryCount; i += 12, entryCount--) {
|
||
result.sampleToChunks.push({
|
||
firstChunk: view.getUint32(i),
|
||
samplesPerChunk: view.getUint32(i + 4),
|
||
sampleDescriptionIndex: view.getUint32(i + 8)
|
||
});
|
||
}
|
||
return result;
|
||
},
|
||
stsd: function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
sampleDescriptions: inspectMp4(data.subarray(8))
|
||
};
|
||
},
|
||
stsz: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
sampleSize: view.getUint32(4),
|
||
entries: []
|
||
},
|
||
i;
|
||
for (i = 12; i < data.byteLength; i += 4) {
|
||
result.entries.push(view.getUint32(i));
|
||
}
|
||
return result;
|
||
},
|
||
stts: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
timeToSamples: []
|
||
},
|
||
entryCount = view.getUint32(4),
|
||
i;
|
||
|
||
for (i = 8; entryCount; i += 8, entryCount--) {
|
||
result.timeToSamples.push({
|
||
sampleCount: view.getUint32(i),
|
||
sampleDelta: view.getUint32(i + 4)
|
||
});
|
||
}
|
||
return result;
|
||
},
|
||
styp: function(data) {
|
||
return parse.ftyp(data);
|
||
},
|
||
tfdt: function(data) {
|
||
var result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
baseMediaDecodeTime: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]
|
||
};
|
||
if (result.version === 1) {
|
||
result.baseMediaDecodeTime *= Math.pow(2, 32);
|
||
result.baseMediaDecodeTime += data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11];
|
||
}
|
||
return result;
|
||
},
|
||
tfhd: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
trackId: view.getUint32(4)
|
||
},
|
||
baseDataOffsetPresent = result.flags[2] & 0x01,
|
||
sampleDescriptionIndexPresent = result.flags[2] & 0x02,
|
||
defaultSampleDurationPresent = result.flags[2] & 0x08,
|
||
defaultSampleSizePresent = result.flags[2] & 0x10,
|
||
defaultSampleFlagsPresent = result.flags[2] & 0x20,
|
||
i;
|
||
|
||
i = 8;
|
||
if (baseDataOffsetPresent) {
|
||
i += 4; // truncate top 4 bytes
|
||
result.baseDataOffset = view.getUint32(12);
|
||
i += 4;
|
||
}
|
||
if (sampleDescriptionIndexPresent) {
|
||
result.sampleDescriptionIndex = view.getUint32(i);
|
||
i += 4;
|
||
}
|
||
if (defaultSampleDurationPresent) {
|
||
result.defaultSampleDuration = view.getUint32(i);
|
||
i += 4;
|
||
}
|
||
if (defaultSampleSizePresent) {
|
||
result.defaultSampleSize = view.getUint32(i);
|
||
i += 4;
|
||
}
|
||
if (defaultSampleFlagsPresent) {
|
||
result.defaultSampleFlags = view.getUint32(i);
|
||
}
|
||
return result;
|
||
},
|
||
tkhd: function(data) {
|
||
var
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
i = 4,
|
||
result = {
|
||
version: view.getUint8(0),
|
||
flags: new Uint8Array(data.subarray(1, 4))
|
||
};
|
||
if (result.version === 1) {
|
||
i += 4;
|
||
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 8;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes
|
||
i += 4;
|
||
result.trackId = view.getUint32(i);
|
||
i += 4;
|
||
i += 8;
|
||
result.duration = view.getUint32(i); // truncating top 4 bytes
|
||
} else {
|
||
result.creationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.modificationTime = parseMp4Date(view.getUint32(i));
|
||
i += 4;
|
||
result.trackId = view.getUint32(i);
|
||
i += 4;
|
||
i += 4;
|
||
result.duration = view.getUint32(i);
|
||
}
|
||
i += 4;
|
||
i += 2 * 4;
|
||
result.layer = view.getUint16(i);
|
||
i += 2;
|
||
result.alternateGroup = view.getUint16(i);
|
||
i += 2;
|
||
// convert fixed-point, base 16 back to a number
|
||
result.volume = view.getUint8(i) + (view.getUint8(i + 1) / 8);
|
||
i += 2;
|
||
i += 2;
|
||
result.matrix = new Uint32Array(data.subarray(i, i + (9 * 4)));
|
||
i += 9 * 4;
|
||
result.width = view.getUint16(i) + (view.getUint16(i + 2) / 16);
|
||
i += 4;
|
||
result.height = view.getUint16(i) + (view.getUint16(i + 2) / 16);
|
||
return result;
|
||
},
|
||
traf: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
trak: function(data) {
|
||
return {
|
||
boxes: inspectMp4(data)
|
||
};
|
||
},
|
||
trex: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
trackId: view.getUint32(4),
|
||
defaultSampleDescriptionIndex: view.getUint32(8),
|
||
defaultSampleDuration: view.getUint32(12),
|
||
defaultSampleSize: view.getUint32(16),
|
||
sampleDependsOn: data[20] & 0x03,
|
||
sampleIsDependedOn: (data[21] & 0xc0) >> 6,
|
||
sampleHasRedundancy: (data[21] & 0x30) >> 4,
|
||
samplePaddingValue: (data[21] & 0x0e) >> 1,
|
||
sampleIsDifferenceSample: !!(data[21] & 0x01),
|
||
sampleDegradationPriority: view.getUint16(22)
|
||
};
|
||
},
|
||
trun: function(data) {
|
||
var
|
||
result = {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
samples: []
|
||
},
|
||
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
|
||
dataOffsetPresent = result.flags[2] & 0x01,
|
||
firstSampleFlagsPresent = result.flags[2] & 0x04,
|
||
sampleDurationPresent = result.flags[1] & 0x01,
|
||
sampleSizePresent = result.flags[1] & 0x02,
|
||
sampleFlagsPresent = result.flags[1] & 0x04,
|
||
sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,
|
||
sampleCount = view.getUint32(4),
|
||
offset = 8,
|
||
sample;
|
||
|
||
if (dataOffsetPresent) {
|
||
result.dataOffset = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
|
||
if (firstSampleFlagsPresent && sampleCount) {
|
||
sample = {
|
||
flags: parseSampleFlags(data.subarray(offset, offset + 4))
|
||
};
|
||
offset += 4;
|
||
if (sampleDurationPresent) {
|
||
sample.duration = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
if (sampleSizePresent) {
|
||
sample.size = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
if (sampleCompositionTimeOffsetPresent) {
|
||
sample.compositionTimeOffset = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
result.samples.push(sample);
|
||
sampleCount--;
|
||
}
|
||
|
||
while (sampleCount--) {
|
||
sample = {};
|
||
if (sampleDurationPresent) {
|
||
sample.duration = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
if (sampleSizePresent) {
|
||
sample.size = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
if (sampleFlagsPresent) {
|
||
sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));
|
||
offset += 4;
|
||
}
|
||
if (sampleCompositionTimeOffsetPresent) {
|
||
sample.compositionTimeOffset = view.getUint32(offset);
|
||
offset += 4;
|
||
}
|
||
result.samples.push(sample);
|
||
}
|
||
return result;
|
||
},
|
||
'url ': function(data) {
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4))
|
||
};
|
||
},
|
||
vmhd: function(data) {
|
||
var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
|
||
return {
|
||
version: data[0],
|
||
flags: new Uint8Array(data.subarray(1, 4)),
|
||
graphicsmode: view.getUint16(4),
|
||
opcolor: new Uint16Array([view.getUint16(6),
|
||
view.getUint16(8),
|
||
view.getUint16(10)])
|
||
};
|
||
}
|
||
};
|
||
|
||
|
||
/**
|
||
* Return a javascript array of box objects parsed from an ISO base
|
||
* media file.
|
||
* @param data {Uint8Array} the binary data of the media to be inspected
|
||
* @return {array} a javascript array of potentially nested box objects
|
||
*/
|
||
inspectMp4 = function(data) {
|
||
var
|
||
i = 0,
|
||
result = [],
|
||
view,
|
||
size,
|
||
type,
|
||
end,
|
||
box;
|
||
|
||
// Convert data from Uint8Array to ArrayBuffer, to follow Dataview API
|
||
var ab = new ArrayBuffer(data.length);
|
||
var v = new Uint8Array(ab);
|
||
for (var z = 0; z < data.length; ++z) {
|
||
v[z] = data[z];
|
||
}
|
||
view = new DataView(ab);
|
||
|
||
|
||
while (i < data.byteLength) {
|
||
// parse box data
|
||
size = view.getUint32(i);
|
||
type = parseType(data.subarray(i + 4, i + 8));
|
||
end = size > 1 ? i + size : data.byteLength;
|
||
|
||
// parse type-specific data
|
||
box = (parse[type] || function(data) {
|
||
return {
|
||
data: data
|
||
};
|
||
})(data.subarray(i + 8, end));
|
||
box.size = size;
|
||
box.type = type;
|
||
|
||
// store this box and move to the next
|
||
result.push(box);
|
||
i = end;
|
||
}
|
||
return result;
|
||
};
|
||
|
||
/**
|
||
 * Returns a textual representation of the javascript representation
|
||
* of an MP4 file. You can use it as an alternative to
|
||
* JSON.stringify() to compare inspected MP4s.
|
||
* @param inspectedMp4 {array} the parsed array of boxes in an MP4
|
||
* file
|
||
* @param depth {number} (optional) the number of ancestor boxes of
|
||
* the elements of inspectedMp4. Assumed to be zero if unspecified.
|
||
* @return {string} a text representation of the parsed MP4
|
||
*/
|
||
textifyMp4 = function(inspectedMp4, depth) {
|
||
var indent;
|
||
depth = depth || 0;
|
||
indent = new Array(depth * 2 + 1).join(' ');
|
||
|
||
// iterate over all the boxes
|
||
return inspectedMp4.map(function(box, index) {
|
||
|
||
// list the box type first at the current indentation level
|
||
return indent + box.type + '\n' +
|
||
|
||
// the type is already included and handle child boxes separately
|
||
Object.keys(box).filter(function(key) {
|
||
return key !== 'type' && key !== 'boxes';
|
||
|
||
// output all the box properties
|
||
}).map(function(key) {
|
||
var prefix = indent + ' ' + key + ': ',
|
||
value = box[key];
|
||
|
||
// print out raw bytes as hexadecimal
|
||
if (value instanceof Uint8Array || value instanceof Uint32Array) {
|
||
var bytes = Array.prototype.slice.call(new Uint8Array(value.buffer, value.byteOffset, value.byteLength))
|
||
.map(function(byte) {
|
||
return ' ' + ('00' + byte.toString(16)).slice(-2);
|
||
}).join('').match(/.{1,24}/g);
|
||
if (!bytes) {
|
||
return prefix + '<>';
|
||
}
|
||
if (bytes.length === 1) {
|
||
return prefix + '<' + bytes.join('').slice(1) + '>';
|
||
}
|
||
return prefix + '<\n' + bytes.map(function(line) {
|
||
return indent + ' ' + line;
|
||
}).join('\n') + '\n' + indent + ' >';
|
||
}
|
||
|
||
// stringify generic objects
|
||
return prefix +
|
||
JSON.stringify(value, null, 2)
|
||
.split('\n').map(function(line, index) {
|
||
if (index === 0) {
|
||
return line;
|
||
}
|
||
return indent + ' ' + line;
|
||
}).join('\n');
|
||
}).join('\n') +
|
||
|
||
// recursively textify the child boxes
|
||
(box.boxes ? '\n' + textifyMp4(box.boxes, depth + 1) : '');
|
||
}).join('\n');
|
||
};
|
||
|
||
module.exports = {
|
||
inspect: inspectMp4,
|
||
textify: textifyMp4
|
||
};
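// Usage sketch (illustrative only, not executed by this bundle; `segmentBytes`
// is a hypothetical Uint8Array of ISO BMFF data):
//
//   var boxes = inspectMp4(segmentBytes);
//   // each entry has `type` and `size`, plus type-specific fields; container
//   // boxes such as moof/traf/trak nest their children under `boxes`
//   var report = textifyMp4(boxes, 0); // indented text with hex dumps, handy for diffing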
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"../mp4/probe":26}],30:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2016 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Parse mpeg2 transport stream packets to extract basic timing information
|
||
*/
|
||
'use strict';
|
||
|
||
var StreamTypes = require('../m2ts/stream-types.js');
|
||
var handleRollover = require('../m2ts/timestamp-rollover-stream.js').handleRollover;
|
||
var probe = {};
|
||
probe.ts = require('../m2ts/probe.js');
|
||
probe.aac = require('../aac/probe.js');
|
||
|
||
|
||
var
|
||
PES_TIMESCALE = 90000,
|
||
MP2T_PACKET_LENGTH = 188, // bytes
|
||
SYNC_BYTE = 0x47;
|
||
|
||
var isLikelyAacData = function(data) {
|
||
if ((data[0] === 'I'.charCodeAt(0)) &&
|
||
(data[1] === 'D'.charCodeAt(0)) &&
|
||
(data[2] === '3'.charCodeAt(0))) {
|
||
return true;
|
||
}
|
||
return false;
|
||
};
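// For example, an AAC segment that starts with an ID3 tag begins with the
// bytes 0x49 0x44 0x33 ('I', 'D', '3'), so (illustrative values):
//
//   isLikelyAacData(new Uint8Array([0x49, 0x44, 0x33, 0x04, 0x00])); // true
//   isLikelyAacData(new Uint8Array([0x47, 0x40, 0x11]));             // false (0x47 is the TS sync byte)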
|
||
|
||
/**
|
||
* walks through segment data looking for pat and pmt packets to parse out
|
||
* program map table information
|
||
*/
|
||
var parsePsi_ = function(bytes, pmt) {
|
||
var
|
||
startIndex = 0,
|
||
endIndex = MP2T_PACKET_LENGTH,
|
||
packet, type;
|
||
|
||
while (endIndex < bytes.byteLength) {
|
||
// Look for a pair of start and end sync bytes in the data..
|
||
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
|
||
// We found a packet
|
||
packet = bytes.subarray(startIndex, endIndex);
|
||
type = probe.ts.parseType(packet, pmt.pid);
|
||
|
||
switch (type) {
|
||
case 'pat':
|
||
if (!pmt.pid) {
|
||
pmt.pid = probe.ts.parsePat(packet);
|
||
}
|
||
break;
|
||
case 'pmt':
|
||
if (!pmt.table) {
|
||
pmt.table = probe.ts.parsePmt(packet);
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
|
||
// Found the pat and pmt, we can stop walking the segment
|
||
if (pmt.pid && pmt.table) {
|
||
return;
|
||
}
|
||
|
||
startIndex += MP2T_PACKET_LENGTH;
|
||
endIndex += MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex++;
|
||
endIndex++;
|
||
}
|
||
};
|
||
|
||
/**
|
||
* walks through the segment data from the start and end to get timing information
|
||
* for the first and last audio pes packets
|
||
*/
|
||
var parseAudioPes_ = function(bytes, pmt, result) {
|
||
var
|
||
startIndex = 0,
|
||
endIndex = MP2T_PACKET_LENGTH,
|
||
packet, type, pesType, pusi, parsed;
|
||
|
||
var endLoop = false;
|
||
|
||
// Start walking from start of segment to get first audio packet
|
||
while (endIndex < bytes.byteLength) {
|
||
// Look for a pair of start and end sync bytes in the data..
|
||
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
|
||
// We found a packet
|
||
packet = bytes.subarray(startIndex, endIndex);
|
||
type = probe.ts.parseType(packet, pmt.pid);
|
||
|
||
switch (type) {
|
||
case 'pes':
|
||
pesType = probe.ts.parsePesType(packet, pmt.table);
|
||
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
|
||
if (pesType === 'audio' && pusi) {
|
||
parsed = probe.ts.parsePesTime(packet);
|
||
if (parsed) {
|
||
parsed.type = 'audio';
|
||
result.audio.push(parsed);
|
||
endLoop = true;
|
||
}
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
|
||
if (endLoop) {
|
||
break;
|
||
}
|
||
|
||
startIndex += MP2T_PACKET_LENGTH;
|
||
endIndex += MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex++;
|
||
endIndex++;
|
||
}
|
||
|
||
// Start walking from end of segment to get last audio packet
|
||
endIndex = bytes.byteLength;
|
||
startIndex = endIndex - MP2T_PACKET_LENGTH;
|
||
endLoop = false;
|
||
while (startIndex >= 0) {
|
||
// Look for a pair of start and end sync bytes in the data..
|
||
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
|
||
// We found a packet
|
||
packet = bytes.subarray(startIndex, endIndex);
|
||
type = probe.ts.parseType(packet, pmt.pid);
|
||
|
||
switch (type) {
|
||
case 'pes':
|
||
pesType = probe.ts.parsePesType(packet, pmt.table);
|
||
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
|
||
if (pesType === 'audio' && pusi) {
|
||
parsed = probe.ts.parsePesTime(packet);
|
||
if (parsed) {
|
||
parsed.type = 'audio';
|
||
result.audio.push(parsed);
|
||
endLoop = true;
|
||
}
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
|
||
if (endLoop) {
|
||
break;
|
||
}
|
||
|
||
startIndex -= MP2T_PACKET_LENGTH;
|
||
endIndex -= MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex--;
|
||
endIndex--;
|
||
}
|
||
};
|
||
|
||
/**
|
||
* walks through the segment data from the start and end to get timing information
|
||
* for the first and last video pes packets as well as timing information for the first
|
||
* key frame.
|
||
*/
|
||
var parseVideoPes_ = function(bytes, pmt, result) {
|
||
var
|
||
startIndex = 0,
|
||
endIndex = MP2T_PACKET_LENGTH,
|
||
packet, type, pesType, pusi, parsed, frame, i, pes;
|
||
|
||
var endLoop = false;
|
||
|
||
var currentFrame = {
|
||
data: [],
|
||
size: 0
|
||
};
|
||
|
||
// Start walking from start of segment to get first video packet
|
||
while (endIndex < bytes.byteLength) {
|
||
// Look for a pair of start and end sync bytes in the data..
|
||
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
|
||
// We found a packet
|
||
packet = bytes.subarray(startIndex, endIndex);
|
||
type = probe.ts.parseType(packet, pmt.pid);
|
||
|
||
switch (type) {
|
||
case 'pes':
|
||
pesType = probe.ts.parsePesType(packet, pmt.table);
|
||
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
|
||
if (pesType === 'video') {
|
||
if (pusi && !endLoop) {
|
||
parsed = probe.ts.parsePesTime(packet);
|
||
if (parsed) {
|
||
parsed.type = 'video';
|
||
result.video.push(parsed);
|
||
endLoop = true;
|
||
}
|
||
}
|
||
if (!result.firstKeyFrame) {
|
||
if (pusi) {
|
||
if (currentFrame.size !== 0) {
|
||
frame = new Uint8Array(currentFrame.size);
|
||
i = 0;
|
||
while (currentFrame.data.length) {
|
||
pes = currentFrame.data.shift();
|
||
frame.set(pes, i);
|
||
i += pes.byteLength;
|
||
}
|
||
if (probe.ts.videoPacketContainsKeyFrame(frame)) {
|
||
result.firstKeyFrame = probe.ts.parsePesTime(frame);
|
||
result.firstKeyFrame.type = 'video';
|
||
}
|
||
currentFrame.size = 0;
|
||
}
|
||
}
|
||
currentFrame.data.push(packet);
|
||
currentFrame.size += packet.byteLength;
|
||
}
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
|
||
if (endLoop && result.firstKeyFrame) {
|
||
break;
|
||
}
|
||
|
||
startIndex += MP2T_PACKET_LENGTH;
|
||
endIndex += MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex++;
|
||
endIndex++;
|
||
}
|
||
|
||
// Start walking from end of segment to get last video packet
|
||
endIndex = bytes.byteLength;
|
||
startIndex = endIndex - MP2T_PACKET_LENGTH;
|
||
endLoop = false;
|
||
while (startIndex >= 0) {
|
||
// Look for a pair of start and end sync bytes in the data..
|
||
if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {
|
||
// We found a packet
|
||
packet = bytes.subarray(startIndex, endIndex);
|
||
type = probe.ts.parseType(packet, pmt.pid);
|
||
|
||
switch (type) {
|
||
case 'pes':
|
||
pesType = probe.ts.parsePesType(packet, pmt.table);
|
||
pusi = probe.ts.parsePayloadUnitStartIndicator(packet);
|
||
if (pesType === 'video' && pusi) {
|
||
parsed = probe.ts.parsePesTime(packet);
|
||
if (parsed) {
|
||
parsed.type = 'video';
|
||
result.video.push(parsed);
|
||
endLoop = true;
|
||
}
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
|
||
if (endLoop) {
|
||
break;
|
||
}
|
||
|
||
startIndex -= MP2T_PACKET_LENGTH;
|
||
endIndex -= MP2T_PACKET_LENGTH;
|
||
continue;
|
||
}
|
||
|
||
// If we get here, we have somehow become de-synchronized and we need to step
|
||
// forward one byte at a time until we find a pair of sync bytes that denote
|
||
// a packet
|
||
startIndex--;
|
||
endIndex--;
|
||
}
|
||
};
|
||
|
||
/**
|
||
* Adjusts the timestamp information for the segment to account for
|
||
* rollover and convert to seconds based on pes packet timescale (90khz clock)
|
||
*/
|
||
var adjustTimestamp_ = function(segmentInfo, baseTimestamp) {
|
||
if (segmentInfo.audio && segmentInfo.audio.length) {
|
||
var audioBaseTimestamp = baseTimestamp;
|
||
if (typeof audioBaseTimestamp === 'undefined') {
|
||
audioBaseTimestamp = segmentInfo.audio[0].dts;
|
||
}
|
||
segmentInfo.audio.forEach(function(info) {
|
||
info.dts = handleRollover(info.dts, audioBaseTimestamp);
|
||
info.pts = handleRollover(info.pts, audioBaseTimestamp);
|
||
// time in seconds
|
||
info.dtsTime = info.dts / PES_TIMESCALE;
|
||
info.ptsTime = info.pts / PES_TIMESCALE;
|
||
});
|
||
}
|
||
|
||
if (segmentInfo.video && segmentInfo.video.length) {
|
||
var videoBaseTimestamp = baseTimestamp;
|
||
if (typeof videoBaseTimestamp === 'undefined') {
|
||
videoBaseTimestamp = segmentInfo.video[0].dts;
|
||
}
|
||
segmentInfo.video.forEach(function(info) {
|
||
info.dts = handleRollover(info.dts, videoBaseTimestamp);
|
||
info.pts = handleRollover(info.pts, videoBaseTimestamp);
|
||
// time in seconds
|
||
info.dtsTime = info.dts / PES_TIMESCALE;
|
||
info.ptsTime = info.pts / PES_TIMESCALE;
|
||
});
|
||
if (segmentInfo.firstKeyFrame) {
|
||
var frame = segmentInfo.firstKeyFrame;
|
||
frame.dts = handleRollover(frame.dts, videoBaseTimestamp);
|
||
frame.pts = handleRollover(frame.pts, videoBaseTimestamp);
|
||
// time in seconds
|
||
frame.dtsTime = frame.dts / PES_TIMESCALE;
|
||
frame.ptsTime = frame.pts / PES_TIMESCALE;
|
||
}
|
||
}
|
||
};
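// Worked example (hypothetical values): with the 90kHz PES clock, a parsed
// dts of 900000 becomes info.dtsTime = 900000 / 90000 = 10 seconds. When
// baseTimestamp is omitted, the first audio/video packet's dts is used as the
// reference that handleRollover uses to unwrap 33-bit timestamp rollover.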
|
||
|
||
/**
|
||
* inspects the aac data stream for start and end time information
|
||
*/
|
||
var inspectAac_ = function(bytes) {
|
||
var
|
||
endLoop = false,
|
||
audioCount = 0,
|
||
sampleRate = null,
|
||
timestamp = null,
|
||
frameSize = 0,
|
||
byteIndex = 0,
|
||
packet;
|
||
|
||
while (bytes.length - byteIndex >= 3) {
|
||
var type = probe.aac.parseType(bytes, byteIndex);
|
||
switch (type) {
|
||
case 'timed-metadata':
|
||
// Exit early because we don't have enough to parse
|
||
// the ID3 tag header
|
||
if (bytes.length - byteIndex < 10) {
|
||
endLoop = true;
|
||
break;
|
||
}
|
||
|
||
frameSize = probe.aac.parseId3TagSize(bytes, byteIndex);
|
||
|
||
// Exit early if we don't have enough in the buffer
|
||
// to emit a full packet
|
||
if (frameSize > bytes.length) {
|
||
endLoop = true;
|
||
break;
|
||
}
|
||
if (timestamp === null) {
|
||
packet = bytes.subarray(byteIndex, byteIndex + frameSize);
|
||
timestamp = probe.aac.parseAacTimestamp(packet);
|
||
}
|
||
byteIndex += frameSize;
|
||
break;
|
||
case 'audio':
|
||
// Exit early because we don't have enough to parse
|
||
// the ADTS frame header
|
||
if (bytes.length - byteIndex < 7) {
|
||
endLoop = true;
|
||
break;
|
||
}
|
||
|
||
frameSize = probe.aac.parseAdtsSize(bytes, byteIndex);
|
||
|
||
// Exit early if we don't have enough in the buffer
|
||
// to emit a full packet
|
||
if (frameSize > bytes.length) {
|
||
endLoop = true;
|
||
break;
|
||
}
|
||
if (sampleRate === null) {
|
||
packet = bytes.subarray(byteIndex, byteIndex + frameSize);
|
||
sampleRate = probe.aac.parseSampleRate(packet);
|
||
}
|
||
audioCount++;
|
||
byteIndex += frameSize;
|
||
break;
|
||
default:
|
||
byteIndex++;
|
||
break;
|
||
}
|
||
if (endLoop) {
|
||
return null;
|
||
}
|
||
}
|
||
if (sampleRate === null || timestamp === null) {
|
||
return null;
|
||
}
|
||
|
||
var audioTimescale = PES_TIMESCALE / sampleRate;
|
||
|
||
var result = {
|
||
audio: [
|
||
{
|
||
type: 'audio',
|
||
dts: timestamp,
|
||
pts: timestamp
|
||
},
|
||
{
|
||
type: 'audio',
|
||
dts: timestamp + (audioCount * 1024 * audioTimescale),
|
||
pts: timestamp + (audioCount * 1024 * audioTimescale)
|
||
}
|
||
]
|
||
};
|
||
|
||
return result;
|
||
};
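// Worked example (hypothetical values): each ADTS frame carries 1024 PCM
// samples, so with sampleRate = 44100, audioCount = 100 and a first timestamp
// of 0, the synthesized end timestamp is
//
//   100 * 1024 * (90000 / 44100) ≈ 208980 ticks ≈ 2.32 seconds
//
// which is what the second entry of result.audio reports before adjustment.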
|
||
|
||
/**
|
||
* inspects the transport stream segment data for start and end time information
|
||
* of the audio and video tracks (when present) as well as the first key frame's
|
||
* start time.
|
||
*/
|
||
var inspectTs_ = function(bytes) {
|
||
var pmt = {
|
||
pid: null,
|
||
table: null
|
||
};
|
||
|
||
var result = {};
|
||
|
||
parsePsi_(bytes, pmt);
|
||
|
||
for (var pid in pmt.table) {
|
||
if (pmt.table.hasOwnProperty(pid)) {
|
||
var type = pmt.table[pid];
|
||
switch (type) {
|
||
case StreamTypes.H264_STREAM_TYPE:
|
||
result.video = [];
|
||
parseVideoPes_(bytes, pmt, result);
|
||
if (result.video.length === 0) {
|
||
delete result.video;
|
||
}
|
||
break;
|
||
case StreamTypes.ADTS_STREAM_TYPE:
|
||
result.audio = [];
|
||
parseAudioPes_(bytes, pmt, result);
|
||
if (result.audio.length === 0) {
|
||
delete result.audio;
|
||
}
|
||
break;
|
||
default:
|
||
break;
|
||
}
|
||
}
|
||
}
|
||
return result;
|
||
};
|
||
|
||
/**
|
||
* Inspects segment byte data and returns an object with start and end timing information
|
||
*
|
||
* @param {Uint8Array} bytes The segment byte data
|
||
* @param {Number} baseTimestamp Relative reference timestamp used when adjusting frame
|
||
* timestamps for rollover. This value must be in 90khz clock.
|
||
* @return {Object} Object containing start and end frame timing info of segment.
|
||
*/
|
||
var inspect = function(bytes, baseTimestamp) {
|
||
var isAacData = isLikelyAacData(bytes);
|
||
|
||
var result;
|
||
|
||
if (isAacData) {
|
||
result = inspectAac_(bytes);
|
||
} else {
|
||
result = inspectTs_(bytes);
|
||
}
|
||
|
||
if (!result || (!result.audio && !result.video)) {
|
||
return null;
|
||
}
|
||
|
||
adjustTimestamp_(result, baseTimestamp);
|
||
|
||
return result;
|
||
};
|
||
|
||
module.exports = {
|
||
inspect: inspect
|
||
};
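// Usage sketch (illustrative; `segmentBytes` is a hypothetical Uint8Array of a
// TS or AAC segment, and `lastSegmentEndDts` a hypothetical 90kHz reference):
//
//   var timingInfo = inspect(segmentBytes, lastSegmentEndDts);
//   // e.g. { video: [ firstVideoPesTime, lastVideoPesTime ],
//   //        audio: [ firstAudioPesTime, lastAudioPesTime ],
//   //        firstKeyFrame: { type: 'video', dts: ..., pts: ..., dtsTime: ..., ptsTime: ... } }
//
// Each entry carries dts/pts plus dtsTime/ptsTime in seconds; a null return
// means no audio or video timing information could be found.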
|
||
|
||
},{"../aac/probe.js":5,"../m2ts/probe.js":21,"../m2ts/stream-types.js":22,"../m2ts/timestamp-rollover-stream.js":23}],31:[function(require,module,exports){
|
||
var
|
||
ONE_SECOND_IN_TS = 90000, // 90kHz clock
|
||
secondsToVideoTs,
|
||
secondsToAudioTs,
|
||
videoTsToSeconds,
|
||
audioTsToSeconds,
|
||
audioTsToVideoTs,
|
||
videoTsToAudioTs;
|
||
|
||
secondsToVideoTs = function(seconds) {
|
||
return seconds * ONE_SECOND_IN_TS;
|
||
};
|
||
|
||
secondsToAudioTs = function(seconds, sampleRate) {
|
||
return seconds * sampleRate;
|
||
};
|
||
|
||
videoTsToSeconds = function(timestamp) {
|
||
return timestamp / ONE_SECOND_IN_TS;
|
||
};
|
||
|
||
audioTsToSeconds = function(timestamp, sampleRate) {
|
||
return timestamp / sampleRate;
|
||
};
|
||
|
||
audioTsToVideoTs = function(timestamp, sampleRate) {
|
||
return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));
|
||
};
|
||
|
||
videoTsToAudioTs = function(timestamp, sampleRate) {
|
||
return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);
|
||
};
|
||
|
||
module.exports = {
|
||
secondsToVideoTs: secondsToVideoTs,
|
||
secondsToAudioTs: secondsToAudioTs,
|
||
videoTsToSeconds: videoTsToSeconds,
|
||
audioTsToSeconds: audioTsToSeconds,
|
||
audioTsToVideoTs: audioTsToVideoTs,
|
||
videoTsToAudioTs: videoTsToAudioTs
|
||
};
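// Worked examples (90kHz video clock; a 44.1kHz or 48kHz audio sample rate is
// assumed below purely for illustration):
//
//   secondsToVideoTs(2);              // 180000
//   videoTsToSeconds(45000);          // 0.5
//   secondsToAudioTs(2, 44100);       // 88200
//   audioTsToSeconds(88200, 44100);   // 2
//   audioTsToVideoTs(44100, 44100);   // 90000
//   videoTsToAudioTs(90000, 48000);   // 48000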
|
||
|
||
},{}],32:[function(require,module,exports){
|
||
'use strict';
|
||
|
||
var ExpGolomb;
|
||
|
||
/**
|
||
* Parser for exponential Golomb codes, a variable-bitwidth number encoding
|
||
* scheme used by h264.
|
||
*/
|
||
ExpGolomb = function(workingData) {
|
||
var
|
||
// the number of bytes left to examine in workingData
|
||
workingBytesAvailable = workingData.byteLength,
|
||
|
||
// the current word being examined
|
||
workingWord = 0, // :uint
|
||
|
||
// the number of bits left to examine in the current word
|
||
workingBitsAvailable = 0; // :uint;
|
||
|
||
// ():uint
|
||
this.length = function() {
|
||
return (8 * workingBytesAvailable);
|
||
};
|
||
|
||
// ():uint
|
||
this.bitsAvailable = function() {
|
||
return (8 * workingBytesAvailable) + workingBitsAvailable;
|
||
};
|
||
|
||
// ():void
|
||
this.loadWord = function() {
|
||
var
|
||
position = workingData.byteLength - workingBytesAvailable,
|
||
workingBytes = new Uint8Array(4),
|
||
availableBytes = Math.min(4, workingBytesAvailable);
|
||
|
||
if (availableBytes === 0) {
|
||
throw new Error('no bytes available');
|
||
}
|
||
|
||
workingBytes.set(workingData.subarray(position,
|
||
position + availableBytes));
|
||
workingWord = new DataView(workingBytes.buffer).getUint32(0);
|
||
|
||
// track the amount of workingData that has been processed
|
||
workingBitsAvailable = availableBytes * 8;
|
||
workingBytesAvailable -= availableBytes;
|
||
};
|
||
|
||
// (count:int):void
|
||
this.skipBits = function(count) {
|
||
var skipBytes; // :int
|
||
if (workingBitsAvailable > count) {
|
||
workingWord <<= count;
|
||
workingBitsAvailable -= count;
|
||
} else {
|
||
count -= workingBitsAvailable;
|
||
skipBytes = Math.floor(count / 8);
|
||
|
||
count -= (skipBytes * 8);
|
||
workingBytesAvailable -= skipBytes;
|
||
|
||
this.loadWord();
|
||
|
||
workingWord <<= count;
|
||
workingBitsAvailable -= count;
|
||
}
|
||
};
|
||
|
||
// (size:int):uint
|
||
this.readBits = function(size) {
|
||
var
|
||
bits = Math.min(workingBitsAvailable, size), // :uint
|
||
valu = workingWord >>> (32 - bits); // :uint
|
||
// if size > 31, handle error
|
||
workingBitsAvailable -= bits;
|
||
if (workingBitsAvailable > 0) {
|
||
workingWord <<= bits;
|
||
} else if (workingBytesAvailable > 0) {
|
||
this.loadWord();
|
||
}
|
||
|
||
bits = size - bits;
|
||
if (bits > 0) {
|
||
return valu << bits | this.readBits(bits);
|
||
}
|
||
return valu;
|
||
};
|
||
|
||
// ():uint
|
||
this.skipLeadingZeros = function() {
|
||
var leadingZeroCount; // :uint
|
||
for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) {
|
||
if ((workingWord & (0x80000000 >>> leadingZeroCount)) !== 0) {
|
||
// the first bit of working word is 1
|
||
workingWord <<= leadingZeroCount;
|
||
workingBitsAvailable -= leadingZeroCount;
|
||
return leadingZeroCount;
|
||
}
|
||
}
|
||
|
||
// we exhausted workingWord and still have not found a 1
|
||
this.loadWord();
|
||
return leadingZeroCount + this.skipLeadingZeros();
|
||
};
|
||
|
||
// ():void
|
||
this.skipUnsignedExpGolomb = function() {
|
||
this.skipBits(1 + this.skipLeadingZeros());
|
||
};
|
||
|
||
// ():void
|
||
this.skipExpGolomb = function() {
|
||
this.skipBits(1 + this.skipLeadingZeros());
|
||
};
|
||
|
||
// ():uint
|
||
this.readUnsignedExpGolomb = function() {
|
||
var clz = this.skipLeadingZeros(); // :uint
|
||
return this.readBits(clz + 1) - 1;
|
||
};
|
||
|
||
// ():int
|
||
this.readExpGolomb = function() {
|
||
var valu = this.readUnsignedExpGolomb(); // :int
|
||
if (0x01 & valu) {
|
||
// the number is odd if the low order bit is set
|
||
return (1 + valu) >>> 1; // add 1 to make it even, and divide by 2
|
||
}
|
||
return -1 * (valu >>> 1); // divide by two then make it negative
|
||
};
|
||
|
||
// Some convenience functions
|
||
// :Boolean
|
||
this.readBoolean = function() {
|
||
return this.readBits(1) === 1;
|
||
};
|
||
|
||
// ():int
|
||
this.readUnsignedByte = function() {
|
||
return this.readBits(8);
|
||
};
|
||
|
||
this.loadWord();
|
||
};
|
||
|
||
module.exports = ExpGolomb;
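// Usage sketch (illustrative, not executed here): decoding an unsigned
// exp-Golomb value from a single byte.
//
//   var eg = new ExpGolomb(new Uint8Array([0x40])); // bits 0100 0000
//   eg.readUnsignedExpGolomb();                     // 1 zero, then '10' -> 2 - 1 === 1
//
// A leading '1' bit (e.g. 0x80) decodes to 0, '010' to 1, '011' to 2, and so on.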
|
||
|
||
},{}],33:[function(require,module,exports){
|
||
/**
|
||
* mux.js
|
||
*
|
||
* Copyright (c) 2014 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
 * A lightweight readable stream implementation that handles event dispatching.
|
||
* Objects that inherit from streams should call init in their constructors.
|
||
*/
|
||
'use strict';
|
||
|
||
var Stream = function() {
|
||
this.init = function() {
|
||
var listeners = {};
|
||
/**
|
||
* Add a listener for a specified event type.
|
||
* @param type {string} the event name
|
||
* @param listener {function} the callback to be invoked when an event of
|
||
* the specified type occurs
|
||
*/
|
||
this.on = function(type, listener) {
|
||
if (!listeners[type]) {
|
||
listeners[type] = [];
|
||
}
|
||
listeners[type] = listeners[type].concat(listener);
|
||
};
|
||
/**
|
||
* Remove a listener for a specified event type.
|
||
* @param type {string} the event name
|
||
* @param listener {function} a function previously registered for this
|
||
* type of event through `on`
|
||
*/
|
||
this.off = function(type, listener) {
|
||
var index;
|
||
if (!listeners[type]) {
|
||
return false;
|
||
}
|
||
index = listeners[type].indexOf(listener);
|
||
listeners[type] = listeners[type].slice();
|
||
listeners[type].splice(index, 1);
|
||
return index > -1;
|
||
};
|
||
/**
|
||
* Trigger an event of the specified type on this stream. Any additional
|
||
* arguments to this function are passed as parameters to event listeners.
|
||
* @param type {string} the event name
|
||
*/
|
||
this.trigger = function(type) {
|
||
var callbacks, i, length, args;
|
||
callbacks = listeners[type];
|
||
if (!callbacks) {
|
||
return;
|
||
}
|
||
// Slicing the arguments on every invocation of this method
|
||
// can add a significant amount of overhead. Avoid the
|
||
// intermediate object creation for the common case of a
|
||
// single callback argument
|
||
if (arguments.length === 2) {
|
||
length = callbacks.length;
|
||
for (i = 0; i < length; ++i) {
|
||
callbacks[i].call(this, arguments[1]);
|
||
}
|
||
} else {
|
||
args = [];
|
||
i = arguments.length;
|
||
for (i = 1; i < arguments.length; ++i) {
|
||
args.push(arguments[i]);
|
||
}
|
||
length = callbacks.length;
|
||
for (i = 0; i < length; ++i) {
|
||
callbacks[i].apply(this, args);
|
||
}
|
||
}
|
||
};
|
||
/**
|
||
* Destroys the stream and cleans up.
|
||
*/
|
||
this.dispose = function() {
|
||
listeners = {};
|
||
};
|
||
};
|
||
};
|
||
|
||
/**
|
||
* Forwards all `data` events on this stream to the destination stream. The
|
||
* destination stream should provide a method `push` to receive the data
|
||
* events as they arrive.
|
||
* @param destination {stream} the stream that will receive all `data` events
|
||
* @param autoFlush {boolean} if false, we will not call `flush` on the destination
|
||
* when the current stream emits a 'done' event
|
||
* @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
|
||
*/
|
||
Stream.prototype.pipe = function(destination) {
|
||
this.on('data', function(data) {
|
||
destination.push(data);
|
||
});
|
||
|
||
this.on('done', function(flushSource) {
|
||
destination.flush(flushSource);
|
||
});
|
||
|
||
return destination;
|
||
};
|
||
|
||
// Default stream functions that are expected to be overridden to perform
|
||
// actual work. These are provided by the prototype as a sort of no-op
|
||
// implementation so that we don't have to check for their existence in the
|
||
// `pipe` function above.
|
||
Stream.prototype.push = function(data) {
|
||
this.trigger('data', data);
|
||
};
|
||
|
||
Stream.prototype.flush = function(flushSource) {
|
||
this.trigger('done', flushSource);
|
||
};
|
||
|
||
module.exports = Stream;
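// Usage sketch (illustrative; `destinationStream` is hypothetical): a minimal
// subclass following the same pattern the transmuxer streams in this bundle use.
//
//   var Doubler = function() {
//     Doubler.prototype.init.call(this);
//     this.push = function(data) {
//       this.trigger('data', data * 2);
//     };
//   };
//   Doubler.prototype = new Stream();
//
//   new Doubler().pipe(destinationStream); // forwards 'data' events, flushes on 'done'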
|
||
|
||
},{}],34:[function(require,module,exports){
|
||
// By default assume browserify was used to bundle app. These arguments are passed to
|
||
// the module by browserify.
|
||
var bundleFn = arguments[3];
|
||
var sources = arguments[4];
|
||
var cache = arguments[5];
|
||
var stringify = JSON.stringify;
|
||
var webpack = false;
|
||
|
||
// webpackBootstrap
|
||
var webpackBootstrapFn = function(modules) {
|
||
// The module cache
|
||
var installedModules = {};
|
||
|
||
// The require function
|
||
function __webpack_require__(moduleId) {
|
||
|
||
// Check if module is in cache
|
||
if(installedModules[moduleId]) {
|
||
return installedModules[moduleId].exports;
|
||
}
|
||
// Create a new module (and put it into the cache)
|
||
var module = installedModules[moduleId] = {
|
||
i: moduleId,
|
||
l: false,
|
||
exports: {}
|
||
};
|
||
|
||
// Execute the module function
|
||
modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
|
||
|
||
// Flag the module as loaded
|
||
module.l = true;
|
||
|
||
// Return the exports of the module
|
||
return module.exports;
|
||
}
|
||
|
||
|
||
// expose the modules object (__webpack_modules__)
|
||
__webpack_require__.m = modules;
|
||
|
||
// expose the module cache
|
||
__webpack_require__.c = installedModules;
|
||
|
||
// define getter function for harmony exports
|
||
__webpack_require__.d = function(exports, name, getter) {
|
||
if(!__webpack_require__.o(exports, name)) {
|
||
Object.defineProperty(exports, name, {
|
||
configurable: false,
|
||
enumerable: true,
|
||
get: getter
|
||
});
|
||
}
|
||
};
|
||
|
||
// getDefaultExport function for compatibility with non-harmony modules
|
||
__webpack_require__.n = function(module) {
|
||
var getter = module && module.__esModule ?
|
||
function getDefault() { return module['default']; } :
|
||
function getModuleExports() { return module; };
|
||
|
||
__webpack_require__.d(getter, 'a', getter);
|
||
return getter;
|
||
};
|
||
|
||
// Object.prototype.hasOwnProperty.call
|
||
__webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
|
||
|
||
// __webpack_public_path__
|
||
__webpack_require__.p = "";
|
||
|
||
// Load entry module and return exports
|
||
return __webpack_require__(__webpack_require__.s = entryModule);
|
||
}
|
||
|
||
if (typeof bundleFn === 'undefined') {
|
||
// Assume this was bundled with webpack and not browserify
|
||
webpack = true;
|
||
bundleFn = webpackBootstrapFn;
|
||
sources = __webpack_modules__;
|
||
}
|
||
|
||
var bundleWithBrowserify = function(fn) {
|
||
// with browserify we must find the module key ourselves
|
||
var cacheKeys = Object.keys(cache);
|
||
var fnModuleKey;
|
||
|
||
for (var i = 0; i < cacheKeys.length; i++) {
|
||
var cacheKey = cacheKeys[i];
|
||
var cacheExports = cache[cacheKey].exports;
|
||
|
||
// Using babel as a transpiler to use esmodule, the export will always
|
||
// be an object with the default export as a property of it. To ensure
|
||
// the existing api and babel esmodule exports are both supported we
|
||
// check for both
|
||
if (cacheExports === fn || cacheExports && cacheExports.default === fn) {
|
||
fnModuleKey = cacheKey;
|
||
break;
|
||
}
|
||
}
|
||
|
||
// if we couldn't find one, lets make one
|
||
if (!fnModuleKey) {
|
||
fnModuleKey = Math.floor(Math.pow(16, 8) * Math.random()).toString(16);
|
||
|
||
var fnModuleCache = {};
|
||
|
||
for (var i = 0; i < cacheKeys.length; i++) {
|
||
var cacheKey = cacheKeys[i];
|
||
|
||
fnModuleCache[cacheKey] = cacheKey;
|
||
}
|
||
|
||
sources[fnModuleKey] = [
|
||
'function(require,module,exports){' + fn + '(self); }',
|
||
fnModuleCache
|
||
];
|
||
}
|
||
|
||
var entryKey = Math.floor(Math.pow(16, 8) * Math.random()).toString(16);
|
||
var entryCache = {};
|
||
|
||
entryCache[fnModuleKey] = fnModuleKey;
|
||
sources[entryKey] = [
|
||
'function(require,module,exports){' +
|
||
// try to call default if defined to also support babel esmodule exports
|
||
'var f = require(' + stringify(fnModuleKey) + ');' +
|
||
'(f.default ? f.default : f)(self);' +
|
||
'}',
|
||
entryCache
|
||
];
|
||
|
||
return '(' + bundleFn + ')({'
|
||
+ Object.keys(sources).map(function(key) {
|
||
return stringify(key) + ':['
|
||
+ sources[key][0] + ','
|
||
+ stringify(sources[key][1]) + ']';
|
||
}).join(',')
|
||
+ '},{},[' + stringify(entryKey) + '])';
|
||
};
|
||
|
||
var bundleWithWebpack = function(fn, fnModuleId) {
|
||
var devMode = typeof fnModuleId === 'string';
|
||
var sourceStrings;
|
||
|
||
if (devMode) {
|
||
sourceStrings = {};
|
||
} else {
|
||
sourceStrings = [];
|
||
}
|
||
|
||
Object.keys(sources).forEach(function(sKey) {
|
||
if (!sources[sKey]) {
|
||
return;
|
||
}
|
||
sourceStrings[sKey] = sources[sKey].toString();
|
||
});
|
||
|
||
var fnModuleExports = __webpack_require__(fnModuleId);
|
||
|
||
// Using babel as a transpiler to use esmodule, the export will always
|
||
// be an object with the default export as a property of it. To ensure
|
||
// the existing api and babel esmodule exports are both supported we
|
||
// check for both
|
||
if (!(fnModuleExports && (fnModuleExports === fn || fnModuleExports.default === fn))) {
|
||
var fnSourceString = sourceStrings[fnModuleId];
|
||
|
||
sourceStrings[fnModuleId] = fnSourceString.substring(0, fnSourceString.length - 1) +
|
||
'\n' + fn.name + '();\n}';
|
||
}
|
||
|
||
var modulesString;
|
||
|
||
if (devMode) {
|
||
// must escape quotes to support webpack loader options
|
||
fnModuleId = stringify(fnModuleId);
|
||
// dev mode in webpack4, modules are passed as an object
|
||
var mappedSourceStrings = Object.keys(sourceStrings).map(function(sKey) {
|
||
return stringify(sKey) + ':' + sourceStrings[sKey];
|
||
});
|
||
|
||
modulesString = '{' + mappedSourceStrings.join(',') + '}';
|
||
} else {
|
||
modulesString = '[' + sourceStrings.join(',') + ']';
|
||
}
|
||
|
||
return 'var fn = (' + bundleFn.toString().replace('entryModule', fnModuleId) + ')('
|
||
+ modulesString
|
||
+ ');\n'
|
||
// not a function when calling a function from the current scope
|
||
+ '(typeof fn === "function") && fn(self);';
|
||
|
||
};
|
||
|
||
module.exports = function webwackify(fn, fnModuleId) {
|
||
var src;
|
||
|
||
if (webpack) {
|
||
src = bundleWithWebpack(fn, fnModuleId);
|
||
} else {
|
||
src = bundleWithBrowserify(fn);
|
||
}
|
||
|
||
var blob = new Blob([src], { type: 'text/javascript' });
|
||
var URL = window.URL || window.webkitURL || window.mozURL || window.msURL;
|
||
var workerUrl = URL.createObjectURL(blob);
|
||
var worker = new Worker(workerUrl);
|
||
worker.objectURL = workerUrl;
|
||
return worker;
|
||
};
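// Usage sketch (illustrative; `workerFn` stands for a function that was bundled
// as its own module, and `resolveWorkerModule()` for a require.resolve(...)
// lookup such as resolveFlashTransmuxWorker() further down in this bundle):
//
//   var worker = webwackify(workerFn, resolveWorkerModule());
//   worker.postMessage({ action: 'init' });
//
// Under browserify the second argument is ignored and the module key is found
// by scanning the require cache; under webpack it identifies the worker module.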
|
||
|
||
},{}],35:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* @file add-text-track-data.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
/**
|
||
 * Define properties on a cue for backwards compatibility,
|
||
* but warn the user that the way that they are using it
|
||
 * is deprecated and will be removed at a later date.
|
||
*
|
||
* @param {Cue} cue the cue to add the properties on
|
||
* @private
|
||
*/
|
||
var deprecateOldCue = function deprecateOldCue(cue) {
|
||
Object.defineProperties(cue.frame, {
|
||
id: {
|
||
get: function get() {
|
||
_videoJs2['default'].log.warn('cue.frame.id is deprecated. Use cue.value.key instead.');
|
||
return cue.value.key;
|
||
}
|
||
},
|
||
value: {
|
||
get: function get() {
|
||
_videoJs2['default'].log.warn('cue.frame.value is deprecated. Use cue.value.data instead.');
|
||
return cue.value.data;
|
||
}
|
||
},
|
||
privateData: {
|
||
get: function get() {
|
||
_videoJs2['default'].log.warn('cue.frame.privateData is deprecated. Use cue.value.data instead.');
|
||
return cue.value.data;
|
||
}
|
||
}
|
||
});
|
||
};
|
||
|
||
var durationOfVideo = function durationOfVideo(duration) {
|
||
var dur = undefined;
|
||
|
||
if (isNaN(duration) || Math.abs(duration) === Infinity) {
|
||
dur = Number.MAX_VALUE;
|
||
} else {
|
||
dur = duration;
|
||
}
|
||
return dur;
|
||
};
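// For example (illustrative): durationOfVideo(NaN) and durationOfVideo(Infinity)
// both return Number.MAX_VALUE, while durationOfVideo(30) returns 30, so the
// last metadata cue always gets a finite end time even before the real
// duration is known.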
|
||
/**
|
||
* Add text track data to a source handler given the captions and
|
||
* metadata from the buffer.
|
||
*
|
||
* @param {Object} sourceHandler the flash or virtual source buffer
|
||
* @param {Array} captionArray an array of caption data
|
||
* @param {Array} metadataArray an array of meta data
|
||
* @private
|
||
*/
|
||
var addTextTrackData = function addTextTrackData(sourceHandler, captionArray, metadataArray) {
|
||
var Cue = _globalWindow2['default'].WebKitDataCue || _globalWindow2['default'].VTTCue;
|
||
|
||
if (captionArray) {
|
||
captionArray.forEach(function (caption) {
|
||
var track = caption.stream;
|
||
|
||
this.inbandTextTracks_[track].addCue(new Cue(caption.startTime + this.timestampOffset, caption.endTime + this.timestampOffset, caption.text));
|
||
}, sourceHandler);
|
||
}
|
||
|
||
if (metadataArray) {
|
||
(function () {
|
||
var videoDuration = durationOfVideo(sourceHandler.mediaSource_.duration);
|
||
|
||
metadataArray.forEach(function (metadata) {
|
||
var time = metadata.cueTime + this.timestampOffset;
|
||
|
||
metadata.frames.forEach(function (frame) {
|
||
var cue = new Cue(time, time, frame.value || frame.url || frame.data || '');
|
||
|
||
cue.frame = frame;
|
||
cue.value = frame;
|
||
deprecateOldCue(cue);
|
||
|
||
this.metadataTrack_.addCue(cue);
|
||
}, this);
|
||
}, sourceHandler);
|
||
|
||
// Updating the metadata cues so that
|
||
// the endTime of each cue is the startTime of the next cue
|
||
// the endTime of last cue is the duration of the video
|
||
if (sourceHandler.metadataTrack_ && sourceHandler.metadataTrack_.cues && sourceHandler.metadataTrack_.cues.length) {
|
||
(function () {
|
||
var cues = sourceHandler.metadataTrack_.cues;
|
||
var cuesArray = [];
|
||
|
||
// Create a copy of the TextTrackCueList...
|
||
// ...disregarding cues with a falsey value
|
||
for (var i = 0; i < cues.length; i++) {
|
||
if (cues[i]) {
|
||
cuesArray.push(cues[i]);
|
||
}
|
||
}
|
||
|
||
// Group cues by their startTime value
|
||
var cuesGroupedByStartTime = cuesArray.reduce(function (obj, cue) {
|
||
var timeSlot = obj[cue.startTime] || [];
|
||
|
||
timeSlot.push(cue);
|
||
obj[cue.startTime] = timeSlot;
|
||
|
||
return obj;
|
||
}, {});
|
||
|
||
// Sort startTimes by ascending order
|
||
var sortedStartTimes = Object.keys(cuesGroupedByStartTime).sort(function (a, b) {
|
||
return Number(a) - Number(b);
|
||
});
|
||
|
||
// Map each cue group's endTime to the next group's startTime
|
||
sortedStartTimes.forEach(function (startTime, idx) {
|
||
var cueGroup = cuesGroupedByStartTime[startTime];
|
||
var nextTime = Number(sortedStartTimes[idx + 1]) || videoDuration;
|
||
|
||
// Map each cue's endTime the next group's startTime
|
||
cueGroup.forEach(function (cue) {
|
||
cue.endTime = nextTime;
|
||
});
|
||
});
|
||
})();
|
||
}
|
||
})();
|
||
}
|
||
};
|
||
|
||
exports['default'] = {
|
||
addTextTrackData: addTextTrackData,
|
||
durationOfVideo: durationOfVideo
|
||
};
|
||
module.exports = exports['default'];
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"global/window":3}],36:[function(require,module,exports){
|
||
/**
|
||
* @file codec-utils.js
|
||
*/
|
||
|
||
/**
|
||
* Check if a codec string refers to an audio codec.
|
||
*
|
||
* @param {String} codec codec string to check
|
||
* @return {Boolean} if this is an audio codec
|
||
* @private
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
var isAudioCodec = function isAudioCodec(codec) {
|
||
return (/mp4a\.\d+.\d+/i.test(codec)
|
||
);
|
||
};
|
||
|
||
/**
|
||
* Check if a codec string refers to a video codec.
|
||
*
|
||
* @param {String} codec codec string to check
|
||
* @return {Boolean} if this is a video codec
|
||
* @private
|
||
*/
|
||
var isVideoCodec = function isVideoCodec(codec) {
|
||
return (/avc1\.[\da-f]+/i.test(codec)
|
||
);
|
||
};
|
||
|
||
/**
|
||
* Parse a content type header into a type and parameters
|
||
* object
|
||
*
|
||
* @param {String} type the content type header
|
||
* @return {Object} the parsed content-type
|
||
* @private
|
||
*/
|
||
var parseContentType = function parseContentType(type) {
|
||
var object = { type: '', parameters: {} };
|
||
var parameters = type.trim().split(';');
|
||
|
||
// first parameter should always be content-type
|
||
object.type = parameters.shift().trim();
|
||
parameters.forEach(function (parameter) {
|
||
var pair = parameter.trim().split('=');
|
||
|
||
if (pair.length > 1) {
|
||
var _name = pair[0].replace(/"/g, '').trim();
|
||
var value = pair[1].replace(/"/g, '').trim();
|
||
|
||
object.parameters[_name] = value;
|
||
}
|
||
});
|
||
|
||
return object;
|
||
};
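// Worked example (hypothetical input):
//
//   parseContentType('video/mp2t; charset="utf-8"');
//   // -> { type: 'video/mp2t', parameters: { charset: 'utf-8' } }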
|
||
|
||
/**
|
||
* Replace the old apple-style `avc1.<dd>.<dd>` codec string with the standard
|
||
* `avc1.<hhhhhh>`
|
||
*
|
||
* @param {Array} codecs an array of codec strings to fix
|
||
* @return {Array} the translated codec array
|
||
* @private
|
||
*/
|
||
var translateLegacyCodecs = function translateLegacyCodecs(codecs) {
|
||
return codecs.map(function (codec) {
|
||
return codec.replace(/avc1\.(\d+)\.(\d+)/i, function (orig, profile, avcLevel) {
|
||
var profileHex = ('00' + Number(profile).toString(16)).slice(-2);
|
||
var avcLevelHex = ('00' + Number(avcLevel).toString(16)).slice(-2);
|
||
|
||
return 'avc1.' + profileHex + '00' + avcLevelHex;
|
||
});
|
||
});
|
||
};
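// Worked example: the legacy Apple-style string 'avc1.66.30' (profile 66,
// level 30) becomes 'avc1.42001e' (0x42 === 66, 0x1e === 30); strings that do
// not match the legacy pattern pass through untouched.
//
//   translateLegacyCodecs(['avc1.66.30', 'mp4a.40.2']);
//   // -> ['avc1.42001e', 'mp4a.40.2']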
|
||
|
||
exports['default'] = {
|
||
isAudioCodec: isAudioCodec,
|
||
parseContentType: parseContentType,
|
||
isVideoCodec: isVideoCodec,
|
||
translateLegacyCodecs: translateLegacyCodecs
|
||
};
|
||
module.exports = exports['default'];
|
||
|
||
},{}],37:[function(require,module,exports){
|
||
/**
|
||
* @file create-text-tracks-if-necessary.js
|
||
*/
|
||
|
||
/**
|
||
* Create text tracks on video.js if they exist on a segment.
|
||
*
|
||
* @param {Object} sourceBuffer the VSB or FSB
|
||
* @param {Object} mediaSource the HTML or Flash media source
|
||
* @param {Object} segment the segment that may contain the text track
|
||
* @private
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
var createTextTracksIfNecessary = function createTextTracksIfNecessary(sourceBuffer, mediaSource, segment) {
|
||
var player = mediaSource.player_;
|
||
|
||
// create an in-band caption track if one is present in the segment
|
||
if (segment.captions && segment.captions.length) {
|
||
if (!sourceBuffer.inbandTextTracks_) {
|
||
sourceBuffer.inbandTextTracks_ = {};
|
||
}
|
||
|
||
for (var trackId in segment.captionStreams) {
|
||
if (!sourceBuffer.inbandTextTracks_[trackId]) {
|
||
player.tech_.trigger({ type: 'usage', name: 'hls-608' });
|
||
var track = player.textTracks().getTrackById(trackId);
|
||
|
||
if (track) {
|
||
// Reuse an existing track with a CC# id because this was
|
||
// very likely created by videojs-contrib-hls from information
|
||
// in the m3u8 for us to use
|
||
sourceBuffer.inbandTextTracks_[trackId] = track;
|
||
} else {
|
||
// Otherwise, create a track with the default `CC#` label and
|
||
// without a language
|
||
sourceBuffer.inbandTextTracks_[trackId] = player.addRemoteTextTrack({
|
||
kind: 'captions',
|
||
id: trackId,
|
||
label: trackId
|
||
}, false).track;
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
if (segment.metadata && segment.metadata.length && !sourceBuffer.metadataTrack_) {
|
||
sourceBuffer.metadataTrack_ = player.addRemoteTextTrack({
|
||
kind: 'metadata',
|
||
label: 'Timed Metadata'
|
||
}, false).track;
|
||
sourceBuffer.metadataTrack_.inBandMetadataTrackDispatchType = segment.metadata.dispatchType;
|
||
}
|
||
};
|
||
|
||
exports['default'] = createTextTracksIfNecessary;
|
||
module.exports = exports['default'];
|
||
|
||
},{}],38:[function(require,module,exports){
|
||
/**
|
||
* @file flash-constants.js
|
||
*/
|
||
/**
|
||
* The maximum size in bytes for append operations to the video.js
|
||
* SWF. Calling through to Flash blocks and can be expensive so
|
||
* we chunk data and pass through 4KB at a time, yielding to the
|
||
* browser between chunks. This gives a theoretical maximum rate of
|
||
* 1MB/s into Flash. Any higher and we begin to drop frames and UI
|
||
* responsiveness suffers.
|
||
*
|
||
* @private
|
||
*/
|
||
"use strict";
|
||
|
||
Object.defineProperty(exports, "__esModule", {
|
||
value: true
|
||
});
|
||
var flashConstants = {
|
||
// times in milliseconds
|
||
TIME_BETWEEN_CHUNKS: 1,
|
||
BYTES_PER_CHUNK: 1024 * 32
|
||
};
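// Note (editorial): with the values defined above, data is pushed to the SWF in
// 32 KB chunks no more than once per millisecond, which puts the theoretical
// ceiling on the order of 30 MB/s; the 4 KB / 1 MB/s figures in the block
// comment above appear to describe earlier default values.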
|
||
|
||
exports["default"] = flashConstants;
|
||
module.exports = exports["default"];
|
||
|
||
},{}],39:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* @file flash-media-source.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
var _get = function get(_x, _x2, _x3) { var _again = true; _function: while (_again) { var object = _x, property = _x2, receiver = _x3; _again = false; if (object === null) object = Function.prototype; var desc = Object.getOwnPropertyDescriptor(object, property); if (desc === undefined) { var parent = Object.getPrototypeOf(object); if (parent === null) { return undefined; } else { _x = parent; _x2 = property; _x3 = receiver; _again = true; desc = parent = undefined; continue _function; } } else if ('value' in desc) { return desc.value; } else { var getter = desc.get; if (getter === undefined) { return undefined; } return getter.call(receiver); } } };
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
|
||
|
||
var _globalDocument = require('global/document');
|
||
|
||
var _globalDocument2 = _interopRequireDefault(_globalDocument);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _flashSourceBuffer = require('./flash-source-buffer');
|
||
|
||
var _flashSourceBuffer2 = _interopRequireDefault(_flashSourceBuffer);
|
||
|
||
var _flashConstants = require('./flash-constants');
|
||
|
||
var _flashConstants2 = _interopRequireDefault(_flashConstants);
|
||
|
||
var _codecUtils = require('./codec-utils');
|
||
|
||
/**
|
||
 * A flash implementation of HTML MediaSources and a polyfill
|
||
 * for browsers that don't support native or HTML MediaSources.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource
|
||
* @class FlashMediaSource
|
||
* @extends videojs.EventTarget
|
||
*/
|
||
|
||
var FlashMediaSource = (function (_videojs$EventTarget) {
|
||
_inherits(FlashMediaSource, _videojs$EventTarget);
|
||
|
||
function FlashMediaSource() {
|
||
var _this = this;
|
||
|
||
_classCallCheck(this, FlashMediaSource);
|
||
|
||
_get(Object.getPrototypeOf(FlashMediaSource.prototype), 'constructor', this).call(this);
|
||
this.sourceBuffers = [];
|
||
this.readyState = 'closed';
|
||
|
||
this.on(['sourceopen', 'webkitsourceopen'], function (event) {
|
||
// find the swf where we will push media data
|
||
_this.swfObj = _globalDocument2['default'].getElementById(event.swfId);
|
||
_this.player_ = (0, _videoJs2['default'])(_this.swfObj.parentNode);
|
||
_this.tech_ = _this.swfObj.tech;
|
||
_this.readyState = 'open';
|
||
|
||
_this.tech_.on('seeking', function () {
|
||
var i = _this.sourceBuffers.length;
|
||
|
||
while (i--) {
|
||
_this.sourceBuffers[i].abort();
|
||
}
|
||
});
|
||
|
||
// trigger load events
|
||
if (_this.swfObj) {
|
||
_this.swfObj.vjs_load();
|
||
}
|
||
});
|
||
}
|
||
|
||
/**
|
||
* Set or return the presentation duration.
|
||
*
|
||
* @param {Double} value the duration of the media in seconds
|
||
* @param {Double} the current presentation duration
|
||
* @link http://www.w3.org/TR/media-source/#widl-MediaSource-duration
|
||
*/
|
||
|
||
/**
|
||
* We have this function so that the html and flash interfaces
|
||
* are the same.
|
||
*
|
||
* @private
|
||
*/
|
||
|
||
_createClass(FlashMediaSource, [{
|
||
key: 'addSeekableRange_',
|
||
value: function addSeekableRange_() {}
|
||
// intentional no-op
|
||
|
||
/**
|
||
* Create a new flash source buffer and add it to our flash media source.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/addSourceBuffer
|
||
* @param {String} type the content-type of the source
|
||
* @return {Object} the flash source buffer
|
||
*/
|
||
|
||
}, {
|
||
key: 'addSourceBuffer',
|
||
value: function addSourceBuffer(type) {
|
||
var parsedType = (0, _codecUtils.parseContentType)(type);
|
||
var sourceBuffer = undefined;
|
||
|
||
// if this is an FLV type, we'll push data to flash
|
||
if (parsedType.type === 'video/mp2t' || parsedType.type === 'audio/mp2t') {
|
||
// Flash source buffers
|
||
sourceBuffer = new _flashSourceBuffer2['default'](this);
|
||
} else {
|
||
throw new Error('NotSupportedError (Video.js)');
|
||
}
|
||
|
||
this.sourceBuffers.push(sourceBuffer);
|
||
return sourceBuffer;
|
||
}
|
||
|
||
/**
|
||
* Signals the end of the stream.
|
||
*
|
||
* @link https://w3c.github.io/media-source/#widl-MediaSource-endOfStream-void-EndOfStreamError-error
|
||
* @param {String=} error Signals that a playback error
|
||
* has occurred. If specified, it must be either "network" or
|
||
* "decode".
|
||
*/
|
||
}, {
|
||
key: 'endOfStream',
|
||
value: function endOfStream(error) {
|
||
if (error === 'network') {
|
||
// MEDIA_ERR_NETWORK
|
||
this.tech_.error(2);
|
||
} else if (error === 'decode') {
|
||
// MEDIA_ERR_DECODE
|
||
this.tech_.error(3);
|
||
}
|
||
if (this.readyState !== 'ended') {
|
||
this.readyState = 'ended';
|
||
this.swfObj.vjs_endOfStream();
|
||
}
|
||
}
|
||
}]);
|
||
|
||
return FlashMediaSource;
|
||
})(_videoJs2['default'].EventTarget);
|
||
|
||
exports['default'] = FlashMediaSource;
|
||
try {
|
||
Object.defineProperty(FlashMediaSource.prototype, 'duration', {
|
||
/**
|
||
* Return the presentation duration.
|
||
*
|
||
* @return {Double} the duration of the media in seconds
|
||
* @link http://www.w3.org/TR/media-source/#widl-MediaSource-duration
|
||
*/
|
||
get: function get() {
|
||
if (!this.swfObj) {
|
||
return NaN;
|
||
}
|
||
// get the current duration from the SWF
|
||
return this.swfObj.vjs_getProperty('duration');
|
||
},
|
||
/**
|
||
* Set the presentation duration.
|
||
*
|
||
* @param {Double} value the duration of the media in seconds
|
||
* @return {Double} the duration of the media in seconds
|
||
* @link http://www.w3.org/TR/media-source/#widl-MediaSource-duration
|
||
*/
|
||
set: function set(value) {
|
||
var i = undefined;
|
||
var oldDuration = this.swfObj.vjs_getProperty('duration');
|
||
|
||
this.swfObj.vjs_setProperty('duration', value);
|
||
|
||
if (value < oldDuration) {
|
||
// In MSE, this triggers the range removal algorithm which causes
|
||
// an update to occur
|
||
for (i = 0; i < this.sourceBuffers.length; i++) {
|
||
this.sourceBuffers[i].remove(value, oldDuration);
|
||
}
|
||
}
|
||
|
||
return value;
|
||
}
|
||
});
|
||
} catch (e) {
|
||
// IE8 throws if defineProperty is called on a non-DOM node. We
|
||
// don't support IE8 but we shouldn't throw an error if loaded
|
||
// there.
|
||
FlashMediaSource.prototype.duration = NaN;
|
||
}
|
||
|
||
for (var property in _flashConstants2['default']) {
|
||
FlashMediaSource[property] = _flashConstants2['default'][property];
|
||
}
|
||
module.exports = exports['default'];
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"./codec-utils":36,"./flash-constants":38,"./flash-source-buffer":40,"global/document":2}],40:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* @file flash-source-buffer.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
var _get = function get(_x, _x2, _x3) { var _again = true; _function: while (_again) { var object = _x, property = _x2, receiver = _x3; _again = false; if (object === null) object = Function.prototype; var desc = Object.getOwnPropertyDescriptor(object, property); if (desc === undefined) { var parent = Object.getPrototypeOf(object); if (parent === null) { return undefined; } else { _x = parent; _x2 = property; _x3 = receiver; _again = true; desc = parent = undefined; continue _function; } } else if ('value' in desc) { return desc.value; } else { var getter = desc.get; if (getter === undefined) { return undefined; } return getter.call(receiver); } } };
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _muxJsLibFlv = require('mux.js/lib/flv');
|
||
|
||
var _muxJsLibFlv2 = _interopRequireDefault(_muxJsLibFlv);
|
||
|
||
var _removeCuesFromTrack = require('./remove-cues-from-track');
|
||
|
||
var _removeCuesFromTrack2 = _interopRequireDefault(_removeCuesFromTrack);
|
||
|
||
var _createTextTracksIfNecessary = require('./create-text-tracks-if-necessary');
|
||
|
||
var _createTextTracksIfNecessary2 = _interopRequireDefault(_createTextTracksIfNecessary);
|
||
|
||
var _addTextTrackData = require('./add-text-track-data');
|
||
|
||
var _flashTransmuxerWorker = require('./flash-transmuxer-worker');
|
||
|
||
var _flashTransmuxerWorker2 = _interopRequireDefault(_flashTransmuxerWorker);
|
||
|
||
var _webwackify = require('webwackify');
|
||
|
||
var _webwackify2 = _interopRequireDefault(_webwackify);
|
||
|
||
var _flashConstants = require('./flash-constants');
|
||
|
||
var _flashConstants2 = _interopRequireDefault(_flashConstants);
|
||
|
||
var resolveFlashTransmuxWorker = function resolveFlashTransmuxWorker() {
|
||
var result = undefined;
|
||
|
||
try {
|
||
result = require.resolve('./flash-transmuxer-worker');
|
||
} catch (e) {
|
||
// no result
|
||
}
|
||
|
||
return result;
|
||
};
|
||
|
||
/**
|
||
* A wrapper around the setTimeout function that uses
|
||
* the flash constant time between ticks value.
|
||
*
|
||
* @param {Function} func the function callback to run
|
||
* @private
|
||
*/
|
||
var scheduleTick = function scheduleTick(func) {
|
||
// Chrome doesn't invoke requestAnimationFrame callbacks
|
||
// in background tabs, so use setTimeout.
|
||
_globalWindow2['default'].setTimeout(func, _flashConstants2['default'].TIME_BETWEEN_CHUNKS);
|
||
};
|
||
|
||
/**
|
||
* Generates a random string of max length 6
|
||
*
|
||
* @return {String} the randomly generated string
|
||
* @function generateRandomString
|
||
* @private
|
||
*/
|
||
var generateRandomString = function generateRandomString() {
|
||
return Math.random().toString(36).slice(2, 8);
|
||
};
|
||
|
||
/**
|
||
* Round a number to a specified number of places much like
|
||
* toFixed but return a number instead of a string representation.
|
||
*
|
||
* @param {Number} num A number
|
||
* @param {Number} places The number of decimal places which to
|
||
* round
|
||
* @private
|
||
*/
|
||
var toDecimalPlaces = function toDecimalPlaces(num, places) {
|
||
if (typeof places !== 'number' || places < 0) {
|
||
places = 0;
|
||
}
|
||
|
||
var scale = Math.pow(10, places);
|
||
|
||
return Math.round(num * scale) / scale;
|
||
};
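// Illustrative usage (not part of the original source): toDecimalPlaces rounds to a
// fixed number of decimal places but returns a Number rather than a string, and a
// missing or negative `places` falls back to 0.
//
//   toDecimalPlaces(1.23456, 3); // 1.235
//   toDecimalPlaces(1.5);        // 2  (places defaults to 0)
//   toDecimalPlaces(1.005, -2);  // 1  (negative places is treated as 0)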
/**
|
||
* A SourceBuffer implementation for Flash rather than HTML.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource
|
||
* @param {Object} mediaSource the flash media source
|
||
* @class FlashSourceBuffer
|
||
* @extends videojs.EventTarget
|
||
*/
|
||
|
||
var FlashSourceBuffer = (function (_videojs$EventTarget) {
|
||
_inherits(FlashSourceBuffer, _videojs$EventTarget);
|
||
|
||
function FlashSourceBuffer(mediaSource) {
|
||
var _this = this;
|
||
|
||
_classCallCheck(this, FlashSourceBuffer);
|
||
|
||
_get(Object.getPrototypeOf(FlashSourceBuffer.prototype), 'constructor', this).call(this);
|
||
var encodedHeader = undefined;
|
||
|
||
// Start off using the globally defined value but refine
|
||
// as we append data into flash
|
||
this.chunkSize_ = _flashConstants2['default'].BYTES_PER_CHUNK;
|
||
|
||
// byte arrays queued to be appended
|
||
this.buffer_ = [];
|
||
|
||
// the total number of queued bytes
|
||
this.bufferSize_ = 0;
|
||
|
||
// to be able to determine the correct position to seek to, we
|
||
// need to retain information about the mapping between the
|
||
// media timeline and PTS values
|
||
this.basePtsOffset_ = NaN;
|
||
|
||
this.mediaSource_ = mediaSource;
|
||
|
||
this.audioBufferEnd_ = NaN;
|
||
this.videoBufferEnd_ = NaN;
|
||
|
||
// indicates whether the asynchronous continuation of an operation
|
||
// is still being processed
|
||
// see https://w3c.github.io/media-source/#widl-SourceBuffer-updating
|
||
this.updating = false;
|
||
this.timestampOffset_ = 0;
|
||
|
||
encodedHeader = _globalWindow2['default'].btoa(String.fromCharCode.apply(null, Array.prototype.slice.call(_muxJsLibFlv2['default'].getFlvHeader())));
|
||
|
||
// create function names with added randomness for the global callbacks flash will use
|
||
// to get data from javascript into the swf. Random strings are added as a safety
|
||
// measure for pages with multiple players since these functions will be global
|
||
// instead of per instance. When making a call to the swf, the browser generates a
|
||
// try catch code snippet, but just takes the function name and writes out an unquoted
|
||
// call to that function. If the player id has any special characters, this will result
|
||
// in an error, so safePlayerId replaces all special characters with '_'
|
||
var safePlayerId = this.mediaSource_.player_.id().replace(/[^a-zA-Z0-9]/g, '_');
|
||
|
||
this.flashEncodedHeaderName_ = 'vjs_flashEncodedHeader_' + safePlayerId + generateRandomString();
|
||
this.flashEncodedDataName_ = 'vjs_flashEncodedData_' + safePlayerId + generateRandomString();
|
||
|
||
_globalWindow2['default'][this.flashEncodedHeaderName_] = function () {
|
||
delete _globalWindow2['default'][_this.flashEncodedHeaderName_];
|
||
return encodedHeader;
|
||
};
|
||
|
||
this.mediaSource_.swfObj.vjs_appendChunkReady(this.flashEncodedHeaderName_);
|
||
|
||
this.transmuxer_ = (0, _webwackify2['default'])(_flashTransmuxerWorker2['default'], resolveFlashTransmuxWorker());
|
||
this.transmuxer_.postMessage({ action: 'init', options: {} });
|
||
this.transmuxer_.onmessage = function (event) {
|
||
if (event.data.action === 'data') {
|
||
_this.receiveBuffer_(event.data.segment);
|
||
}
|
||
};
|
||
|
||
this.one('updateend', function () {
|
||
_this.mediaSource_.tech_.trigger('loadedmetadata');
|
||
});
|
||
|
||
Object.defineProperty(this, 'timestampOffset', {
|
||
get: function get() {
|
||
return this.timestampOffset_;
|
||
},
|
||
set: function set(val) {
|
||
if (typeof val === 'number' && val >= 0) {
|
||
this.timestampOffset_ = val;
|
||
// We have to tell flash to expect a discontinuity
|
||
this.mediaSource_.swfObj.vjs_discontinuity();
|
||
// the media <-> PTS mapping must be re-established after
|
||
// the discontinuity
|
||
this.basePtsOffset_ = NaN;
|
||
this.audioBufferEnd_ = NaN;
|
||
this.videoBufferEnd_ = NaN;
|
||
|
||
this.transmuxer_.postMessage({ action: 'reset' });
|
||
}
|
||
}
|
||
});
|
||
|
||
Object.defineProperty(this, 'buffered', {
|
||
get: function get() {
|
||
if (!this.mediaSource_ || !this.mediaSource_.swfObj || !('vjs_getProperty' in this.mediaSource_.swfObj)) {
|
||
return _videoJs2['default'].createTimeRange();
|
||
}
|
||
|
||
var buffered = this.mediaSource_.swfObj.vjs_getProperty('buffered');
|
||
|
||
if (buffered && buffered.length) {
|
||
buffered[0][0] = toDecimalPlaces(buffered[0][0], 3);
|
||
buffered[0][1] = toDecimalPlaces(buffered[0][1], 3);
|
||
}
|
||
return _videoJs2['default'].createTimeRanges(buffered);
|
||
}
|
||
});
|
||
|
||
// On a seek we remove all text track data since flash has no concept
|
||
// of a buffered-range and everything else is reset on seek
|
||
this.mediaSource_.player_.on('seeked', function () {
|
||
(0, _removeCuesFromTrack2['default'])(0, Infinity, _this.metadataTrack_);
|
||
if (_this.inbandTextTracks_) {
|
||
for (var track in _this.inbandTextTracks_) {
|
||
(0, _removeCuesFromTrack2['default'])(0, Infinity, _this.inbandTextTracks_[track]);
|
||
}
|
||
}
|
||
});
|
||
|
||
var onHlsReset = this.onHlsReset_.bind(this);
|
||
|
||
// hls-reset is fired by videojs.Hls on to the tech after the main SegmentLoader
|
||
// resets its state and flushes the buffer
|
||
this.mediaSource_.player_.tech_.on('hls-reset', onHlsReset);
|
||
|
||
this.mediaSource_.player_.tech_.hls.on('dispose', function () {
|
||
_this.transmuxer_.terminate();
|
||
_this.mediaSource_.player_.tech_.off('hls-reset', onHlsReset);
|
||
});
|
||
}
|
||
|
||
/**
|
||
* Append bytes to the SourceBuffer's internal buffer. In this case we
* have to append them to the swf object.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/appendBuffer
|
||
* @param {Array} bytes
|
||
*/
|
||
|
||
_createClass(FlashSourceBuffer, [{
|
||
key: 'appendBuffer',
|
||
value: function appendBuffer(bytes) {
|
||
var error = undefined;
|
||
|
||
if (this.updating) {
|
||
error = new Error('SourceBuffer.append() cannot be called ' + 'while an update is in progress');
|
||
error.name = 'InvalidStateError';
|
||
error.code = 11;
|
||
throw error;
|
||
}
|
||
this.updating = true;
|
||
this.mediaSource_.readyState = 'open';
|
||
this.trigger({ type: 'update' });
|
||
|
||
this.transmuxer_.postMessage({
|
||
action: 'push',
|
||
data: bytes.buffer,
|
||
byteOffset: bytes.byteOffset,
|
||
byteLength: bytes.byteLength
|
||
}, [bytes.buffer]);
|
||
this.transmuxer_.postMessage({ action: 'flush' });
|
||
}
|
||
|
||
/**
|
||
* Reset the parser and remove any data queued to be sent to the SWF.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/abort
|
||
*/
|
||
}, {
|
||
key: 'abort',
|
||
value: function abort() {
|
||
this.buffer_ = [];
|
||
this.bufferSize_ = 0;
|
||
this.mediaSource_.swfObj.vjs_abort();
|
||
|
||
// report any outstanding updates have ended
|
||
if (this.updating) {
|
||
this.updating = false;
|
||
this.trigger({ type: 'updateend' });
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Flash cannot remove ranges already buffered in the NetStream
|
||
* but seeking clears the buffer entirely. For most purposes,
|
||
* having this operation act as a no-op is acceptable.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/remove
|
||
* @param {Double} start start of the section to remove
|
||
* @param {Double} end end of the section to remove
|
||
*/
|
||
}, {
|
||
key: 'remove',
|
||
value: function remove(start, end) {
|
||
(0, _removeCuesFromTrack2['default'])(start, end, this.metadataTrack_);
|
||
if (this.inbandTextTracks_) {
|
||
for (var track in this.inbandTextTracks_) {
|
||
(0, _removeCuesFromTrack2['default'])(start, end, this.inbandTextTracks_[track]);
|
||
}
|
||
}
|
||
this.trigger({ type: 'update' });
|
||
this.trigger({ type: 'updateend' });
|
||
}
|
||
|
||
/**
|
||
* Receive a buffer from the flv transmuxer.
|
||
*
|
||
* @param {Object} segment
|
||
* @private
|
||
*/
|
||
}, {
|
||
key: 'receiveBuffer_',
|
||
value: function receiveBuffer_(segment) {
|
||
var _this2 = this;
|
||
|
||
// create an in-band caption track if one is present in the segment
|
||
(0, _createTextTracksIfNecessary2['default'])(this, this.mediaSource_, segment);
|
||
(0, _addTextTrackData.addTextTrackData)(this, segment.captions, segment.metadata);
|
||
|
||
// Do this asynchronously since convertTagsToData_ can be time consuming
|
||
scheduleTick(function () {
|
||
var flvBytes = _this2.convertTagsToData_(segment);
|
||
|
||
if (_this2.buffer_.length === 0) {
|
||
scheduleTick(_this2.processBuffer_.bind(_this2));
|
||
}
|
||
|
||
if (flvBytes) {
|
||
_this2.buffer_.push(flvBytes);
|
||
_this2.bufferSize_ += flvBytes.byteLength;
|
||
}
|
||
});
|
||
}
|
||
|
||
/**
|
||
* Append a portion of the current buffer to the SWF.
|
||
*
|
||
* @private
|
||
*/
|
||
}, {
|
||
key: 'processBuffer_',
|
||
value: function processBuffer_() {
|
||
var _this3 = this;
|
||
|
||
var chunkSize = _flashConstants2['default'].BYTES_PER_CHUNK;
|
||
|
||
if (!this.buffer_.length) {
|
||
if (this.updating !== false) {
|
||
this.updating = false;
|
||
this.trigger({ type: 'updateend' });
|
||
}
|
||
// do nothing if the buffer is empty
|
||
return;
|
||
}
|
||
|
||
// concatenate appends up to the max append size
|
||
var chunk = this.buffer_[0].subarray(0, chunkSize);
|
||
|
||
// requeue any bytes that won't make it this round
|
||
if (chunk.byteLength < chunkSize || this.buffer_[0].byteLength === chunkSize) {
|
||
this.buffer_.shift();
|
||
} else {
|
||
this.buffer_[0] = this.buffer_[0].subarray(chunkSize);
|
||
}
|
||
|
||
this.bufferSize_ -= chunk.byteLength;
|
||
|
||
// base64 encode the bytes
|
||
var binary = [];
|
||
var length = chunk.byteLength;
|
||
|
||
for (var i = 0; i < length; i++) {
|
||
binary.push(String.fromCharCode(chunk[i]));
|
||
}
|
||
var b64str = _globalWindow2['default'].btoa(binary.join(''));
|
||
|
||
_globalWindow2['default'][this.flashEncodedDataName_] = function () {
|
||
// schedule another processBuffer to process any left over data or to
|
||
// trigger updateend
|
||
scheduleTick(_this3.processBuffer_.bind(_this3));
|
||
delete _globalWindow2['default'][_this3.flashEncodedDataName_];
|
||
return b64str;
|
||
};
|
||
|
||
// Notify the swf that segment data is ready to be appended
|
||
this.mediaSource_.swfObj.vjs_appendChunkReady(this.flashEncodedDataName_);
|
||
}
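// Rough shape of the JS <-> SWF handshake performed above (illustrative sketch; the
// callback name is made up, `drainNext` stands in for the bound processBuffer_ call,
// and the ExternalInterface side lives in the swf, outside this bundle):
//
//   // 1. register a one-shot global that hands the base64-encoded chunk to flash
//   window['vjs_flashEncodedData_player_abc123'] = function() {
//     scheduleTick(drainNext);   // keep draining the queue / fire updateend
//     delete window['vjs_flashEncodedData_player_abc123'];
//     return b64str;             // the chunk encoded with window.btoa above
//   };
//   // 2. hand only the *name* to the swf; flash later calls the global back
//   this.mediaSource_.swfObj.vjs_appendChunkReady('vjs_flashEncodedData_player_abc123');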
/**
|
||
* Turns an array of flv tags into a Uint8Array representing the
|
||
* flv data. Also removes any tags that are before the current
|
||
* time so that playback begins at or slightly after the right
|
||
* place on a seek
|
||
*
|
||
* @private
|
||
* @param {Object} segmentData object of segment data
|
||
*/
|
||
}, {
|
||
key: 'convertTagsToData_',
|
||
value: function convertTagsToData_(segmentData) {
|
||
var segmentByteLength = 0;
|
||
var tech = this.mediaSource_.tech_;
|
||
var videoTargetPts = 0;
|
||
var segment = undefined;
|
||
var videoTags = segmentData.tags.videoTags;
|
||
var audioTags = segmentData.tags.audioTags;
|
||
|
||
// Establish the media timeline to PTS translation if we don't
|
||
// have one already
|
||
if (isNaN(this.basePtsOffset_) && (videoTags.length || audioTags.length)) {
|
||
// We know there is at least one video or audio tag, but since we may not have both,
|
||
// we use pts: Infinity for the missing tag. This will force the following Math.min
// call to use the proper pts value, since it will always be less than Infinity
|
||
var firstVideoTag = videoTags[0] || { pts: Infinity };
|
||
var firstAudioTag = audioTags[0] || { pts: Infinity };
|
||
|
||
this.basePtsOffset_ = Math.min(firstAudioTag.pts, firstVideoTag.pts);
|
||
}
|
||
|
||
if (tech.seeking()) {
|
||
// Do not use previously saved buffer end values while seeking since buffer
|
||
// is cleared on all seeks
|
||
this.videoBufferEnd_ = NaN;
|
||
this.audioBufferEnd_ = NaN;
|
||
}
|
||
|
||
if (isNaN(this.videoBufferEnd_)) {
|
||
if (tech.buffered().length) {
|
||
videoTargetPts = tech.buffered().end(0) - this.timestampOffset;
|
||
}
|
||
|
||
// Trim to currentTime if seeking
|
||
if (tech.seeking()) {
|
||
videoTargetPts = Math.max(videoTargetPts, tech.currentTime() - this.timestampOffset);
|
||
}
|
||
|
||
// PTS values are represented in milliseconds
|
||
videoTargetPts *= 1e3;
|
||
videoTargetPts += this.basePtsOffset_;
|
||
} else {
|
||
// Add a fudge factor of 0.1 to the last video pts appended since a rendition change
|
||
// could append an overlapping segment, in which case there is a high likelihood
|
||
// a tag could have a matching pts to videoBufferEnd_, which would cause
|
||
// that tag to get appended by the tag.pts >= targetPts check below even though it
|
||
// is a duplicate of what was previously appended
|
||
videoTargetPts = this.videoBufferEnd_ + 0.1;
|
||
}
|
||
|
||
// filter complete GOPs with a presentation time less than the seek target/end of buffer
|
||
var currentIndex = videoTags.length;
|
||
|
||
// only search the list for a GOP if the last tag reaches videoTargetPts; otherwise
// videoTargetPts lies in a future segment, every tag here is already behind the
// buffered end, and the slice below drops all of them
|
||
if (currentIndex && videoTags[currentIndex - 1].pts >= videoTargetPts) {
|
||
// Start by walking backwards from the end of the list until we reach a tag that
|
||
// is equal to or less than videoTargetPts
|
||
while (--currentIndex) {
|
||
var currentTag = videoTags[currentIndex];
|
||
|
||
if (currentTag.pts > videoTargetPts) {
|
||
continue;
|
||
}
|
||
|
||
// if we see a keyFrame or metadata tag once we've gone below videoTargetPts,
|
||
// exit the loop as this is the start of the GOP that we want to append
|
||
if (currentTag.keyFrame || currentTag.metaDataTag) {
|
||
break;
|
||
}
|
||
}
|
||
|
||
// We need to check if there are any metadata tags that come before currentIndex
|
||
// as those will be metadata tags associated with the GOP we are appending
|
||
// There could be 0 to 2 metadata tags that come before the currentIndex depending
|
||
// on what videoTargetPts is and whether the transmuxer prepended metadata tags to this
|
||
// key frame
|
||
while (currentIndex) {
|
||
var nextTag = videoTags[currentIndex - 1];
|
||
|
||
if (!nextTag.metaDataTag) {
|
||
break;
|
||
}
|
||
|
||
currentIndex--;
|
||
}
|
||
}
|
||
|
||
var filteredVideoTags = videoTags.slice(currentIndex);
|
||
|
||
var audioTargetPts = undefined;
|
||
|
||
if (isNaN(this.audioBufferEnd_)) {
|
||
audioTargetPts = videoTargetPts;
|
||
} else {
|
||
// Add a fudge factor of 0.1 to the last audio pts appended since a rendition change
// could append an overlapping segment, in which case there is a high likelihood
// a tag could have a matching pts to audioBufferEnd_, which would cause
// that tag to get appended by the tag.pts >= targetPts check below even though it
// is a duplicate of what was previously appended
|
||
audioTargetPts = this.audioBufferEnd_ + 0.1;
|
||
}
|
||
|
||
if (filteredVideoTags.length) {
|
||
// If targetPts intersects a GOP and we appended the tags for the GOP that came
|
||
// before targetPts, we want to make sure to trim audio tags at the pts
|
||
// of the first video tag to avoid brief moments of silence
|
||
audioTargetPts = Math.min(audioTargetPts, filteredVideoTags[0].pts);
|
||
}
|
||
|
||
// skip tags with a presentation time less than the seek target/end of buffer
|
||
currentIndex = 0;
|
||
|
||
while (currentIndex < audioTags.length) {
|
||
if (audioTags[currentIndex].pts >= audioTargetPts) {
|
||
break;
|
||
}
|
||
|
||
currentIndex++;
|
||
}
|
||
|
||
var filteredAudioTags = audioTags.slice(currentIndex);
|
||
|
||
// update the audio and video buffer ends
|
||
if (filteredAudioTags.length) {
|
||
this.audioBufferEnd_ = filteredAudioTags[filteredAudioTags.length - 1].pts;
|
||
}
|
||
if (filteredVideoTags.length) {
|
||
this.videoBufferEnd_ = filteredVideoTags[filteredVideoTags.length - 1].pts;
|
||
}
|
||
|
||
var tags = this.getOrderedTags_(filteredVideoTags, filteredAudioTags);
|
||
|
||
if (tags.length === 0) {
|
||
return;
|
||
}
|
||
|
||
// If we are appending data that comes before our target pts, we want to tell
|
||
// the swf to adjust its notion of current time to account for the extra tags
|
||
// we are appending to complete the GOP that intersects with targetPts
|
||
if (tags[0].pts < videoTargetPts && tech.seeking()) {
|
||
var fudgeFactor = 1 / 30;
|
||
var currentTime = tech.currentTime();
|
||
var diff = (videoTargetPts - tags[0].pts) / 1e3;
|
||
var adjustedTime = currentTime - diff;
|
||
|
||
if (adjustedTime < fudgeFactor) {
|
||
adjustedTime = 0;
|
||
}
|
||
|
||
try {
|
||
this.mediaSource_.swfObj.vjs_adjustCurrentTime(adjustedTime);
|
||
} catch (e) {
|
||
// no-op for backwards compatibility of the swf. If adjustCurrentTime fails,
// the swf may incorrectly report currentTime and buffered ranges,
// but this should not affect playback beyond the time displayed on the
// progress bar being inaccurate
|
||
}
|
||
}
|
||
|
||
// concatenate the bytes into a single segment
|
||
for (var i = 0; i < tags.length; i++) {
|
||
segmentByteLength += tags[i].bytes.byteLength;
|
||
}
|
||
segment = new Uint8Array(segmentByteLength);
|
||
for (var i = 0, j = 0; i < tags.length; i++) {
|
||
segment.set(tags[i].bytes, j);
|
||
j += tags[i].bytes.byteLength;
|
||
}
|
||
|
||
return segment;
|
||
}
|
||
|
||
/**
|
||
* Assemble the FLV tags in decoder order.
|
||
*
|
||
* @private
|
||
* @param {Array} videoTags list of video tags
|
||
* @param {Array} audioTags list of audio tags
|
||
*/
|
||
}, {
|
||
key: 'getOrderedTags_',
|
||
value: function getOrderedTags_(videoTags, audioTags) {
|
||
var tag = undefined;
|
||
var tags = [];
|
||
|
||
while (videoTags.length || audioTags.length) {
|
||
if (!videoTags.length) {
|
||
// only audio tags remain
|
||
tag = audioTags.shift();
|
||
} else if (!audioTags.length) {
|
||
// only video tags remain
|
||
tag = videoTags.shift();
|
||
} else if (audioTags[0].dts < videoTags[0].dts) {
|
||
// audio should be decoded next
|
||
tag = audioTags.shift();
|
||
} else {
|
||
// video should be decoded next
|
||
tag = videoTags.shift();
|
||
}
|
||
|
||
tags.push(tag);
|
||
}
|
||
|
||
return tags;
|
||
}
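// Illustrative example (not in the original source): tags are merged by ascending
// dts so the swf receives them in decode order. Note that the method consumes
// (shifts) the arrays it is given.
//
//   getOrderedTags_(
//     [{ dts: 0 }, { dts: 66 }],   // video tags
//     [{ dts: 23 }, { dts: 46 }]   // audio tags
//   );
//   // -> tags in dts order: 0 (video), 23 (audio), 46 (audio), 66 (video)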
}, {
|
||
key: 'onHlsReset_',
|
||
value: function onHlsReset_() {
|
||
this.transmuxer_.postMessage({ action: 'resetCaptions' });
|
||
}
|
||
}]);
|
||
|
||
return FlashSourceBuffer;
|
||
})(_videoJs2['default'].EventTarget);
|
||
|
||
exports['default'] = FlashSourceBuffer;
|
||
module.exports = exports['default'];
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"./add-text-track-data":35,"./create-text-tracks-if-necessary":37,"./flash-constants":38,"./flash-transmuxer-worker":41,"./remove-cues-from-track":43,"global/window":3,"mux.js/lib/flv":13,"webwackify":34}],41:[function(require,module,exports){
|
||
/**
|
||
* @file flash-transmuxer-worker.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _muxJsLibFlv = require('mux.js/lib/flv');
|
||
|
||
var _muxJsLibFlv2 = _interopRequireDefault(_muxJsLibFlv);
|
||
|
||
/**
|
||
* Re-emits transmuxer events by converting them into messages to the
|
||
* world outside the worker.
|
||
*
|
||
* @param {Object} transmuxer the transmuxer to wire events on
|
||
* @private
|
||
*/
|
||
var wireTransmuxerEvents = function wireTransmuxerEvents(transmuxer) {
|
||
transmuxer.on('data', function (segment) {
|
||
_globalWindow2['default'].postMessage({
|
||
action: 'data',
|
||
segment: segment
|
||
});
|
||
});
|
||
|
||
transmuxer.on('done', function (data) {
|
||
_globalWindow2['default'].postMessage({ action: 'done' });
|
||
});
|
||
};
|
||
|
||
/**
|
||
* All incoming messages route through this hash. If no function exists
|
||
* to handle an incoming message, then we ignore the message.
|
||
*
|
||
* @class MessageHandlers
|
||
* @param {Object} options the options to initialize with
|
||
*/
|
||
|
||
var MessageHandlers = (function () {
|
||
function MessageHandlers(options) {
|
||
_classCallCheck(this, MessageHandlers);
|
||
|
||
this.options = options || {};
|
||
this.init();
|
||
}
|
||
|
||
/**
|
||
* Our web worker interface so that things can talk to mux.js
|
||
* that will be running in a web worker. The scope is passed to this by
|
||
* webworkify.
|
||
*
|
||
* @param {Object} self the scope for the web worker
|
||
*/
|
||
|
||
/**
|
||
* initialize our web worker and wire all the events.
|
||
*/
|
||
|
||
_createClass(MessageHandlers, [{
|
||
key: 'init',
|
||
value: function init() {
|
||
if (this.transmuxer) {
|
||
this.transmuxer.dispose();
|
||
}
|
||
this.transmuxer = new _muxJsLibFlv2['default'].Transmuxer(this.options);
|
||
wireTransmuxerEvents(this.transmuxer);
|
||
}
|
||
|
||
/**
|
||
* Adds data (a ts segment) to the start of the transmuxer pipeline for
|
||
* processing.
|
||
*
|
||
* @param {ArrayBuffer} data data to push into the muxer
|
||
*/
|
||
}, {
|
||
key: 'push',
|
||
value: function push(data) {
|
||
// Cast array buffer to correct type for transmuxer
|
||
var segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);
|
||
|
||
this.transmuxer.push(segment);
|
||
}
|
||
|
||
/**
|
||
* Recreate the transmuxer so that the next segment added via `push`
|
||
* starts with a fresh transmuxer.
|
||
*/
|
||
}, {
|
||
key: 'reset',
|
||
value: function reset() {
|
||
this.init();
|
||
}
|
||
|
||
/**
|
||
* Forces the pipeline to finish processing the last segment and emit its
|
||
* results.
|
||
*/
|
||
}, {
|
||
key: 'flush',
|
||
value: function flush() {
|
||
this.transmuxer.flush();
|
||
}
|
||
}, {
|
||
key: 'resetCaptions',
|
||
value: function resetCaptions() {
|
||
this.transmuxer.resetCaptions();
|
||
}
|
||
}]);
|
||
|
||
return MessageHandlers;
|
||
})();
|
||
|
||
var FlashTransmuxerWorker = function FlashTransmuxerWorker(self) {
|
||
self.onmessage = function (event) {
|
||
if (event.data.action === 'init' && event.data.options) {
|
||
this.messageHandlers = new MessageHandlers(event.data.options);
|
||
return;
|
||
}
|
||
|
||
if (!this.messageHandlers) {
|
||
this.messageHandlers = new MessageHandlers();
|
||
}
|
||
|
||
if (event.data && event.data.action && event.data.action !== 'init') {
|
||
if (this.messageHandlers[event.data.action]) {
|
||
this.messageHandlers[event.data.action](event.data);
|
||
}
|
||
}
|
||
};
|
||
};
|
||
|
||
exports['default'] = function (self) {
|
||
return new FlashTransmuxerWorker(self);
|
||
};
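// Sketch of the message protocol this worker speaks (illustrative; it mirrors how
// FlashSourceBuffer drives the webwackify-wrapped worker above, with `worker` as a
// stand-in name):
//
//   worker.postMessage({ action: 'init', options: {} });
//   worker.postMessage({
//     action: 'push',
//     data: bytes.buffer,            // a TS segment as an ArrayBuffer
//     byteOffset: bytes.byteOffset,
//     byteLength: bytes.byteLength
//   }, [bytes.buffer]);              // transferred rather than copied
//   worker.postMessage({ action: 'flush' }); // results come back as 'data'/'done' messages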
module.exports = exports['default'];
|
||
|
||
},{"global/window":3,"mux.js/lib/flv":13}],42:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* @file html-media-source.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
var _get = function get(_x, _x2, _x3) { var _again = true; _function: while (_again) { var object = _x, property = _x2, receiver = _x3; _again = false; if (object === null) object = Function.prototype; var desc = Object.getOwnPropertyDescriptor(object, property); if (desc === undefined) { var parent = Object.getPrototypeOf(object); if (parent === null) { return undefined; } else { _x = parent; _x2 = property; _x3 = receiver; _again = true; desc = parent = undefined; continue _function; } } else if ('value' in desc) { return desc.value; } else { var getter = desc.get; if (getter === undefined) { return undefined; } return getter.call(receiver); } } };
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _globalDocument = require('global/document');
|
||
|
||
var _globalDocument2 = _interopRequireDefault(_globalDocument);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _virtualSourceBuffer = require('./virtual-source-buffer');
|
||
|
||
var _virtualSourceBuffer2 = _interopRequireDefault(_virtualSourceBuffer);
|
||
|
||
var _addTextTrackData = require('./add-text-track-data');
|
||
|
||
var _codecUtils = require('./codec-utils');
|
||
|
||
/**
|
||
* Our MediaSource implementation in HTML mimics the native
* MediaSource where/if possible.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource
|
||
* @class HtmlMediaSource
|
||
* @extends videojs.EventTarget
|
||
*/
|
||
|
||
var HtmlMediaSource = (function (_videojs$EventTarget) {
|
||
_inherits(HtmlMediaSource, _videojs$EventTarget);
|
||
|
||
function HtmlMediaSource() {
|
||
var _this = this;
|
||
|
||
_classCallCheck(this, HtmlMediaSource);
|
||
|
||
_get(Object.getPrototypeOf(HtmlMediaSource.prototype), 'constructor', this).call(this);
|
||
var property = undefined;
|
||
|
||
this.nativeMediaSource_ = new _globalWindow2['default'].MediaSource();
|
||
// delegate to the native MediaSource's methods by default
|
||
for (property in this.nativeMediaSource_) {
|
||
if (!(property in HtmlMediaSource.prototype) && typeof this.nativeMediaSource_[property] === 'function') {
|
||
this[property] = this.nativeMediaSource_[property].bind(this.nativeMediaSource_);
|
||
}
|
||
}
|
||
|
||
// emulate `duration` and `seekable` until seeking can be
|
||
// handled uniformly for live streams
|
||
// see https://github.com/w3c/media-source/issues/5
|
||
this.duration_ = NaN;
|
||
Object.defineProperty(this, 'duration', {
|
||
get: function get() {
|
||
if (this.duration_ === Infinity) {
|
||
return this.duration_;
|
||
}
|
||
return this.nativeMediaSource_.duration;
|
||
},
|
||
set: function set(duration) {
|
||
this.duration_ = duration;
|
||
if (duration !== Infinity) {
|
||
this.nativeMediaSource_.duration = duration;
|
||
return;
|
||
}
|
||
}
|
||
});
|
||
Object.defineProperty(this, 'seekable', {
|
||
get: function get() {
|
||
if (this.duration_ === Infinity) {
|
||
return _videoJs2['default'].createTimeRanges([[0, this.nativeMediaSource_.duration]]);
|
||
}
|
||
return this.nativeMediaSource_.seekable;
|
||
}
|
||
});
|
||
|
||
Object.defineProperty(this, 'readyState', {
|
||
get: function get() {
|
||
return this.nativeMediaSource_.readyState;
|
||
}
|
||
});
|
||
|
||
Object.defineProperty(this, 'activeSourceBuffers', {
|
||
get: function get() {
|
||
return this.activeSourceBuffers_;
|
||
}
|
||
});
|
||
|
||
// the list of virtual and native SourceBuffers created by this
|
||
// MediaSource
|
||
this.sourceBuffers = [];
|
||
|
||
this.activeSourceBuffers_ = [];
|
||
|
||
/**
|
||
* update the list of active source buffers based upon various
|
||
* information from HLS and video.js
|
||
*
|
||
* @private
|
||
*/
|
||
this.updateActiveSourceBuffers_ = function () {
|
||
// Retain the reference but empty the array
|
||
_this.activeSourceBuffers_.length = 0;
|
||
|
||
// If there is only one source buffer, then it will always be active and audio will
|
||
// be disabled based on the codec of the source buffer
|
||
if (_this.sourceBuffers.length === 1) {
|
||
var sourceBuffer = _this.sourceBuffers[0];
|
||
|
||
sourceBuffer.appendAudioInitSegment_ = true;
|
||
sourceBuffer.audioDisabled_ = !sourceBuffer.audioCodec_;
|
||
_this.activeSourceBuffers_.push(sourceBuffer);
|
||
return;
|
||
}
|
||
|
||
// There are 2 source buffers: a combined (possibly video only) source buffer
// and an audio-only source buffer.
|
||
// By default, the audio in the combined virtual source buffer is enabled
|
||
// and the audio-only source buffer (if it exists) is disabled.
|
||
var disableCombined = false;
|
||
var disableAudioOnly = true;
|
||
|
||
// TODO: maybe we can store the sourcebuffers on the track objects?
|
||
// safari may do something like this
|
||
for (var i = 0; i < _this.player_.audioTracks().length; i++) {
|
||
var track = _this.player_.audioTracks()[i];
|
||
|
||
if (track.enabled && track.kind !== 'main') {
|
||
// The enabled track is an alternate audio track so disable the audio in
|
||
// the combined source buffer and enable the audio-only source buffer.
|
||
disableCombined = true;
|
||
disableAudioOnly = false;
|
||
break;
|
||
}
|
||
}
|
||
|
||
_this.sourceBuffers.forEach(function (sourceBuffer) {
|
||
/* eslinst-disable */
|
||
// TODO once codecs are required, we can switch to using the codecs to determine
|
||
// what stream is the video stream, rather than relying on videoTracks
|
||
/* eslinst-enable */
|
||
|
||
sourceBuffer.appendAudioInitSegment_ = true;
|
||
|
||
if (sourceBuffer.videoCodec_ && sourceBuffer.audioCodec_) {
|
||
// combined
|
||
sourceBuffer.audioDisabled_ = disableCombined;
|
||
} else if (sourceBuffer.videoCodec_ && !sourceBuffer.audioCodec_) {
|
||
// If the "combined" source buffer is video only, then we do not want to
// disable the audio-only source buffer (this is mostly for demuxed
|
||
// audio and video hls)
|
||
sourceBuffer.audioDisabled_ = true;
|
||
disableAudioOnly = false;
|
||
} else if (!sourceBuffer.videoCodec_ && sourceBuffer.audioCodec_) {
|
||
// audio only
|
||
sourceBuffer.audioDisabled_ = disableAudioOnly;
|
||
if (disableAudioOnly) {
|
||
return;
|
||
}
|
||
}
|
||
|
||
_this.activeSourceBuffers_.push(sourceBuffer);
|
||
});
|
||
};
|
||
|
||
this.onPlayerMediachange_ = function () {
|
||
_this.sourceBuffers.forEach(function (sourceBuffer) {
|
||
sourceBuffer.appendAudioInitSegment_ = true;
|
||
});
|
||
};
|
||
|
||
this.onHlsReset_ = function () {
|
||
_this.sourceBuffers.forEach(function (sourceBuffer) {
|
||
if (sourceBuffer.transmuxer_) {
|
||
sourceBuffer.transmuxer_.postMessage({ action: 'resetCaptions' });
|
||
}
|
||
});
|
||
};
|
||
|
||
this.onHlsSegmentTimeMapping_ = function (event) {
|
||
_this.sourceBuffers.forEach(function (buffer) {
|
||
return buffer.timeMapping_ = event.mapping;
|
||
});
|
||
};
|
||
|
||
// Re-emit MediaSource events on the polyfill
|
||
['sourceopen', 'sourceclose', 'sourceended'].forEach(function (eventName) {
|
||
this.nativeMediaSource_.addEventListener(eventName, this.trigger.bind(this));
|
||
}, this);
|
||
|
||
// capture the associated player when the MediaSource is
|
||
// successfully attached
|
||
this.on('sourceopen', function (event) {
|
||
// Get the player this MediaSource is attached to
|
||
var video = _globalDocument2['default'].querySelector('[src="' + _this.url_ + '"]');
|
||
|
||
if (!video) {
|
||
return;
|
||
}
|
||
|
||
_this.player_ = (0, _videoJs2['default'])(video.parentNode);
|
||
|
||
// hls-reset is fired by videojs.Hls on to the tech after the main SegmentLoader
|
||
// resets its state and flushes the buffer
|
||
_this.player_.tech_.on('hls-reset', _this.onHlsReset_);
|
||
// hls-segment-time-mapping is fired by videojs.Hls on to the tech after the main
|
||
// SegmentLoader inspects an MTS segment and has an accurate stream to display
|
||
// time mapping
|
||
_this.player_.tech_.on('hls-segment-time-mapping', _this.onHlsSegmentTimeMapping_);
|
||
|
||
if (_this.player_.audioTracks && _this.player_.audioTracks()) {
|
||
_this.player_.audioTracks().on('change', _this.updateActiveSourceBuffers_);
|
||
_this.player_.audioTracks().on('addtrack', _this.updateActiveSourceBuffers_);
|
||
_this.player_.audioTracks().on('removetrack', _this.updateActiveSourceBuffers_);
|
||
}
|
||
|
||
_this.player_.on('mediachange', _this.onPlayerMediachange_);
|
||
});
|
||
|
||
this.on('sourceended', function (event) {
|
||
var duration = (0, _addTextTrackData.durationOfVideo)(_this.duration);
|
||
|
||
for (var i = 0; i < _this.sourceBuffers.length; i++) {
|
||
var sourcebuffer = _this.sourceBuffers[i];
|
||
var cues = sourcebuffer.metadataTrack_ && sourcebuffer.metadataTrack_.cues;
|
||
|
||
if (cues && cues.length) {
|
||
cues[cues.length - 1].endTime = duration;
|
||
}
|
||
}
|
||
});
|
||
|
||
// explicitly terminate any WebWorkers that were created
|
||
// by SourceHandlers
|
||
this.on('sourceclose', function (event) {
|
||
this.sourceBuffers.forEach(function (sourceBuffer) {
|
||
if (sourceBuffer.transmuxer_) {
|
||
sourceBuffer.transmuxer_.terminate();
|
||
}
|
||
});
|
||
|
||
this.sourceBuffers.length = 0;
|
||
if (!this.player_) {
|
||
return;
|
||
}
|
||
|
||
if (this.player_.audioTracks && this.player_.audioTracks()) {
|
||
this.player_.audioTracks().off('change', this.updateActiveSourceBuffers_);
|
||
this.player_.audioTracks().off('addtrack', this.updateActiveSourceBuffers_);
|
||
this.player_.audioTracks().off('removetrack', this.updateActiveSourceBuffers_);
|
||
}
|
||
|
||
// We can only change this if the player hasn't been disposed of yet
|
||
// because `off` eventually tries to use the el_ property. If it has
|
||
// been disposed of, then don't worry about it because there are no
|
||
// event handlers left to unbind anyway
|
||
if (this.player_.el_) {
|
||
this.player_.off('mediachange', this.onPlayerMediachange_);
|
||
this.player_.tech_.off('hls-reset', this.onHlsReset_);
|
||
this.player_.tech_.off('hls-segment-time-mapping', this.onHlsSegmentTimeMapping_);
|
||
}
|
||
});
|
||
}
|
||
|
||
/**
|
||
* Add a range that can now be seeked to.
|
||
*
|
||
* @param {Double} start where to start the addition
|
||
* @param {Double} end where to end the addition
|
||
* @private
|
||
*/
|
||
|
||
_createClass(HtmlMediaSource, [{
|
||
key: 'addSeekableRange_',
|
||
value: function addSeekableRange_(start, end) {
|
||
var error = undefined;
|
||
|
||
if (this.duration !== Infinity) {
|
||
error = new Error('MediaSource.addSeekableRange() can only be invoked ' + 'when the duration is Infinity');
|
||
error.name = 'InvalidStateError';
|
||
error.code = 11;
|
||
throw error;
|
||
}
|
||
|
||
if (end > this.nativeMediaSource_.duration || isNaN(this.nativeMediaSource_.duration)) {
|
||
this.nativeMediaSource_.duration = end;
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Add a source buffer to the media source.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/addSourceBuffer
|
||
* @param {String} type the content-type of the content
|
||
* @return {Object} the created source buffer
|
||
*/
|
||
}, {
|
||
key: 'addSourceBuffer',
|
||
value: function addSourceBuffer(type) {
|
||
var buffer = undefined;
|
||
var parsedType = (0, _codecUtils.parseContentType)(type);
|
||
|
||
// Create a VirtualSourceBuffer to transmux MPEG-2 transport
|
||
// stream segments into fragmented MP4s
|
||
if (/^(video|audio)\/mp2t$/i.test(parsedType.type)) {
|
||
var codecs = [];
|
||
|
||
if (parsedType.parameters && parsedType.parameters.codecs) {
|
||
codecs = parsedType.parameters.codecs.split(',');
|
||
codecs = (0, _codecUtils.translateLegacyCodecs)(codecs);
|
||
codecs = codecs.filter(function (codec) {
|
||
return (0, _codecUtils.isAudioCodec)(codec) || (0, _codecUtils.isVideoCodec)(codec);
|
||
});
|
||
}
|
||
|
||
if (codecs.length === 0) {
|
||
codecs = ['avc1.4d400d', 'mp4a.40.2'];
|
||
}
|
||
|
||
buffer = new _virtualSourceBuffer2['default'](this, codecs);
|
||
|
||
if (this.sourceBuffers.length !== 0) {
|
||
// If another VirtualSourceBuffer already exists, then we are creating a
|
||
// SourceBuffer for an alternate audio track and therefore we know that
|
||
// the source has both an audio and video track.
|
||
// That means we should trigger the manual creation of the real
|
||
// SourceBuffers instead of waiting for the transmuxer to return data
|
||
this.sourceBuffers[0].createRealSourceBuffers_();
|
||
buffer.createRealSourceBuffers_();
|
||
|
||
// Automatically disable the audio on the first source buffer if
|
||
// a second source buffer is ever created
|
||
this.sourceBuffers[0].audioDisabled_ = true;
|
||
}
|
||
} else {
|
||
// delegate to the native implementation
|
||
buffer = this.nativeMediaSource_.addSourceBuffer(type);
|
||
}
|
||
|
||
this.sourceBuffers.push(buffer);
|
||
return buffer;
|
||
}
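// Illustrative calls (not in the original source): an MPEG-2 TS mime type gets a
// VirtualSourceBuffer that transmuxes to fragmented MP4, while anything else falls
// through to the native implementation.
//
//   mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.4d400d, mp4a.40.2"');
//   // -> VirtualSourceBuffer (transmuxing path)
//   mediaSource.addSourceBuffer('video/mp4; codecs="avc1.4d400d"');
//   // -> native SourceBuffer created by window.MediaSource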
}]);
|
||
|
||
return HtmlMediaSource;
|
||
})(_videoJs2['default'].EventTarget);
|
||
|
||
exports['default'] = HtmlMediaSource;
|
||
module.exports = exports['default'];
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"./add-text-track-data":35,"./codec-utils":36,"./virtual-source-buffer":46,"global/document":2,"global/window":3}],43:[function(require,module,exports){
|
||
/**
|
||
* @file remove-cues-from-track.js
|
||
*/
|
||
|
||
/**
|
||
* Remove cues from a track on video.js.
|
||
*
|
||
* @param {Double} start start of where we should remove the cue
|
||
* @param {Double} end end of where we should remove the cue
|
||
* @param {Object} track the text track to remove the cues from
|
||
* @private
|
||
*/
|
||
"use strict";
|
||
|
||
Object.defineProperty(exports, "__esModule", {
|
||
value: true
|
||
});
|
||
var removeCuesFromTrack = function removeCuesFromTrack(start, end, track) {
|
||
var i = undefined;
|
||
var cue = undefined;
|
||
|
||
if (!track) {
|
||
return;
|
||
}
|
||
|
||
if (!track.cues) {
|
||
return;
|
||
}
|
||
|
||
i = track.cues.length;
|
||
|
||
while (i--) {
|
||
cue = track.cues[i];
|
||
|
||
// Remove any overlapping cue
|
||
if (cue.startTime <= end && cue.endTime >= start) {
|
||
track.removeCue(cue);
|
||
}
|
||
}
|
||
};
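// Illustrative example (not part of the original module, `track` stands in for a
// video.js text track): any cue that overlaps the [start, end] window is removed,
// cues entirely outside it are kept.
//
//   // track holds cues spanning [0, 2], [2, 4] and [5, 7]
//   removeCuesFromTrack(1, 4.5, track);      // removes [0, 2] and [2, 4], keeps [5, 7]
//   removeCuesFromTrack(0, Infinity, track); // clears the track entirely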
exports["default"] = removeCuesFromTrack;
|
||
module.exports = exports["default"];
|
||
|
||
},{}],44:[function(require,module,exports){
|
||
/**
|
||
* @file transmuxer-worker.js
|
||
*/
|
||
|
||
/**
|
||
* videojs-contrib-media-sources
|
||
*
|
||
* Copyright (c) 2015 Brightcove
|
||
* All rights reserved.
|
||
*
|
||
* Handles communication between the browser-world and the mux.js
|
||
* transmuxer running inside of a WebWorker by exposing a simple
|
||
* message-based interface to a Transmuxer object.
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _muxJsLibMp4 = require('mux.js/lib/mp4');
|
||
|
||
var _muxJsLibMp42 = _interopRequireDefault(_muxJsLibMp4);
|
||
|
||
/**
|
||
* Re-emits transmuxer events by converting them into messages to the
|
||
* world outside the worker.
|
||
*
|
||
* @param {Object} transmuxer the transmuxer to wire events on
|
||
* @private
|
||
*/
|
||
var wireTransmuxerEvents = function wireTransmuxerEvents(transmuxer) {
|
||
transmuxer.on('data', function (segment) {
|
||
// transfer ownership of the underlying ArrayBuffer
|
||
// instead of doing a copy to save memory
|
||
// ArrayBuffers are transferable but generic TypedArrays are not
|
||
// @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)
|
||
var initArray = segment.initSegment;
|
||
|
||
segment.initSegment = {
|
||
data: initArray.buffer,
|
||
byteOffset: initArray.byteOffset,
|
||
byteLength: initArray.byteLength
|
||
};
|
||
|
||
var typedArray = segment.data;
|
||
|
||
segment.data = typedArray.buffer;
|
||
_globalWindow2['default'].postMessage({
|
||
action: 'data',
|
||
segment: segment,
|
||
byteOffset: typedArray.byteOffset,
|
||
byteLength: typedArray.byteLength
|
||
}, [segment.data]);
|
||
});
|
||
|
||
if (transmuxer.captionStream) {
|
||
transmuxer.captionStream.on('data', function (caption) {
|
||
_globalWindow2['default'].postMessage({
|
||
action: 'caption',
|
||
data: caption
|
||
});
|
||
});
|
||
}
|
||
|
||
transmuxer.on('done', function (data) {
|
||
_globalWindow2['default'].postMessage({ action: 'done' });
|
||
});
|
||
|
||
transmuxer.on('gopInfo', function (gopInfo) {
|
||
_globalWindow2['default'].postMessage({
|
||
action: 'gopInfo',
|
||
gopInfo: gopInfo
|
||
});
|
||
});
|
||
};
|
||
|
||
/**
|
||
* All incoming messages route through this hash. If no function exists
|
||
* to handle an incoming message, then we ignore the message.
|
||
*
|
||
* @class MessageHandlers
|
||
* @param {Object} options the options to initialize with
|
||
*/
|
||
|
||
var MessageHandlers = (function () {
|
||
function MessageHandlers(options) {
|
||
_classCallCheck(this, MessageHandlers);
|
||
|
||
this.options = options || {};
|
||
this.init();
|
||
}
|
||
|
||
/**
|
||
* Our web worker interface so that things can talk to mux.js
* that will be running in a web worker. The scope is passed to this by
|
||
* webworkify.
|
||
*
|
||
* @param {Object} self the scope for the web worker
|
||
*/
|
||
|
||
/**
|
||
* initialize our web worker and wire all the events.
|
||
*/
|
||
|
||
_createClass(MessageHandlers, [{
|
||
key: 'init',
|
||
value: function init() {
|
||
if (this.transmuxer) {
|
||
this.transmuxer.dispose();
|
||
}
|
||
this.transmuxer = new _muxJsLibMp42['default'].Transmuxer(this.options);
|
||
wireTransmuxerEvents(this.transmuxer);
|
||
}
|
||
|
||
/**
|
||
* Adds data (a ts segment) to the start of the transmuxer pipeline for
|
||
* processing.
|
||
*
|
||
* @param {ArrayBuffer} data data to push into the muxer
|
||
*/
|
||
}, {
|
||
key: 'push',
|
||
value: function push(data) {
|
||
// Cast array buffer to correct type for transmuxer
|
||
var segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);
|
||
|
||
this.transmuxer.push(segment);
|
||
}
|
||
|
||
/**
|
||
* Recreate the transmuxer so that the next segment added via `push`
|
||
* starts with a fresh transmuxer.
|
||
*/
|
||
}, {
|
||
key: 'reset',
|
||
value: function reset() {
|
||
this.init();
|
||
}
|
||
|
||
/**
|
||
* Set the value that will be used as the `baseMediaDecodeTime` for the
|
||
* next segment pushed in. Subsequent segments will have their `baseMediaDecodeTime`
|
||
* set relative to the first based on the PTS values.
|
||
*
|
||
* @param {Object} data used to set the timestamp offset in the muxer
|
||
*/
|
||
}, {
|
||
key: 'setTimestampOffset',
|
||
value: function setTimestampOffset(data) {
|
||
var timestampOffset = data.timestampOffset || 0;
|
||
|
||
this.transmuxer.setBaseMediaDecodeTime(Math.round(timestampOffset * 90000));
|
||
}
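// Worked example (illustrative): the transmuxer runs on the 90kHz MPEG-TS clock, so
// a timestampOffset of 10 seconds becomes
//
//   Math.round(10 * 90000); // 900000 ticks used as the baseMediaDecodeTime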
}, {
|
||
key: 'setAudioAppendStart',
|
||
value: function setAudioAppendStart(data) {
|
||
this.transmuxer.setAudioAppendStart(Math.ceil(data.appendStart * 90000));
|
||
}
|
||
|
||
/**
|
||
* Forces the pipeline to finish processing the last segment and emit its
|
||
* results.
|
||
*
|
||
* @param {Object} data event data, not really used
|
||
*/
|
||
}, {
|
||
key: 'flush',
|
||
value: function flush(data) {
|
||
this.transmuxer.flush();
|
||
}
|
||
}, {
|
||
key: 'resetCaptions',
|
||
value: function resetCaptions() {
|
||
this.transmuxer.resetCaptions();
|
||
}
|
||
}, {
|
||
key: 'alignGopsWith',
|
||
value: function alignGopsWith(data) {
|
||
this.transmuxer.alignGopsWith(data.gopsToAlignWith.slice());
|
||
}
|
||
}]);
|
||
|
||
return MessageHandlers;
|
||
})();
|
||
|
||
var TransmuxerWorker = function TransmuxerWorker(self) {
|
||
self.onmessage = function (event) {
|
||
if (event.data.action === 'init' && event.data.options) {
|
||
this.messageHandlers = new MessageHandlers(event.data.options);
|
||
return;
|
||
}
|
||
|
||
if (!this.messageHandlers) {
|
||
this.messageHandlers = new MessageHandlers();
|
||
}
|
||
|
||
if (event.data && event.data.action && event.data.action !== 'init') {
|
||
if (this.messageHandlers[event.data.action]) {
|
||
this.messageHandlers[event.data.action](event.data);
|
||
}
|
||
}
|
||
};
|
||
};
|
||
|
||
exports['default'] = function (self) {
|
||
return new TransmuxerWorker(self);
|
||
};
|
||
|
||
module.exports = exports['default'];
|
||
|
||
},{"global/window":3,"mux.js/lib/mp4":24}],45:[function(require,module,exports){
|
||
(function (global){
|
||
/**
|
||
* @file videojs-contrib-media-sources.js
|
||
*/
|
||
'use strict';
|
||
|
||
Object.defineProperty(exports, '__esModule', {
|
||
value: true
|
||
});
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _flashMediaSource = require('./flash-media-source');
|
||
|
||
var _flashMediaSource2 = _interopRequireDefault(_flashMediaSource);
|
||
|
||
var _htmlMediaSource = require('./html-media-source');
|
||
|
||
var _htmlMediaSource2 = _interopRequireDefault(_htmlMediaSource);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var urlCount = 0;
|
||
|
||
// ------------
|
||
// Media Source
|
||
// ------------
|
||
|
||
var defaults = {
|
||
// how to determine the MediaSource implementation to use. There
|
||
// are three available modes:
|
||
// - auto: use native MediaSources where available and Flash
|
||
// everywhere else
|
||
// - html5: always use native MediaSources
|
||
// - flash: always use the Flash MediaSource polyfill
|
||
mode: 'auto'
|
||
};
|
||
|
||
// store references to the media sources so they can be connected
|
||
// to a video element (a swf object)
|
||
// TODO: can we store this somewhere local to this module?
|
||
_videoJs2['default'].mediaSources = {};
|
||
|
||
/**
|
||
* Provide a method for a swf object to notify JS that a
|
||
* media source is now open.
|
||
*
|
||
* @param {String} msObjectURL string referencing the MSE Object URL
|
||
* @param {String} swfId the swf id
|
||
*/
|
||
var open = function open(msObjectURL, swfId) {
|
||
var mediaSource = _videoJs2['default'].mediaSources[msObjectURL];
|
||
|
||
if (mediaSource) {
|
||
mediaSource.trigger({ type: 'sourceopen', swfId: swfId });
|
||
} else {
|
||
throw new Error('Media Source not found (Video.js)');
|
||
}
|
||
};
|
||
|
||
/**
|
||
* Check to see if the native MediaSource object exists and supports
|
||
* an MP4 container with both H.264 video and AAC-LC audio.
|
||
*
|
||
* @return {Boolean} if native media sources are supported
|
||
*/
|
||
var supportsNativeMediaSources = function supportsNativeMediaSources() {
|
||
return !!_globalWindow2['default'].MediaSource && !!_globalWindow2['default'].MediaSource.isTypeSupported && _globalWindow2['default'].MediaSource.isTypeSupported('video/mp4;codecs="avc1.4d400d,mp4a.40.2"');
|
||
};
|
||
|
||
/**
|
||
* An emulation of the MediaSource API so that we can support
|
||
* native and non-native functionality such as flash and
|
||
* video/mp2t videos. Returns an instance of HtmlMediaSource or
|
||
* FlashMediaSource depending on what is supported and what options
|
||
* are passed in.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/MediaSource
|
||
* @param {Object} options options to use during setup.
|
||
*/
|
||
var MediaSource = function MediaSource(options) {
|
||
var settings = _videoJs2['default'].mergeOptions(defaults, options);
|
||
|
||
this.MediaSource = {
|
||
open: open,
|
||
supportsNativeMediaSources: supportsNativeMediaSources
|
||
};
|
||
|
||
// determine whether HTML MediaSources should be used
|
||
if (settings.mode === 'html5' || settings.mode === 'auto' && supportsNativeMediaSources()) {
|
||
return new _htmlMediaSource2['default']();
|
||
} else if (_videoJs2['default'].getTech('Flash')) {
|
||
return new _flashMediaSource2['default']();
|
||
}
|
||
|
||
throw new Error('Cannot use Flash or Html5 to create a MediaSource for this video');
|
||
};
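// Illustrative usage (not in the original source): how the `mode` option picks an
// implementation once videojs.MediaSource is exported below.
//
//   var autoMs  = new videojs.MediaSource();                  // 'auto': native MSE when
//                                                             // isTypeSupported passes,
//                                                             // otherwise the Flash polyfill
//   var htmlMs  = new videojs.MediaSource({ mode: 'html5' }); // always HtmlMediaSource
//   var flashMs = new videojs.MediaSource({ mode: 'flash' }); // FlashMediaSource, provided
//                                                             // the Flash tech is registered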
exports.MediaSource = MediaSource;
|
||
MediaSource.open = open;
|
||
MediaSource.supportsNativeMediaSources = supportsNativeMediaSources;
|
||
|
||
/**
|
||
* A wrapper around the native URL for our MSE object
|
||
* implementation, this object is exposed under videojs.URL
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/URL/URL
|
||
*/
|
||
var URL = {
|
||
/**
|
||
* A wrapper around the native createObjectURL for our objects.
|
||
* This function maps a native or emulated mediaSource to a blob
|
||
* url so that it can be loaded into video.js
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL
|
||
* @param {MediaSource} object the object to create a blob url for
|
||
*/
|
||
createObjectURL: function createObjectURL(object) {
|
||
var objectUrlPrefix = 'blob:vjs-media-source/';
|
||
var url = undefined;
|
||
|
||
// use the native MediaSource to generate an object URL
|
||
if (object instanceof _htmlMediaSource2['default']) {
|
||
url = _globalWindow2['default'].URL.createObjectURL(object.nativeMediaSource_);
|
||
object.url_ = url;
|
||
return url;
|
||
}
|
||
// if the object isn't an emulated MediaSource, delegate to the
|
||
// native implementation
|
||
if (!(object instanceof _flashMediaSource2['default'])) {
|
||
url = _globalWindow2['default'].URL.createObjectURL(object);
|
||
object.url_ = url;
|
||
return url;
|
||
}
|
||
|
||
// build a URL that can be used to map back to the emulated
|
||
// MediaSource
|
||
url = objectUrlPrefix + urlCount;
|
||
|
||
urlCount++;
|
||
|
||
// setup the mapping back to object
|
||
_videoJs2['default'].mediaSources[url] = object;
|
||
|
||
return url;
|
||
}
|
||
};
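// Illustrative usage (not in the original source): native and HTML-emulated sources
// go through window.URL, while a FlashMediaSource gets a fake
// 'blob:vjs-media-source/N' url that `open()` can later map back to the object.
//
//   var url = videojs.URL.createObjectURL(flashMediaSource);
//   // url === 'blob:vjs-media-source/0' for the first emulated source, and
//   // videojs.mediaSources[url] === flashMediaSource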
exports.URL = URL;
|
||
_videoJs2['default'].MediaSource = MediaSource;
|
||
_videoJs2['default'].URL = URL;
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"./flash-media-source":39,"./html-media-source":42,"global/window":3}],46:[function(require,module,exports){
|
||
(function (global){
/**
 * @file virtual-source-buffer.js
 */
'use strict';

Object.defineProperty(exports, '__esModule', {
  value: true
});

var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();

var _get = function get(_x, _x2, _x3) { var _again = true; _function: while (_again) { var object = _x, property = _x2, receiver = _x3; _again = false; if (object === null) object = Function.prototype; var desc = Object.getOwnPropertyDescriptor(object, property); if (desc === undefined) { var parent = Object.getPrototypeOf(object); if (parent === null) { return undefined; } else { _x = parent; _x2 = property; _x3 = receiver; _again = true; desc = parent = undefined; continue _function; } } else if ('value' in desc) { return desc.value; } else { var getter = desc.get; if (getter === undefined) { return undefined; } return getter.call(receiver); } } };

function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }

function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }

function _inherits(subClass, superClass) { if (typeof superClass !== 'function' && superClass !== null) { throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }

var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);

var _videoJs2 = _interopRequireDefault(_videoJs);

var _createTextTracksIfNecessary = require('./create-text-tracks-if-necessary');

var _createTextTracksIfNecessary2 = _interopRequireDefault(_createTextTracksIfNecessary);

var _removeCuesFromTrack = require('./remove-cues-from-track');

var _removeCuesFromTrack2 = _interopRequireDefault(_removeCuesFromTrack);

var _addTextTrackData = require('./add-text-track-data');

var _webwackify = require('webwackify');

var _webwackify2 = _interopRequireDefault(_webwackify);

var _transmuxerWorker = require('./transmuxer-worker');

var _transmuxerWorker2 = _interopRequireDefault(_transmuxerWorker);

var _codecUtils = require('./codec-utils');

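// Try to resolve a standalone path for the bundled transmuxer worker script so
// that webwackify (used further below) can spawn it as a real Web Worker;
// returns undefined when require.resolve is not available in this environment.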
var resolveTransmuxWorker = function resolveTransmuxWorker() {
  var result = undefined;

  try {
    result = require.resolve('./transmuxer-worker');
  } catch (e) {
    // no result
  }

  return result;
};

// We create a wrapper around the SourceBuffer so that we can manage the
// state of the `updating` property manually. We have to do this because
// Firefox changes `updating` to false long before triggering `updateend`
// events and that was causing strange problems in videojs-contrib-hls
var makeWrappedSourceBuffer = function makeWrappedSourceBuffer(mediaSource, mimeType) {
  var sourceBuffer = mediaSource.addSourceBuffer(mimeType);
  var wrapper = Object.create(null);

  wrapper.updating = false;
  wrapper.realBuffer_ = sourceBuffer;

  var _loop = function (key) {
    if (typeof sourceBuffer[key] === 'function') {
      wrapper[key] = function () {
        return sourceBuffer[key].apply(sourceBuffer, arguments);
      };
    } else if (typeof wrapper[key] === 'undefined') {
      Object.defineProperty(wrapper, key, {
        get: function get() {
          return sourceBuffer[key];
        },
        set: function set(v) {
          return sourceBuffer[key] = v;
        }
      });
    }
  };

  for (var key in sourceBuffer) {
    _loop(key);
  }

  return wrapper;
};

/**
 * Returns a list of gops in the buffer that have a pts value of 3 seconds or more in
 * front of current time.
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Player} player
 *        The player instance
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 * @return {Array}
 *         List of gops considered safe to append over
 */
var gopsSafeToAlignWith = function gopsSafeToAlignWith(buffer, player, mapping) {
  if (!player || !buffer.length) {
    return [];
  }

  // pts value for current time + 3 seconds to give a bit more wiggle room
  var currentTimePts = Math.ceil((player.currentTime() - mapping + 3) * 90000);

  var i = undefined;

  for (i = 0; i < buffer.length; i++) {
    if (buffer[i].pts > currentTimePts) {
      break;
    }
  }

  return buffer.slice(i);
};

exports.gopsSafeToAlignWith = gopsSafeToAlignWith;
/**
 * Appends gop information (timing and byteLength) received by the transmuxer for the
 * gops appended in the last call to appendBuffer
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Array} gops
 *        List of new gop information
 * @param {boolean} replace
 *        If true, replace the buffer with the new gop information. If false, append the
 *        new gop information to the buffer at the right location in time.
 * @return {Array}
 *         Updated list of gop information
 */
var updateGopBuffer = function updateGopBuffer(buffer, gops, replace) {
  if (!gops.length) {
    return buffer;
  }

  if (replace) {
    // If we are in safe append mode, then completely overwrite the gop buffer
    // with the most recently appended data. This will make sure that when appending
    // future segments, we only try to align with gops that are both ahead of current
    // time and in the last segment appended.
    return gops.slice();
  }

  var start = gops[0].pts;

  var i = 0;

  for (i; i < buffer.length; i++) {
    if (buffer[i].pts >= start) {
      break;
    }
  }

  return buffer.slice(0, i).concat(gops);
};

exports.updateGopBuffer = updateGopBuffer;
/**
 * Removes gop information in buffer that overlaps with provided start and end
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Double} start
 *        position to start the remove at
 * @param {Double} end
 *        position to end the remove at
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 */
var removeGopBuffer = function removeGopBuffer(buffer, start, end, mapping) {
  var startPts = Math.ceil((start - mapping) * 90000);
  var endPts = Math.ceil((end - mapping) * 90000);
  var updatedBuffer = buffer.slice();

  var i = buffer.length;

  while (i--) {
    if (buffer[i].pts <= endPts) {
      break;
    }
  }

  if (i === -1) {
    // no removal because end of remove range is before start of buffer
    return updatedBuffer;
  }

  var j = i + 1;

  while (j--) {
    if (buffer[j].pts <= startPts) {
      break;
    }
  }

  // clamp remove range start to 0 index
  j = Math.max(j, 0);

  updatedBuffer.splice(j, i - j + 1);

  return updatedBuffer;
};

exports.removeGopBuffer = removeGopBuffer;
/**
 * VirtualSourceBuffers exist so that we can transmux non-native formats
 * into a native format, but keep the same api as a native source buffer.
 * It creates a transmuxer that works in its own thread (a web worker), and
 * that transmuxer muxes the data into a native format. VirtualSourceBuffer will
 * then send all of that data to the native source buffer so that it is
 * indistinguishable from a natively supported format.
 *
 * @param {HtmlMediaSource} mediaSource the parent mediaSource
 * @param {Array} codecs array of codecs that we will be dealing with
 * @class VirtualSourceBuffer
 * @extends video.js.EventTarget
 */

var VirtualSourceBuffer = (function (_videojs$EventTarget) {
|
||
_inherits(VirtualSourceBuffer, _videojs$EventTarget);
|
||
|
||
function VirtualSourceBuffer(mediaSource, codecs) {
|
||
var _this = this;
|
||
|
||
_classCallCheck(this, VirtualSourceBuffer);
|
||
|
||
_get(Object.getPrototypeOf(VirtualSourceBuffer.prototype), 'constructor', this).call(this, _videoJs2['default'].EventTarget);
|
||
this.timestampOffset_ = 0;
|
||
this.pendingBuffers_ = [];
|
||
this.bufferUpdating_ = false;
|
||
|
||
this.mediaSource_ = mediaSource;
|
||
this.codecs_ = codecs;
|
||
this.audioCodec_ = null;
|
||
this.videoCodec_ = null;
|
||
this.audioDisabled_ = false;
|
||
this.appendAudioInitSegment_ = true;
|
||
this.gopBuffer_ = [];
|
||
this.timeMapping_ = 0;
|
||
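// safeAppend_ is true on IE 11; it switches gop tracking into replace mode
// (see updateGopBuffer via appendGopInfo_) and enables alignGopsAtEnd in the
// transmuxer options set up below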
this.safeAppend_ = _videoJs2['default'].browser.IE_VERSION >= 11;
|
||
|
||
var options = {
|
||
remux: false,
|
||
alignGopsAtEnd: this.safeAppend_
|
||
};
|
||
|
||
this.codecs_.forEach(function (codec) {
|
||
if ((0, _codecUtils.isAudioCodec)(codec)) {
|
||
_this.audioCodec_ = codec;
|
||
} else if ((0, _codecUtils.isVideoCodec)(codec)) {
|
||
_this.videoCodec_ = codec;
|
||
}
|
||
});
|
||
|
||
// append muxed segments to their respective native buffers as
|
||
// soon as they are available
|
||
this.transmuxer_ = (0, _webwackify2['default'])(_transmuxerWorker2['default'], resolveTransmuxWorker());
|
||
this.transmuxer_.postMessage({ action: 'init', options: options });
|
||
|
||
this.transmuxer_.onmessage = function (event) {
|
||
if (event.data.action === 'data') {
|
||
return _this.data_(event);
|
||
}
|
||
|
||
if (event.data.action === 'done') {
|
||
return _this.done_(event);
|
||
}
|
||
|
||
if (event.data.action === 'gopInfo') {
|
||
return _this.appendGopInfo_(event);
|
||
}
|
||
};
|
||
|
||
// this timestampOffset is a property with the side-effect of resetting
|
||
// baseMediaDecodeTime in the transmuxer on the setter
|
||
Object.defineProperty(this, 'timestampOffset', {
|
||
get: function get() {
|
||
return this.timestampOffset_;
|
||
},
|
||
set: function set(val) {
|
||
if (typeof val === 'number' && val >= 0) {
|
||
this.timestampOffset_ = val;
|
||
this.appendAudioInitSegment_ = true;
|
||
|
||
// reset gop buffer on timestampoffset as this signals a change in timeline
|
||
this.gopBuffer_.length = 0;
|
||
this.timeMapping_ = 0;
|
||
|
||
// We have to tell the transmuxer to set the baseMediaDecodeTime to
|
||
// the desired timestampOffset for the next segment
|
||
this.transmuxer_.postMessage({
|
||
action: 'setTimestampOffset',
|
||
timestampOffset: val
|
||
});
|
||
}
|
||
}
|
||
});
|
||
|
||
// setting the append window affects both source buffers
|
||
Object.defineProperty(this, 'appendWindowStart', {
|
||
get: function get() {
|
||
return (this.videoBuffer_ || this.audioBuffer_).appendWindowStart;
|
||
},
|
||
set: function set(start) {
|
||
if (this.videoBuffer_) {
|
||
this.videoBuffer_.appendWindowStart = start;
|
||
}
|
||
if (this.audioBuffer_) {
|
||
this.audioBuffer_.appendWindowStart = start;
|
||
}
|
||
}
|
||
});
|
||
|
||
// this buffer is "updating" if either of its native buffers are
|
||
Object.defineProperty(this, 'updating', {
|
||
get: function get() {
|
||
return !!(this.bufferUpdating_ || !this.audioDisabled_ && this.audioBuffer_ && this.audioBuffer_.updating || this.videoBuffer_ && this.videoBuffer_.updating);
|
||
}
|
||
});
|
||
|
||
// the buffered property is the intersection of the buffered
|
||
// ranges of the native source buffers
|
||
Object.defineProperty(this, 'buffered', {
|
||
get: function get() {
|
||
var start = null;
|
||
var end = null;
|
||
var arity = 0;
|
||
var extents = [];
|
||
var ranges = [];
|
||
|
||
// neither buffer has been created yet
|
||
if (!this.videoBuffer_ && !this.audioBuffer_) {
|
||
return _videoJs2['default'].createTimeRange();
|
||
}
|
||
|
||
// only one buffer is configured
|
||
if (!this.videoBuffer_) {
|
||
return this.audioBuffer_.buffered;
|
||
}
|
||
if (!this.audioBuffer_) {
|
||
return this.videoBuffer_.buffered;
|
||
}
|
||
|
||
// both buffers are configured
|
||
if (this.audioDisabled_) {
|
||
return this.videoBuffer_.buffered;
|
||
}
|
||
|
||
// both buffers are empty
|
||
if (this.videoBuffer_.buffered.length === 0 && this.audioBuffer_.buffered.length === 0) {
|
||
return _videoJs2['default'].createTimeRange();
|
||
}
|
||
|
||
// Handle the case where we have both buffers and create an
|
||
// intersection of the two
|
||
var videoBuffered = this.videoBuffer_.buffered;
|
||
var audioBuffered = this.audioBuffer_.buffered;
|
||
var count = videoBuffered.length;
|
||
|
||
// A) Gather up all start and end times
|
||
while (count--) {
|
||
extents.push({ time: videoBuffered.start(count), type: 'start' });
|
||
extents.push({ time: videoBuffered.end(count), type: 'end' });
|
||
}
|
||
count = audioBuffered.length;
|
||
while (count--) {
|
||
extents.push({ time: audioBuffered.start(count), type: 'start' });
|
||
extents.push({ time: audioBuffered.end(count), type: 'end' });
|
||
}
|
||
// B) Sort them by time
|
||
extents.sort(function (a, b) {
|
||
return a.time - b.time;
|
||
});
|
||
|
||
// C) Go along one by one incrementing arity for start and decrementing
|
||
// arity for ends
|
||
for (count = 0; count < extents.length; count++) {
|
||
if (extents[count].type === 'start') {
|
||
arity++;
|
||
|
||
// D) If arity is ever incremented to 2 we are entering an
|
||
// overlapping range
|
||
if (arity === 2) {
|
||
start = extents[count].time;
|
||
}
|
||
} else if (extents[count].type === 'end') {
|
||
arity--;
|
||
|
||
// E) If arity is ever decremented to 1 we are leaving an
// overlapping range
|
||
if (arity === 1) {
|
||
end = extents[count].time;
|
||
}
|
||
}
|
||
|
||
// F) Record overlapping ranges
|
||
if (start !== null && end !== null) {
|
||
ranges.push([start, end]);
|
||
start = null;
|
||
end = null;
|
||
}
|
||
}
|
||
|
||
return _videoJs2['default'].createTimeRanges(ranges);
|
||
}
|
||
});
|
||
}
|
||
|
||
/**
|
||
* When we get a data event from the transmuxer
|
||
* we call this function and handle the data that
|
||
* was sent to us
|
||
*
|
||
* @private
|
||
* @param {Event} event the data event from the transmuxer
|
||
*/
|
||
|
||
_createClass(VirtualSourceBuffer, [{
|
||
key: 'data_',
|
||
value: function data_(event) {
|
||
var segment = event.data.segment;
|
||
|
||
// Cast ArrayBuffer to TypedArray
|
||
segment.data = new Uint8Array(segment.data, event.data.byteOffset, event.data.byteLength);
|
||
|
||
segment.initSegment = new Uint8Array(segment.initSegment.data, segment.initSegment.byteOffset, segment.initSegment.byteLength);
|
||
|
||
(0, _createTextTracksIfNecessary2['default'])(this, this.mediaSource_, segment);
|
||
|
||
// Add the segments to the pendingBuffers array
|
||
this.pendingBuffers_.push(segment);
|
||
return;
|
||
}
|
||
|
||
/**
|
||
* When we get a done event from the transmuxer
|
||
* we call this function and we process all
|
||
* of the pending data that we have been saving in the
|
||
* data_ function
|
||
*
|
||
* @private
|
||
* @param {Event} event the done event from the transmuxer
|
||
*/
|
||
}, {
|
||
key: 'done_',
|
||
value: function done_(event) {
|
||
// Don't process and append data if the mediaSource is closed
|
||
if (this.mediaSource_.readyState === 'closed') {
|
||
this.pendingBuffers_.length = 0;
|
||
return;
|
||
}
|
||
|
||
// All buffers should have been flushed from the muxer
|
||
// start processing anything we have received
|
||
this.processPendingSegments_();
|
||
return;
|
||
}
|
||
|
||
/**
|
||
* Create our internal native audio/video source buffers and add
|
||
* event handlers to them with the following conditions:
|
||
* 1. they do not already exist on the mediaSource
|
||
* 2. this VSB has a codec for them
|
||
*
|
||
* @private
|
||
*/
|
||
}, {
|
||
key: 'createRealSourceBuffers_',
|
||
value: function createRealSourceBuffers_() {
|
||
var _this2 = this;
|
||
|
||
var types = ['audio', 'video'];
|
||
|
||
types.forEach(function (type) {
|
||
// Don't create a SourceBuffer of this type if we don't have a
|
||
// codec for it
|
||
if (!_this2[type + 'Codec_']) {
|
||
return;
|
||
}
|
||
|
||
// Do nothing if a SourceBuffer of this type already exists
|
||
if (_this2[type + 'Buffer_']) {
|
||
return;
|
||
}
|
||
|
||
var buffer = null;
|
||
|
||
// If the mediasource already has a SourceBuffer for the codec
|
||
// use that
|
||
if (_this2.mediaSource_[type + 'Buffer_']) {
|
||
buffer = _this2.mediaSource_[type + 'Buffer_'];
|
||
// In multiple audio track cases, the audio source buffer is disabled
|
||
// on the main VirtualSourceBuffer by the HTMLMediaSource much earlier
|
||
// than createRealSourceBuffers_ is called to create the second
|
||
// VirtualSourceBuffer because that happens as a side-effect of
|
||
// videojs-contrib-hls starting the audioSegmentLoader. As a result,
|
||
// the audioBuffer is essentially "ownerless" and no one will toggle
|
||
// the `updating` state back to false once the `updateend` event is received
|
||
//
|
||
// Setting `updating` to false manually will work around this
|
||
// situation and allow work to continue
|
||
buffer.updating = false;
|
||
} else {
|
||
var codecProperty = type + 'Codec_';
|
||
var mimeType = type + '/mp4;codecs="' + _this2[codecProperty] + '"';
|
||
|
||
buffer = makeWrappedSourceBuffer(_this2.mediaSource_.nativeMediaSource_, mimeType);
|
||
|
||
_this2.mediaSource_[type + 'Buffer_'] = buffer;
|
||
}
|
||
|
||
_this2[type + 'Buffer_'] = buffer;
|
||
|
||
// Wire up the events to the SourceBuffer
|
||
['update', 'updatestart', 'updateend'].forEach(function (event) {
|
||
buffer.addEventListener(event, function () {
|
||
// if audio is disabled
|
||
if (type === 'audio' && _this2.audioDisabled_) {
|
||
return;
|
||
}
|
||
|
||
if (event === 'updateend') {
|
||
_this2[type + 'Buffer_'].updating = false;
|
||
}
|
||
|
||
var shouldTrigger = types.every(function (t) {
|
||
// skip checking audio's updating status if audio
|
||
// is not enabled
|
||
if (t === 'audio' && _this2.audioDisabled_) {
|
||
return true;
|
||
}
|
||
// if the other type is updating we don't trigger
if (type !== t && _this2[t + 'Buffer_'] && _this2[t + 'Buffer_'].updating) {
|
||
return false;
|
||
}
|
||
return true;
|
||
});
|
||
|
||
if (shouldTrigger) {
|
||
return _this2.trigger(event);
|
||
}
|
||
});
|
||
});
|
||
});
|
||
}
|
||
|
||
/**
|
||
* Emulate the native mediasource function, but our function will
|
||
* send all of the proposed segments to the transmuxer so that we
|
||
* can transmux them before we append them to our internal
|
||
* native source buffers in the correct format.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/appendBuffer
|
||
* @param {Uint8Array} segment the segment to append to the buffer
|
||
*/
|
||
}, {
|
||
key: 'appendBuffer',
|
||
value: function appendBuffer(segment) {
|
||
// Start the internal "updating" state
|
||
this.bufferUpdating_ = true;
|
||
|
||
if (this.audioBuffer_ && this.audioBuffer_.buffered.length) {
|
||
var audioBuffered = this.audioBuffer_.buffered;
|
||
|
||
this.transmuxer_.postMessage({
|
||
action: 'setAudioAppendStart',
|
||
appendStart: audioBuffered.end(audioBuffered.length - 1)
|
||
});
|
||
}
|
||
|
||
if (this.videoBuffer_) {
|
||
this.transmuxer_.postMessage({
|
||
action: 'alignGopsWith',
|
||
gopsToAlignWith: gopsSafeToAlignWith(this.gopBuffer_, this.mediaSource_.player_, this.timeMapping_)
|
||
});
|
||
}
|
||
|
||
this.transmuxer_.postMessage({
|
||
action: 'push',
|
||
// Send the typed-array of data as an ArrayBuffer so that
|
||
// it can be sent as a "Transferable" and avoid the costly
|
||
// memory copy
|
||
data: segment.buffer,
|
||
|
||
// To recreate the original typed-array, we need information
|
||
// about what portion of the ArrayBuffer it was a view into
|
||
byteOffset: segment.byteOffset,
|
||
byteLength: segment.byteLength
|
||
}, [segment.buffer]);
|
||
this.transmuxer_.postMessage({ action: 'flush' });
|
||
}
|
||
|
||
/**
|
||
* Appends gop information (timing and byteLength) received by the transmuxer for the
|
||
* gops appended in the last call to appendBuffer
|
||
*
|
||
* @param {Event} event
|
||
* The gopInfo event from the transmuxer
|
||
* @param {Array} event.data.gopInfo
|
||
* List of gop info to append
|
||
*/
|
||
}, {
|
||
key: 'appendGopInfo_',
|
||
value: function appendGopInfo_(event) {
|
||
this.gopBuffer_ = updateGopBuffer(this.gopBuffer_, event.data.gopInfo, this.safeAppend_);
|
||
}
|
||
|
||
/**
|
||
* Emulate the native mediasource function and remove parts
|
||
* of the buffer from any of our internal buffers that exist
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/remove
|
||
* @param {Double} start position to start the remove at
|
||
* @param {Double} end position to end the remove at
|
||
*/
|
||
}, {
|
||
key: 'remove',
|
||
value: function remove(start, end) {
|
||
if (this.videoBuffer_) {
|
||
this.videoBuffer_.updating = true;
|
||
this.videoBuffer_.remove(start, end);
|
||
this.gopBuffer_ = removeGopBuffer(this.gopBuffer_, start, end, this.timeMapping_);
|
||
}
|
||
if (!this.audioDisabled_ && this.audioBuffer_) {
|
||
this.audioBuffer_.updating = true;
|
||
this.audioBuffer_.remove(start, end);
|
||
}
|
||
|
||
// Remove Metadata Cues (id3)
|
||
(0, _removeCuesFromTrack2['default'])(start, end, this.metadataTrack_);
|
||
|
||
// Remove Any Captions
|
||
if (this.inbandTextTracks_) {
|
||
for (var track in this.inbandTextTracks_) {
|
||
(0, _removeCuesFromTrack2['default'])(start, end, this.inbandTextTracks_[track]);
|
||
}
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Process any segments that the muxer has output
|
||
* Concatenate segments together based on type and append them into
|
||
* their respective sourceBuffers
|
||
*
|
||
* @private
|
||
*/
|
||
}, {
|
||
key: 'processPendingSegments_',
|
||
value: function processPendingSegments_() {
|
||
var sortedSegments = {
|
||
video: {
|
||
segments: [],
|
||
bytes: 0
|
||
},
|
||
audio: {
|
||
segments: [],
|
||
bytes: 0
|
||
},
|
||
captions: [],
|
||
metadata: []
|
||
};
|
||
|
||
// Sort segments into separate video/audio arrays and
|
||
// keep track of their total byte lengths
|
||
sortedSegments = this.pendingBuffers_.reduce(function (segmentObj, segment) {
|
||
var type = segment.type;
|
||
var data = segment.data;
|
||
var initSegment = segment.initSegment;
|
||
|
||
segmentObj[type].segments.push(data);
|
||
segmentObj[type].bytes += data.byteLength;
|
||
|
||
segmentObj[type].initSegment = initSegment;
|
||
|
||
// Gather any captions into a single array
|
||
if (segment.captions) {
|
||
segmentObj.captions = segmentObj.captions.concat(segment.captions);
|
||
}
|
||
|
||
if (segment.info) {
|
||
segmentObj[type].info = segment.info;
|
||
}
|
||
|
||
// Gather any metadata into a single array
|
||
if (segment.metadata) {
|
||
segmentObj.metadata = segmentObj.metadata.concat(segment.metadata);
|
||
}
|
||
|
||
return segmentObj;
|
||
}, sortedSegments);
|
||
|
||
// Create the real source buffers if they don't exist by now since we
|
||
// finally are sure what tracks are contained in the source
|
||
if (!this.videoBuffer_ && !this.audioBuffer_) {
|
||
// Remove any codecs that may have been specified by default but
|
||
// are no longer applicable now
|
||
if (sortedSegments.video.bytes === 0) {
|
||
this.videoCodec_ = null;
|
||
}
|
||
if (sortedSegments.audio.bytes === 0) {
|
||
this.audioCodec_ = null;
|
||
}
|
||
|
||
this.createRealSourceBuffers_();
|
||
}
|
||
|
||
if (sortedSegments.audio.info) {
|
||
this.mediaSource_.trigger({ type: 'audioinfo', info: sortedSegments.audio.info });
|
||
}
|
||
if (sortedSegments.video.info) {
|
||
this.mediaSource_.trigger({ type: 'videoinfo', info: sortedSegments.video.info });
|
||
}
|
||
|
||
if (this.appendAudioInitSegment_) {
|
||
if (!this.audioDisabled_ && this.audioBuffer_) {
|
||
sortedSegments.audio.segments.unshift(sortedSegments.audio.initSegment);
|
||
sortedSegments.audio.bytes += sortedSegments.audio.initSegment.byteLength;
|
||
}
|
||
this.appendAudioInitSegment_ = false;
|
||
}
|
||
|
||
var triggerUpdateend = false;
|
||
|
||
// Merge multiple video and audio segments into one and append
|
||
if (this.videoBuffer_ && sortedSegments.video.bytes) {
|
||
sortedSegments.video.segments.unshift(sortedSegments.video.initSegment);
|
||
sortedSegments.video.bytes += sortedSegments.video.initSegment.byteLength;
|
||
this.concatAndAppendSegments_(sortedSegments.video, this.videoBuffer_);
|
||
// TODO: are video tracks the only ones with text tracks?
|
||
(0, _addTextTrackData.addTextTrackData)(this, sortedSegments.captions, sortedSegments.metadata);
|
||
} else if (this.videoBuffer_ && (this.audioDisabled_ || !this.audioBuffer_)) {
|
||
// The transmuxer did not return any bytes of video, meaning it was all trimmed
|
||
// for gop alignment. Since we have a video buffer and audio is disabled, updateend
|
||
// will never be triggered by this source buffer, which will cause contrib-hls
|
||
// to be stuck forever waiting for updateend. If audio is not disabled, updateend
|
||
// will be triggered by the audio buffer, which will be sent upwards since the video
|
||
// buffer will not be in an updating state.
|
||
triggerUpdateend = true;
|
||
}
|
||
|
||
if (!this.audioDisabled_ && this.audioBuffer_) {
|
||
this.concatAndAppendSegments_(sortedSegments.audio, this.audioBuffer_);
|
||
}
|
||
|
||
this.pendingBuffers_.length = 0;
|
||
|
||
if (triggerUpdateend) {
|
||
this.trigger('updateend');
|
||
}
|
||
|
||
// We are no longer in the internal "updating" state
|
||
this.bufferUpdating_ = false;
|
||
}
|
||
|
||
/**
|
||
* Combine all segments into a single Uint8Array and then append them
|
||
* to the destination buffer
|
||
*
|
||
* @param {Object} segmentObj
|
||
* @param {SourceBuffer} destinationBuffer native source buffer to append data to
|
||
* @private
|
||
*/
|
||
}, {
|
||
key: 'concatAndAppendSegments_',
|
||
value: function concatAndAppendSegments_(segmentObj, destinationBuffer) {
|
||
var offset = 0;
|
||
var tempBuffer = undefined;
|
||
|
||
if (segmentObj.bytes) {
|
||
tempBuffer = new Uint8Array(segmentObj.bytes);
|
||
|
||
// Combine the individual segments into one large typed-array
|
||
segmentObj.segments.forEach(function (segment) {
|
||
tempBuffer.set(segment, offset);
|
||
offset += segment.byteLength;
|
||
});
|
||
|
||
try {
|
||
destinationBuffer.updating = true;
|
||
destinationBuffer.appendBuffer(tempBuffer);
|
||
} catch (error) {
|
||
if (this.mediaSource_.player_) {
|
||
this.mediaSource_.player_.error({
|
||
code: -3,
|
||
type: 'APPEND_BUFFER_ERR',
|
||
message: error.message,
|
||
originalError: error
|
||
});
|
||
}
|
||
}
|
||
}
|
||
}
|
||
|
||
/**
|
||
* Emulate the native mediasource function. Abort any sourceBuffer
* actions and throw out any un-appended data.
|
||
*
|
||
* @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/abort
|
||
*/
|
||
}, {
|
||
key: 'abort',
|
||
value: function abort() {
|
||
if (this.videoBuffer_) {
|
||
this.videoBuffer_.abort();
|
||
}
|
||
if (!this.audioDisabled_ && this.audioBuffer_) {
|
||
this.audioBuffer_.abort();
|
||
}
|
||
if (this.transmuxer_) {
|
||
this.transmuxer_.postMessage({ action: 'reset' });
|
||
}
|
||
this.pendingBuffers_.length = 0;
|
||
this.bufferUpdating_ = false;
|
||
}
|
||
}]);
|
||
|
||
return VirtualSourceBuffer;
|
||
})(_videoJs2['default'].EventTarget);
|
||
|
||
exports['default'] = VirtualSourceBuffer;
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"./add-text-track-data":35,"./codec-utils":36,"./create-text-tracks-if-necessary":37,"./remove-cues-from-track":43,"./transmuxer-worker":44,"webwackify":34}],47:[function(require,module,exports){
|
||
(function (global){
|
||
'use strict';
|
||
|
||
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
|
||
|
||
var _qunit = (typeof window !== "undefined" ? window['QUnit'] : typeof global !== "undefined" ? global['QUnit'] : null);
|
||
|
||
var _qunit2 = _interopRequireDefault(_qunit);
|
||
|
||
var _srcAddTextTrackData = require('../src/add-text-track-data');
|
||
|
||
var equal = _qunit2['default'].equal;
|
||
var _module = _qunit2['default'].module;
|
||
var test = _qunit2['default'].test;
|
||
|
||
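// Minimal TextTrack stand-in used by these tests: it only records the cues
// passed to addCue so assertions can inspect them.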
var MockTextTrack = (function () {
|
||
function MockTextTrack() {
|
||
_classCallCheck(this, MockTextTrack);
|
||
|
||
this.cues = [];
|
||
}
|
||
|
||
_createClass(MockTextTrack, [{
|
||
key: 'addCue',
|
||
value: function addCue(cue) {
|
||
this.cues.push(cue);
|
||
}
|
||
}]);
|
||
|
||
return MockTextTrack;
|
||
})();
|
||
|
||
_module('Text Track Data', {
|
||
beforeEach: function beforeEach() {
|
||
this.sourceHandler = {
|
||
inbandTextTracks_: {
|
||
CC1: new MockTextTrack(),
|
||
CC2: new MockTextTrack(),
|
||
CC3: new MockTextTrack(),
|
||
CC4: new MockTextTrack()
|
||
},
|
||
metadataTrack_: new MockTextTrack(),
|
||
mediaSource_: {
|
||
duration: NaN
|
||
},
|
||
timestampOffset: 0
|
||
};
|
||
}
|
||
});
|
||
|
||
test('does nothing if no cues are specified', function () {
|
||
(0, _srcAddTextTrackData.addTextTrackData)(this.sourceHandler, [], []);
|
||
equal(this.sourceHandler.inbandTextTracks_.CC1.cues.length, 0, 'added no 608 cues');
|
||
equal(this.sourceHandler.metadataTrack_.cues.length, 0, 'added no metadata cues');
|
||
});
|
||
|
||
test('creates cues for 608 captions with "stream" property in ccX', function () {
|
||
(0, _srcAddTextTrackData.addTextTrackData)(this.sourceHandler, [{
|
||
startTime: 0,
|
||
endTime: 1,
|
||
text: 'CC1 text',
|
||
stream: 'CC1'
|
||
}, {
|
||
startTime: 0,
|
||
endTime: 1,
|
||
text: 'CC2 text',
|
||
stream: 'CC2'
|
||
}, {
|
||
startTime: 0,
|
||
endTime: 1,
|
||
text: 'CC3 text',
|
||
stream: 'CC3'
|
||
}, {
|
||
startTime: 0,
|
||
endTime: 1,
|
||
text: 'CC4 text',
|
||
stream: 'CC4'
|
||
}], []);
|
||
equal(this.sourceHandler.inbandTextTracks_.CC1.cues.length, 1, 'added one 608 cue to CC1');
|
||
equal(this.sourceHandler.inbandTextTracks_.CC2.cues.length, 1, 'added one 608 cue to CC2');
|
||
equal(this.sourceHandler.inbandTextTracks_.CC3.cues.length, 1, 'added one 608 cue to CC3');
|
||
equal(this.sourceHandler.inbandTextTracks_.CC4.cues.length, 1, 'added one 608 cue to CC4');
|
||
equal(this.sourceHandler.metadataTrack_.cues.length, 0, 'added no metadata cues');
|
||
});
|
||
|
||
test('creates cues for timed metadata', function () {
|
||
(0, _srcAddTextTrackData.addTextTrackData)(this.sourceHandler, [], [{
|
||
cueTime: 1,
|
||
frames: [{}]
|
||
}]);
|
||
equal(this.sourceHandler.inbandTextTracks_.CC1.cues.length, 0, 'added no 608 cues');
equal(this.sourceHandler.metadataTrack_.cues.length, 1, 'added one metadata cue');
|
||
});
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"../src/add-text-track-data":35}],48:[function(require,module,exports){
|
||
(function (global){
|
||
'use strict';
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _qunit = (typeof window !== "undefined" ? window['QUnit'] : typeof global !== "undefined" ? global['QUnit'] : null);
|
||
|
||
var _qunit2 = _interopRequireDefault(_qunit);
|
||
|
||
var _srcCodecUtils = require('../src/codec-utils');
|
||
|
||
var deepEqual = _qunit2['default'].deepEqual;
|
||
var _module = _qunit2['default'].module;
|
||
var test = _qunit2['default'].test;
|
||
|
||
_module('Codec Utils');
|
||
|
||
test('translates legacy codecs', function () {
|
||
deepEqual((0, _srcCodecUtils.translateLegacyCodecs)(['avc1.66.30', 'avc1.66.30']), ['avc1.42001e', 'avc1.42001e'], 'translates legacy avc1.66.30 codec');
|
||
|
||
deepEqual((0, _srcCodecUtils.translateLegacyCodecs)(['avc1.42C01E', 'avc1.42C01E']), ['avc1.42C01E', 'avc1.42C01E'], 'does not translate modern codecs');
|
||
|
||
deepEqual((0, _srcCodecUtils.translateLegacyCodecs)(['avc1.42C01E', 'avc1.66.30']), ['avc1.42C01E', 'avc1.42001e'], 'only translates legacy codecs when mixed');
|
||
|
||
deepEqual((0, _srcCodecUtils.translateLegacyCodecs)(['avc1.4d0020', 'avc1.100.41', 'avc1.77.41', 'avc1.77.32', 'avc1.77.31', 'avc1.77.30', 'avc1.66.30', 'avc1.66.21', 'avc1.42C01e']), ['avc1.4d0020', 'avc1.640029', 'avc1.4d0029', 'avc1.4d0020', 'avc1.4d001f', 'avc1.4d001e', 'avc1.42001e', 'avc1.420015', 'avc1.42C01e'], 'translates a whole bunch');
|
||
});
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"../src/codec-utils":36}],49:[function(require,module,exports){
|
||
(function (global){
|
||
'use strict';
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _globalDocument = require('global/document');
|
||
|
||
var _globalDocument2 = _interopRequireDefault(_globalDocument);
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _qunit = (typeof window !== "undefined" ? window['QUnit'] : typeof global !== "undefined" ? global['QUnit'] : null);
|
||
|
||
var _qunit2 = _interopRequireDefault(_qunit);
|
||
|
||
var _sinon = (typeof window !== "undefined" ? window['sinon'] : typeof global !== "undefined" ? global['sinon'] : null);
|
||
|
||
var _sinon2 = _interopRequireDefault(_sinon);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _srcFlashMediaSource = require('../src/flash-media-source');
|
||
|
||
var _srcFlashMediaSource2 = _interopRequireDefault(_srcFlashMediaSource);
|
||
|
||
var _srcHtmlMediaSource = require('../src/html-media-source');
|
||
|
||
var _srcHtmlMediaSource2 = _interopRequireDefault(_srcHtmlMediaSource);
|
||
|
||
// we disable this because browserify needs to include these files
|
||
// but the exports are not important
|
||
/* eslint-disable no-unused-vars */
|
||
|
||
var _srcVideojsContribMediaSourcesJs = require('../src/videojs-contrib-media-sources.js');
|
||
|
||
/* eslint-disable no-unused-vars */
|
||
|
||
_qunit2['default'].module('createObjectURL', {
|
||
beforeEach: function beforeEach() {
|
||
this.fixture = _globalDocument2['default'].getElementById('qunit-fixture');
|
||
this.video = _globalDocument2['default'].createElement('video');
|
||
this.fixture.appendChild(this.video);
|
||
this.player = (0, _videoJs2['default'])(this.video);
|
||
|
||
// Mock the environment's timers because certain things - particularly
|
||
// player readiness - are asynchronous in video.js 5.
|
||
this.clock = _sinon2['default'].useFakeTimers();
|
||
this.oldMediaSource = _globalWindow2['default'].MediaSource || _globalWindow2['default'].WebKitMediaSource;
|
||
|
||
// force MediaSource support
|
||
if (!_globalWindow2['default'].MediaSource) {
|
||
_globalWindow2['default'].MediaSource = function () {
|
||
var result = new _globalWindow2['default'].Blob();
|
||
|
||
result.addEventListener = function () {};
|
||
result.addSourceBuffer = function () {};
|
||
return result;
|
||
};
|
||
}
|
||
},
|
||
|
||
afterEach: function afterEach() {
|
||
|
||
// The clock _must_ be restored before disposing the player; otherwise,
|
||
// certain timeout listeners that happen inside video.js may throw errors.
|
||
this.clock.restore();
|
||
this.player.dispose();
|
||
_globalWindow2['default'].MediaSource = _globalWindow2['default'].WebKitMediaSource = this.oldMediaSource;
|
||
}
|
||
});
|
||
|
||
_qunit2['default'].test('delegates to the native implementation', function () {
|
||
_qunit2['default'].ok(!/blob:vjs-media-source\//.test(_videoJs2['default'].URL.createObjectURL(new _globalWindow2['default'].Blob())), 'created a native blob URL');
|
||
});
|
||
|
||
_qunit2['default'].test('uses the native MediaSource when available', function () {
|
||
_qunit2['default'].ok(!/blob:vjs-media-source\//.test(_videoJs2['default'].URL.createObjectURL(new _srcHtmlMediaSource2['default']())), 'created a native blob URL');
|
||
});
|
||
|
||
_qunit2['default'].test('emulates a URL for the shim', function () {
|
||
_qunit2['default'].ok(/blob:vjs-media-source\//.test(_videoJs2['default'].URL.createObjectURL(new _srcFlashMediaSource2['default']())), 'created an emulated blob URL');
|
||
});
|
||
|
||
_qunit2['default'].test('stores the associated blob URL on the media source', function () {
|
||
var blob = new _globalWindow2['default'].Blob();
|
||
var url = _videoJs2['default'].URL.createObjectURL(blob);
|
||
|
||
_qunit2['default'].equal(blob.url_, url, 'captured the generated URL');
|
||
});
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"../src/flash-media-source":39,"../src/html-media-source":42,"../src/videojs-contrib-media-sources.js":45,"global/document":2,"global/window":3}],50:[function(require,module,exports){
|
||
(function (global){
|
||
'use strict';
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _globalDocument = require('global/document');
|
||
|
||
var _globalDocument2 = _interopRequireDefault(_globalDocument);
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _qunit = (typeof window !== "undefined" ? window['QUnit'] : typeof global !== "undefined" ? global['QUnit'] : null);
|
||
|
||
var _qunit2 = _interopRequireDefault(_qunit);
|
||
|
||
var _sinon = (typeof window !== "undefined" ? window['sinon'] : typeof global !== "undefined" ? global['sinon'] : null);
|
||
|
||
var _sinon2 = _interopRequireDefault(_sinon);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _muxJs = require('mux.js');
|
||
|
||
var _muxJs2 = _interopRequireDefault(_muxJs);
|
||
|
||
var _srcFlashSourceBuffer = require('../src/flash-source-buffer');
|
||
|
||
var _srcFlashSourceBuffer2 = _interopRequireDefault(_srcFlashSourceBuffer);
|
||
|
||
var _srcFlashConstants = require('../src/flash-constants');
|
||
|
||
var _srcFlashConstants2 = _interopRequireDefault(_srcFlashConstants);
|
||
|
||
// we disable this because browserify needs to include these files
|
||
// but the exports are not important
|
||
/* eslint-disable no-unused-vars */
|
||
|
||
var _srcVideojsContribMediaSourcesJs = require('../src/videojs-contrib-media-sources.js');
|
||
|
||
/* eslint-disable no-unused-vars */
|
||
|
||
// return the sequence of calls to append to the SWF
|
||
var appendCalls = function appendCalls(calls) {
|
||
return calls.filter(function (call) {
|
||
return call.callee && call.callee === 'vjs_appendChunkReady';
|
||
});
|
||
};
|
||
|
||
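// Stub FLV header (three arbitrary bytes); the test setup swaps this in for
// mux.js' real getFlvHeader so appended data stays small and predictable.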
var getFlvHeader = function getFlvHeader() {
|
||
return new Uint8Array([1, 2, 3]);
|
||
};
|
||
|
||
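// Build a minimal FLV tag object for the mocked transmuxer output; pts and
// dts are kept identical for simplicity in these tests.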
var makeFlvTag = function makeFlvTag(pts, data) {
|
||
return {
|
||
pts: pts,
|
||
dts: pts,
|
||
bytes: data
|
||
};
|
||
};
|
||
|
||
var timers = undefined;
|
||
var oldSTO = undefined;
|
||
|
||
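// Replace window.setTimeout with a stack of pending callbacks so tests can
// step through "async" work deterministically via timers.run() / timers.runAll().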
var fakeSTO = function fakeSTO() {
|
||
oldSTO = _globalWindow2['default'].setTimeout;
|
||
timers = [];
|
||
|
||
timers.run = function (num) {
|
||
var timer = undefined;
|
||
|
||
while (num--) {
|
||
timer = this.pop();
|
||
if (timer) {
|
||
timer();
|
||
}
|
||
}
|
||
};
|
||
|
||
timers.runAll = function () {
|
||
while (this.length) {
|
||
this.pop()();
|
||
}
|
||
};
|
||
|
||
_globalWindow2['default'].setTimeout = function (callback) {
|
||
timers.push(callback);
|
||
};
|
||
_globalWindow2['default'].setTimeout.fake = true;
|
||
};
|
||
|
||
var unfakeSTO = function unfakeSTO() {
|
||
timers = [];
|
||
_globalWindow2['default'].setTimeout = oldSTO;
|
||
};
|
||
|
||
// Create a WebWorker-style 'data' message like the ones the transmuxer emits
var createDataMessage = function createDataMessage(data, audioData, metadata, captions) {
|
||
var captionStreams = {};
|
||
|
||
if (captions) {
|
||
captions.forEach(function (caption) {
|
||
captionStreams[caption.stream] = true;
|
||
});
|
||
}
|
||
return {
|
||
data: {
|
||
action: 'data',
|
||
segment: {
|
||
tags: {
|
||
videoTags: data.map(function (tag) {
|
||
return makeFlvTag(tag.pts, tag.bytes);
|
||
}),
|
||
audioTags: audioData ? audioData.map(function (tag) {
|
||
return makeFlvTag(tag.pts, tag.bytes);
|
||
}) : []
|
||
},
|
||
metadata: metadata,
|
||
captions: captions,
|
||
captionStreams: captionStreams
|
||
}
|
||
}
|
||
};
|
||
};
|
||
var doneMessage = {
|
||
data: {
|
||
action: 'done'
|
||
}
|
||
};
|
||
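// Stand-in for the transmuxer worker's postMessage: a 'push' action echoes a
// data message built from the pushed bytes and a 'flush' action echoes the
// done message, each on a (faked) timeout.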
var postMessage_ = function postMessage_(msg) {
|
||
var _this = this;
|
||
|
||
if (msg.action === 'push') {
|
||
_globalWindow2['default'].setTimeout(function () {
|
||
_this.onmessage(createDataMessage([{
|
||
bytes: new Uint8Array(msg.data, msg.byteOffset, msg.byteLength),
|
||
pts: 0
|
||
}]));
|
||
}, 1);
|
||
} else if (msg.action === 'flush') {
|
||
_globalWindow2['default'].setTimeout(function () {
|
||
_this.onmessage(doneMessage);
|
||
}, 1);
|
||
}
|
||
};
|
||
|
||
_qunit2['default'].module('Flash MediaSource', {
|
||
beforeEach: function beforeEach(assert) {
|
||
var _this2 = this;
|
||
|
||
var swfObj = undefined;
|
||
|
||
// Mock the environment's timers because certain things - particularly
|
||
// player readiness - are asynchronous in video.js 5.
|
||
this.clock = _sinon2['default'].useFakeTimers();
|
||
|
||
this.fixture = _globalDocument2['default'].getElementById('qunit-fixture');
|
||
this.video = _globalDocument2['default'].createElement('video');
|
||
this.fixture.appendChild(this.video);
|
||
this.player = (0, _videoJs2['default'])(this.video);
|
||
|
||
this.oldMediaSource = _globalWindow2['default'].MediaSource || _globalWindow2['default'].WebKitMediaSource;
|
||
|
||
_globalWindow2['default'].MediaSource = null;
|
||
_globalWindow2['default'].WebKitMediaSource = null;
|
||
|
||
this.Flash = _videoJs2['default'].getTech('Flash');
|
||
this.oldFlashSupport = this.Flash.isSupported;
|
||
this.oldCanPlay = this.Flash.canPlaySource;
|
||
this.Flash.canPlaySource = this.Flash.isSupported = function () {
|
||
return true;
|
||
};
|
||
|
||
this.oldFlashTransmuxerPostMessage = _muxJs2['default'].flv.Transmuxer.postMessage;
|
||
this.oldGetFlvHeader = _muxJs2['default'].flv.getFlvHeader;
|
||
_muxJs2['default'].flv.getFlvHeader = getFlvHeader;
|
||
|
||
this.swfCalls = [];
|
||
this.mediaSource = new _videoJs2['default'].MediaSource();
|
||
this.player.src({
|
||
src: _videoJs2['default'].URL.createObjectURL(this.mediaSource),
|
||
type: 'video/mp2t'
|
||
});
|
||
// vjs6 takes 1 tick to set source async
|
||
this.clock.tick(1);
|
||
swfObj = _globalDocument2['default'].createElement('fake-object');
|
||
swfObj.id = 'fake-swf-' + assert.test.testId;
|
||
this.player.el().replaceChild(swfObj, this.player.tech_.el());
|
||
this.player.tech_.hls = new _videoJs2['default'].EventTarget();
|
||
this.player.tech_.el_ = swfObj;
|
||
swfObj.tech = this.player.tech_;
|
||
|
||
/* eslint-disable camelcase */
|
||
swfObj.vjs_abort = function () {
|
||
_this2.swfCalls.push('abort');
|
||
};
|
||
swfObj.vjs_getProperty = function (attr) {
|
||
if (attr === 'buffered') {
|
||
return [];
|
||
} else if (attr === 'currentTime') {
|
||
return 0;
|
||
// ignored for vjs6
|
||
} else if (attr === 'videoWidth') {
|
||
return 0;
|
||
}
|
||
_this2.swfCalls.push({ attr: attr });
|
||
};
|
||
swfObj.vjs_load = function () {
|
||
_this2.swfCalls.push('load');
|
||
};
|
||
swfObj.vjs_setProperty = function (attr, value) {
|
||
_this2.swfCalls.push({ attr: attr, value: value });
|
||
};
|
||
swfObj.vjs_discontinuity = function (attr, value) {
|
||
_this2.swfCalls.push({ attr: attr, value: value });
|
||
};
|
||
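// Mock of the SWF's appendChunkReady: reads the base64 chunk from the named
// global getter, decodes it back to bytes, and records the call in swfCalls
// (ignoring the FLV header chunks).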
swfObj.vjs_appendChunkReady = function (method) {
|
||
_globalWindow2['default'].setTimeout(function () {
|
||
var chunk = _globalWindow2['default'][method]();
|
||
|
||
// only care about the segment data, not the flv header
|
||
if (method.substr(0, 21) === 'vjs_flashEncodedData_') {
|
||
var call = {
|
||
callee: 'vjs_appendChunkReady',
|
||
arguments: [_globalWindow2['default'].atob(chunk).split('').map(function (c) {
|
||
return c.charCodeAt(0);
|
||
})]
|
||
};
|
||
|
||
_this2.swfCalls.push(call);
|
||
}
|
||
}, 1);
|
||
};
|
||
swfObj.vjs_adjustCurrentTime = function (value) {
|
||
_this2.swfCalls.push({ call: 'adjustCurrentTime', value: value });
|
||
};
|
||
/* eslint-enable camelcase */
|
||
|
||
this.mediaSource.trigger({
|
||
type: 'sourceopen',
|
||
swfId: swfObj.id
|
||
});
|
||
fakeSTO();
|
||
},
|
||
afterEach: function afterEach() {
|
||
_globalWindow2['default'].MediaSource = this.oldMediaSource;
|
||
_globalWindow2['default'].WebKitMediaSource = _globalWindow2['default'].MediaSource;
|
||
this.Flash.isSupported = this.oldFlashSupport;
|
||
this.Flash.canPlaySource = this.oldCanPlay;
|
||
_muxJs2['default'].flv.Transmuxer.postMessage = this.oldFlashTransmuxerPostMessage;
|
||
_muxJs2['default'].flv.getFlvHeader = this.oldGetFlvHeader;
|
||
this.player.dispose();
|
||
this.clock.restore();
|
||
this.swfCalls = [];
|
||
unfakeSTO();
|
||
}
|
||
});
|
||
|
||
_qunit2['default'].test('raises an exception for unrecognized MIME types', function () {
|
||
try {
|
||
this.mediaSource.addSourceBuffer('video/garbage');
|
||
} catch (e) {
|
||
_qunit2['default'].ok(e, 'an error was thrown');
|
||
return;
|
||
}
|
||
_qunit2['default'].ok(false, 'no error was thrown');
|
||
});
|
||
|
||
_qunit2['default'].test('creates FlashSourceBuffers for video/mp2t', function () {
|
||
_qunit2['default'].ok(this.mediaSource.addSourceBuffer('video/mp2t') instanceof _srcFlashSourceBuffer2['default'], 'create source buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('creates FlashSourceBuffers for audio/mp2t', function () {
|
||
_qunit2['default'].ok(this.mediaSource.addSourceBuffer('audio/mp2t') instanceof _srcFlashSourceBuffer2['default'], 'create source buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('waits for the next tick to append', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
_qunit2['default'].equal(this.swfCalls.length, 1, 'made one call on init');
|
||
_qunit2['default'].equal(this.swfCalls[0], 'load', 'called load');
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
this.swfCalls = appendCalls(this.swfCalls);
|
||
_qunit2['default'].strictEqual(this.swfCalls.length, 0, 'no appends were made');
|
||
});
|
||
|
||
_qunit2['default'].test('passes bytes to Flash', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.swfCalls.length = 0;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
timers.runAll();
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].ok(this.swfCalls.length, 'the SWF was called');
|
||
this.swfCalls = appendCalls(this.swfCalls);
|
||
_qunit2['default'].strictEqual(this.swfCalls[0].callee, 'vjs_appendChunkReady', 'called vjs_appendChunkReady');
|
||
_qunit2['default'].deepEqual(this.swfCalls[0].arguments[0], [0, 1], 'passed the base64 encoded data');
|
||
});
|
||
|
||
_qunit2['default'].test('passes chunked bytes to Flash', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var oldChunkSize = _srcFlashConstants2['default'].BYTES_PER_CHUNK;
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
_srcFlashConstants2['default'].BYTES_PER_CHUNK = 2;
|
||
|
||
this.swfCalls.length = 0;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1, 2, 3, 4]));
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].ok(this.swfCalls.length, 'the SWF was called');
|
||
this.swfCalls = appendCalls(this.swfCalls);
|
||
_qunit2['default'].equal(this.swfCalls.length, 3, 'the SWF received 3 chunks');
|
||
_qunit2['default'].strictEqual(this.swfCalls[0].callee, 'vjs_appendChunkReady', 'called vjs_appendChunkReady');
|
||
_qunit2['default'].deepEqual(this.swfCalls[0].arguments[0], [0, 1], 'passed the base64 encoded data');
|
||
_qunit2['default'].deepEqual(this.swfCalls[1].arguments[0], [2, 3], 'passed the base64 encoded data');
|
||
_qunit2['default'].deepEqual(this.swfCalls[2].arguments[0], [4], 'passed the base64 encoded data');
|
||
|
||
_srcFlashConstants2['default'].BYTES_PER_CHUNK = oldChunkSize;
|
||
});
|
||
|
||
_qunit2['default'].test('clears the SWF on seeking', function () {
|
||
var aborts = 0;
|
||
|
||
this.mediaSource.addSourceBuffer('video/mp2t');
|
||
// track calls to abort()
|
||
|
||
/* eslint-disable camelcase */
|
||
this.mediaSource.swfObj.vjs_abort = function () {
|
||
aborts++;
|
||
};
|
||
/* eslint-enable camelcase */
|
||
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
_qunit2['default'].strictEqual(1, aborts, 'aborted pending buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('drops tags before currentTime when seeking', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var i = 10;
|
||
var currentTime = undefined;
|
||
var tags_ = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.mediaSource.tech_.currentTime = function () {
|
||
return currentTime;
|
||
};
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
currentTime = 0;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage([{
|
||
pts: 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
}]));
|
||
|
||
timers.runAll();
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
timers.runAll();
|
||
|
||
// mock out a new segment of FLV tags, starting 10s after the
|
||
// starting PTS value
|
||
while (i--) {
|
||
tags_.unshift({
|
||
pts: i * 1000 + 29 * 1000,
|
||
bytes: new Uint8Array([i])
|
||
});
|
||
}
|
||
|
||
var dataMessage = createDataMessage(tags_);
|
||
|
||
// mock gop start at seek point
|
||
dataMessage.data.segment.tags.videoTags[7].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
// seek to 7 seconds into the new segment
|
||
this.mediaSource.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
currentTime = 10 + 7;
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].deepEqual(this.swfCalls[0].arguments[0], [7, 8, 9], 'three tags are appended');
|
||
});
|
||
|
||
_qunit2['default'].test('drops audio and video (complete gops) tags before the buffered end always', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var endTime = undefined;
|
||
var videoTags_ = [];
|
||
var audioTags_ = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.mediaSource.tech_.buffered = function () {
|
||
return _videoJs2['default'].createTimeRange([[0, endTime]]);
|
||
};
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
endTime = 0;
|
||
|
||
// mock buffering 17 seconds of data so flash source buffer internal end of buffer
|
||
// tracking is accurate
|
||
var i = 17;
|
||
|
||
while (i--) {
|
||
videoTags_.unshift({
|
||
pts: i * 1000 + 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
});
|
||
}
|
||
|
||
i = 17;
|
||
|
||
while (i--) {
|
||
audioTags_.unshift({
|
||
pts: i * 1000 + 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
});
|
||
}
|
||
|
||
var dataMessage = createDataMessage(videoTags_, audioTags_);
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
timers.runAll();
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
timers.runAll();
|
||
|
||
i = 10;
|
||
videoTags_ = [];
|
||
audioTags_ = [];
|
||
|
||
// mock out a new segment of FLV tags, starting 10s after the
|
||
// starting PTS value
|
||
while (i--) {
|
||
videoTags_.unshift({
|
||
pts: i * 1000 + 29 * 1000,
|
||
bytes: new Uint8Array([i])
|
||
});
|
||
}
|
||
|
||
i = 10;
|
||
|
||
while (i--) {
|
||
audioTags_.unshift({
|
||
pts: i * 1000 + 29 * 1000,
|
||
bytes: new Uint8Array([i + 100])
|
||
});
|
||
}
|
||
|
||
dataMessage = createDataMessage(videoTags_, audioTags_);
|
||
|
||
dataMessage.data.segment.tags.videoTags[0].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[3].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[6].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[8].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
endTime = 10 + 7;
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
// end of buffer is 17 seconds
|
||
// frames 0-6 for video have pts values less than 17 seconds
|
||
// since frame 6 is a key frame, it should still be appended to preserve the entire gop
|
||
// so we should have appended frames 6 - 9
|
||
// frames 100-106 for audio have pts values less than 17 seconds
|
||
// but since we appended an extra video frame, we should also append audio frames
|
||
// to fill in the gap in audio. This means we should be appending audio frames
|
||
// 106, 107, 108, 109
|
||
// Append order is 6, 106, 7, 107, 8, 108, 9, 109 since we order tags based on dts value
|
||
_qunit2['default'].deepEqual(this.swfCalls[0].arguments[0], [6, 106, 7, 107, 8, 108, 9, 109], 'audio and video tags properly dropped');
|
||
});
|
||
|
||
_qunit2['default'].test('seeking into the middle of a GOP adjusts currentTime to the start of the GOP', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var i = 10;
|
||
var currentTime = undefined;
|
||
var tags_ = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.mediaSource.tech_.currentTime = function () {
|
||
return currentTime;
|
||
};
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
currentTime = 0;
|
||
|
||
var dataMessage = createDataMessage([{
|
||
pts: 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
}]);
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
timers.runAll();
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
timers.runAll();
|
||
|
||
// mock out a new segment of FLV tags, starting 10s after the
|
||
// starting PTS value
|
||
while (i--) {
|
||
tags_.unshift({
|
||
pts: i * 1000 + 29 * 1000,
|
||
bytes: new Uint8Array([i])
|
||
});
|
||
}
|
||
|
||
dataMessage = createDataMessage(tags_);
|
||
|
||
// mock the GOP structure
|
||
dataMessage.data.segment.tags.videoTags[0].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[3].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[5].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[8].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
// seek to 7 seconds into the new segment
|
||
this.mediaSource.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
currentTime = 10 + 7;
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].deepEqual(this.swfCalls[0], { call: 'adjustCurrentTime', value: 15 });
|
||
_qunit2['default'].deepEqual(this.swfCalls[1].arguments[0], [5, 6, 7, 8, 9], '5 tags are appended');
|
||
});
|
||
|
||
_qunit2['default'].test('GOP trimming accounts for metadata tags prepended to key frames by mux.js', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var i = 10;
|
||
var currentTime = undefined;
|
||
var tags_ = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.mediaSource.tech_.currentTime = function () {
|
||
return currentTime;
|
||
};
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
currentTime = 0;
|
||
|
||
var dataMessage = createDataMessage([{
|
||
pts: 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
}]);
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
timers.runAll();
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
timers.runAll();
|
||
|
||
// mock out a new segment of FLV tags, starting 10s after the
|
||
// starting PTS value
|
||
while (i--) {
|
||
tags_.unshift({
|
||
pts: i * 1000 + 29 * 1000,
|
||
bytes: new Uint8Array([i])
|
||
});
|
||
}
|
||
|
||
// add in the metadata tags
|
||
tags_.splice(8, 0, {
|
||
pts: tags_[8].pts,
|
||
bytes: new Uint8Array([8])
|
||
}, {
|
||
pts: tags_[8].pts,
|
||
bytes: new Uint8Array([8])
|
||
});
|
||
|
||
tags_.splice(5, 0, {
|
||
pts: tags_[5].pts,
|
||
bytes: new Uint8Array([5])
|
||
}, {
|
||
pts: tags_[5].pts,
|
||
bytes: new Uint8Array([5])
|
||
});
|
||
|
||
tags_.splice(0, 0, {
|
||
pts: tags_[0].pts,
|
||
bytes: new Uint8Array([0])
|
||
}, {
|
||
pts: tags_[0].pts,
|
||
bytes: new Uint8Array([0])
|
||
});
|
||
|
||
dataMessage = createDataMessage(tags_);
|
||
|
||
// mock the GOP structure + metadata tags
|
||
// if we see a metadata tag, that means the next tag will also be a metadata tag with
|
||
// keyFrame true and the tag after that will be the keyFrame
|
||
// e.g.
|
||
// { keyFrame: false, metaDataTag: true},
|
||
// { keyFrame: true, metaDataTag: true},
|
||
// { keyFrame: true, metaDataTag: false}
|
||
dataMessage.data.segment.tags.videoTags[0].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[1].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[1].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[2].keyFrame = true;
|
||
|
||
// no metadata tags in front of this key to test the case where mux.js does not prepend
|
||
// the metadata tags
|
||
dataMessage.data.segment.tags.videoTags[5].keyFrame = true;
|
||
|
||
dataMessage.data.segment.tags.videoTags[7].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[8].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[8].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[9].keyFrame = true;
|
||
|
||
dataMessage.data.segment.tags.videoTags[12].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[13].metaDataTag = true;
|
||
dataMessage.data.segment.tags.videoTags[13].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[14].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
// seek to 7 seconds into the new segment
|
||
this.mediaSource.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
currentTime = 10 + 7;
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].deepEqual(this.swfCalls[0], { call: 'adjustCurrentTime', value: 15 });
|
||
_qunit2['default'].deepEqual(this.swfCalls[1].arguments[0], [5, 5, 5, 6, 7, 8, 8, 8, 9], '10 tags are appended, 4 of which are metadata tags');
|
||
});
|
||
|
||
_qunit2['default'].test('drops all tags if target pts append time does not fall within segment', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var i = 10;
|
||
var currentTime = undefined;
|
||
var tags_ = [];
|
||
|
||
this.mediaSource.tech_.currentTime = function () {
|
||
return currentTime;
|
||
};
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
currentTime = 0;
|
||
|
||
var dataMessage = createDataMessage([{
|
||
pts: 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
}]);
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
timers.runAll();
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
timers.runAll();
|
||
|
||
// mock out a new segment of FLV tags, starting 10s after the
|
||
// starting PTS value
|
||
while (i--) {
|
||
tags_.unshift({
|
||
pts: i * 1000 + 19 * 1000,
|
||
bytes: new Uint8Array([i])
|
||
});
|
||
}
|
||
|
||
dataMessage = createDataMessage(tags_);
|
||
|
||
// mock the GOP structure
|
||
dataMessage.data.segment.tags.videoTags[0].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[3].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[5].keyFrame = true;
|
||
dataMessage.data.segment.tags.videoTags[8].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
// seek to 7 seconds into the new segment
|
||
this.mediaSource.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
currentTime = 10 + 7;
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
sourceBuffer.appendBuffer(new Uint8Array(10));
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].equal(this.swfCalls.length, 0, 'dropped all tags and made no swf calls');
|
||
});
|
||
|
||
_qunit2['default'].test('seek targeting accounts for changing timestampOffsets', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var i = 10;
|
||
var tags_ = [];
|
||
var currentTime = undefined;
|
||
|
||
this.mediaSource.tech_.currentTime = function () {
|
||
return currentTime;
|
||
};
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
var dataMessage = createDataMessage([{
|
||
pts: 19 * 1000,
|
||
bytes: new Uint8Array(1)
|
||
}]);
|
||
|
||
// push a tag into the buffer to establish the starting PTS value
|
||
currentTime = 0;
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
timers.runAll();
|
||
|
||
// to seek across a discontinuity:
|
||
// 1. set the timestamp offset to the media timeline position for
|
||
// the start of the segment
|
||
// 2. set currentTime to the desired media timeline position
|
||
sourceBuffer.timestampOffset = 22;
|
||
currentTime = sourceBuffer.timestampOffset + 3.5;
|
||
this.mediaSource.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
|
||
// the new segment FLV tags are at disjoint PTS positions
|
||
while (i--) {
|
||
tags_.unshift({
|
||
// (101 * 1000) !== the old PTS offset
|
||
pts: i * 1000 + 101 * 1000,
|
||
bytes: new Uint8Array([i + sourceBuffer.timestampOffset])
|
||
});
|
||
}
|
||
|
||
dataMessage = createDataMessage(tags_);
|
||
// mock gop start at seek point
|
||
dataMessage.data.segment.tags.videoTags[3].keyFrame = true;
|
||
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
this.mediaSource.tech_.trigger('seeking');
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].equal(this.swfCalls[0].value, 25, 'adjusted current time');
|
||
_qunit2['default'].deepEqual(this.swfCalls[1].arguments[0], [25, 26, 27, 28, 29, 30, 31], 'filtered the appended tags');
|
||
});
|
||
|
||
_qunit2['default'].test('calling endOfStream sets mediaSource readyState to ended', function () {
|
||
var _this3 = this;
|
||
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
/* eslint-disable camelcase */
|
||
this.mediaSource.swfObj.vjs_endOfStream = function () {
|
||
_this3.swfCalls.push('endOfStream');
|
||
};
|
||
/* eslint-enable camelcase */
|
||
sourceBuffer.addEventListener('updateend', function () {
|
||
_this3.mediaSource.endOfStream();
|
||
});
|
||
|
||
this.swfCalls.length = 0;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].strictEqual(sourceBuffer.mediaSource_.readyState, 'ended', 'readyState is \'ended\'');
|
||
_qunit2['default'].strictEqual(this.swfCalls.length, 2, 'made two calls to swf');
|
||
_qunit2['default'].deepEqual(this.swfCalls.shift().arguments[0], [0, 1], 'contains the data');
|
||
|
||
_qunit2['default'].ok(this.swfCalls.shift().indexOf('endOfStream') === 0, 'the second call should be for the updateend');
|
||
|
||
_qunit2['default'].strictEqual(timers.length, 0, 'no more appends are scheduled');
|
||
});
|
||
|
||
_qunit2['default'].test('opens the stream on sourceBuffer.appendBuffer after endOfStream', function () {
|
||
var _this4 = this;
|
||
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var foo = function foo() {
|
||
_this4.mediaSource.endOfStream();
|
||
sourceBuffer.removeEventListener('updateend', foo);
|
||
};
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
/* eslint-disable camelcase */
|
||
this.mediaSource.swfObj.vjs_endOfStream = function () {
|
||
_this4.swfCalls.push('endOfStream');
|
||
};
|
||
/* eslint-enable camelcase */
|
||
sourceBuffer.addEventListener('updateend', foo);
|
||
|
||
this.swfCalls.length = 0;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].strictEqual(this.swfCalls.length, 2, 'made two calls to swf');
|
||
_qunit2['default'].deepEqual(this.swfCalls.shift().arguments[0], [0, 1], 'contains the data');
|
||
|
||
_qunit2['default'].equal(this.swfCalls.shift(), 'endOfStream', 'the second call should be for the updateend');
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array([2, 3]));
|
||
// clear the previously saved video PTS because mock appends don't carry real timing data
|
||
sourceBuffer.videoBufferEnd_ = NaN;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].strictEqual(this.swfCalls.length, 1, 'made one more append');
|
||
_qunit2['default'].deepEqual(this.swfCalls.shift().arguments[0], [2, 3], 'contains the third and fourth bytes');
|
||
_qunit2['default'].strictEqual(sourceBuffer.mediaSource_.readyState, 'open', 'The streams should be open if more bytes are appended to an "ended" stream');
|
||
_qunit2['default'].strictEqual(timers.length, 0, 'no more appends are scheduled');
|
||
});
|
||
|
||
_qunit2['default'].test('abort() clears any buffered input', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
this.swfCalls.length = 0;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0]));
|
||
sourceBuffer.abort();
|
||
|
||
timers.pop()();
|
||
_qunit2['default'].strictEqual(this.swfCalls.length, 1, 'called the swf');
|
||
_qunit2['default'].strictEqual(this.swfCalls[0], 'abort', 'invoked abort');
|
||
});
|
||
// requestAnimationFrame is heavily throttled or unscheduled when
|
||
// the browser tab running contrib-media-sources is in a background
|
||
// tab. If that happens, video data can continuously build up in
|
||
// memory and cause the tab or browser to crash.
|
||
_qunit2['default'].test('does not use requestAnimationFrame', function () {
|
||
var oldRFA = _globalWindow2['default'].requestAnimationFrame;
|
||
var requests = 0;
|
||
var sourceBuffer = undefined;
|
||
|
||
_globalWindow2['default'].requestAnimationFrame = function () {
|
||
requests++;
|
||
};
|
||
|
||
sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1, 2, 3]));
|
||
while (timers.length) {
|
||
timers.pop()();
|
||
}
|
||
_qunit2['default'].equal(requests, 0, 'no calls to requestAnimationFrame were made');
|
||
_globalWindow2['default'].requestAnimationFrame = oldRFA;
|
||
});
|
||
_qunit2['default'].test('updating is true while an append is in progress', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var ended = false;
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
sourceBuffer.addEventListener('updateend', function () {
|
||
ended = true;
|
||
});
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
|
||
_qunit2['default'].equal(sourceBuffer.updating, true, 'updating is set');
|
||
|
||
while (!ended) {
|
||
timers.pop()();
|
||
}
|
||
_qunit2['default'].equal(sourceBuffer.updating, false, 'updating is unset');
|
||
});
|
||
|
||
_qunit2['default'].test('throws an error if append is called while updating', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
_qunit2['default'].throws(function () {
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
}, function (e) {
|
||
return e.name === 'InvalidStateError' && e.code === _globalWindow2['default'].DOMException.INVALID_STATE_ERR;
|
||
}, 'threw an InvalidStateError');
|
||
});
|
||
|
||
_qunit2['default'].test('stops updating if abort is called', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var updateEnds = 0;
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
sourceBuffer.addEventListener('updateend', function () {
|
||
updateEnds++;
|
||
});
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
|
||
sourceBuffer.abort();
|
||
_qunit2['default'].equal(sourceBuffer.updating, false, 'no longer updating');
|
||
_qunit2['default'].equal(updateEnds, 1, 'triggered updateend');
|
||
});
|
||
|
||
_qunit2['default'].test('forwards duration overrides to the SWF', function () {
|
||
/* eslint-disable no-unused-vars */
|
||
var ignored = this.mediaSource.duration;
|
||
/* eslint-enable no-unused-vars */
|
||
|
||
_qunit2['default'].deepEqual(this.swfCalls[1], {
|
||
attr: 'duration'
|
||
}, 'requests duration from the SWF');
|
||
|
||
this.mediaSource.duration = 101.3;
|
||
// Setting a duration results in two calls to the swf
|
||
// Ignore the first call (this.swfCalls[2]) as it was just to get the
|
||
// current duration
|
||
_qunit2['default'].deepEqual(this.swfCalls[3], {
|
||
attr: 'duration', value: 101.3
|
||
}, 'set the duration override');
|
||
});
|
||
|
||
_qunit2['default'].test('returns NaN for duration before the SWF is ready', function () {
|
||
this.mediaSource.swfObj = null;
|
||
|
||
_qunit2['default'].ok(isNaN(this.mediaSource.duration), 'duration is NaN');
|
||
});
|
||
|
||
_qunit2['default'].test('calculates the base PTS for the media', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var tags_ = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
// seek to 15 seconds
|
||
this.player.tech_.seeking = function () {
|
||
return true;
|
||
};
|
||
this.player.tech_.currentTime = function () {
|
||
return 15;
|
||
};
|
||
// FLV tags for this segment start at 10 seconds in the media
|
||
// timeline
|
||
tags_.push(
|
||
// zero in the media timeline is PTS 3
|
||
{ pts: (10 + 3) * 1000, bytes: new Uint8Array([10]) }, { pts: (15 + 3) * 1000, bytes: new Uint8Array([15]) });
|
||
|
||
var dataMessage = createDataMessage(tags_);
|
||
|
||
// mock gop start at seek point
|
||
dataMessage.data.segment.tags.videoTags[1].keyFrame = true;
|
||
sourceBuffer.transmuxer_.onmessage(dataMessage);
|
||
|
||
// let the source buffer know the segment start time
|
||
sourceBuffer.timestampOffset = 10;
|
||
|
||
this.swfCalls.length = 0;
|
||
timers.runAll();
|
||
|
||
_qunit2['default'].equal(this.swfCalls.length, 1, 'made a SWF call');
|
||
_qunit2['default'].deepEqual(this.swfCalls[0].arguments[0], [15], 'dropped the early tag');
|
||
});
|
||
|
||
_qunit2['default'].test('remove fires update events', function () {
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
var events = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
sourceBuffer.on(['update', 'updateend'], function (event) {
|
||
events.push(event.type);
|
||
});
|
||
|
||
sourceBuffer.remove(0, 1);
|
||
_qunit2['default'].deepEqual(events, ['update', 'updateend'], 'fired update events');
|
||
_qunit2['default'].equal(sourceBuffer.updating, false, 'finished updating');
|
||
});
|
||
|
||
_qunit2['default'].test('passes endOfStream network errors to the tech', function () {
|
||
this.mediaSource.readyState = 'ended';
|
||
this.mediaSource.endOfStream('network');
|
||
_qunit2['default'].equal(this.player.tech_.error().code, 2, 'set a network error');
|
||
});
|
||
|
||
_qunit2['default'].test('passes endOfStream decode errors to the tech', function () {
|
||
this.mediaSource.readyState = 'ended';
|
||
this.mediaSource.endOfStream('decode');
|
||
|
||
_qunit2['default'].equal(this.player.tech_.error().code, 3, 'set a decode error');
|
||
});
|
||
|
||
_qunit2['default'].test('has addSeekableRange()', function () {
|
||
_qunit2['default'].ok(this.mediaSource.addSeekableRange_, 'has addSeekableRange_');
|
||
});
|
||
|
||
_qunit2['default'].test('fires loadedmetadata after first segment append', function () {
|
||
var loadedmetadataCount = 0;
|
||
|
||
this.mediaSource.tech_.on('loadedmetadata', function () {
|
||
return loadedmetadataCount++;
|
||
});
|
||
|
||
var sourceBuffer = this.mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = postMessage_;
|
||
|
||
_qunit2['default'].equal(loadedmetadataCount, 0, 'loadedmetadata not called on buffer creation');
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
_qunit2['default'].equal(loadedmetadataCount, 0, 'loadedmetadata not called on segment append');
|
||
timers.runAll();
|
||
_qunit2['default'].equal(loadedmetadataCount, 1, 'loadedmetadata fires after first append');
|
||
sourceBuffer.appendBuffer(new Uint8Array([0, 1]));
|
||
timers.runAll();
|
||
_qunit2['default'].equal(loadedmetadataCount, 1, 'loadedmetadata does not fire after second append');
|
||
});
|
||
|
||
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
|
||
},{"../src/flash-constants":38,"../src/flash-source-buffer":40,"../src/videojs-contrib-media-sources.js":45,"global/document":2,"global/window":3,"mux.js":16}],51:[function(require,module,exports){
|
||
(function (global){
|
||
'use strict';
|
||
|
||
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
|
||
|
||
var _globalDocument = require('global/document');
|
||
|
||
var _globalDocument2 = _interopRequireDefault(_globalDocument);
|
||
|
||
var _globalWindow = require('global/window');
|
||
|
||
var _globalWindow2 = _interopRequireDefault(_globalWindow);
|
||
|
||
var _qunit = (typeof window !== "undefined" ? window['QUnit'] : typeof global !== "undefined" ? global['QUnit'] : null);
|
||
|
||
var _qunit2 = _interopRequireDefault(_qunit);
|
||
|
||
var _sinon = (typeof window !== "undefined" ? window['sinon'] : typeof global !== "undefined" ? global['sinon'] : null);
|
||
|
||
var _sinon2 = _interopRequireDefault(_sinon);
|
||
|
||
var _videoJs = (typeof window !== "undefined" ? window['videojs'] : typeof global !== "undefined" ? global['videojs'] : null);
|
||
|
||
var _videoJs2 = _interopRequireDefault(_videoJs);
|
||
|
||
var _srcHtmlMediaSource = require('../src/html-media-source');
|
||
|
||
var _srcHtmlMediaSource2 = _interopRequireDefault(_srcHtmlMediaSource);
|
||
|
||
var _srcVirtualSourceBuffer = require('../src/virtual-source-buffer');
|
||
|
||
// we disable this because browserify needs to include these files
|
||
// but the exports are not important
|
||
/* eslint-disable no-unused-vars */
|
||
|
||
var _srcVideojsContribMediaSourcesJs = require('../src/videojs-contrib-media-sources.js');
|
||
|
||
/* eslint-enable no-unused-vars */
|
||
|
||
_qunit2['default'].module('videojs-contrib-media-sources - HTML', {
|
||
beforeEach: function beforeEach() {
|
||
this.fixture = _globalDocument2['default'].getElementById('qunit-fixture');
|
||
this.video = _globalDocument2['default'].createElement('video');
|
||
this.fixture.appendChild(this.video);
|
||
this.source = _globalDocument2['default'].createElement('source');
|
||
|
||
this.player = (0, _videoJs2['default'])(this.video);
|
||
// add a fake source so that we can get this.player_ on sourceopen
|
||
this.url = 'fake.ts';
|
||
this.source.src = this.url;
|
||
this.video.appendChild(this.source);
|
||
|
||
// Mock the environment's timers because certain things - particularly
|
||
// player readiness - are asynchronous in video.js 5.
|
||
this.clock = _sinon2['default'].useFakeTimers();
|
||
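// save the native MediaSource and swap in a minimal EventTarget-based stub
// for the duration of each test; afterEach restores the original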
this.oldMediaSource = _globalWindow2['default'].MediaSource || _globalWindow2['default'].WebKitMediaSource;
|
||
_globalWindow2['default'].MediaSource = _videoJs2['default'].extend(_videoJs2['default'].EventTarget, {
|
||
constructor: function constructor() {
|
||
this.isNative = true;
|
||
this.sourceBuffers = [];
|
||
this.duration = NaN;
|
||
},
|
||
addSourceBuffer: function addSourceBuffer(type) {
|
||
var buffer = new (_videoJs2['default'].extend(_videoJs2['default'].EventTarget, {
|
||
type: type,
|
||
appendBuffer: function appendBuffer() {}
|
||
}))();
|
||
|
||
this.sourceBuffers.push(buffer);
|
||
return buffer;
|
||
}
|
||
});
|
||
_globalWindow2['default'].MediaSource.isTypeSupported = function (mime) {
|
||
return true;
|
||
};
|
||
_globalWindow2['default'].WebKitMediaSource = _globalWindow2['default'].MediaSource;
|
||
},
|
||
afterEach: function afterEach() {
|
||
this.clock.restore();
|
||
this.player.dispose();
|
||
_globalWindow2['default'].MediaSource = this.oldMediaSource;
|
||
_globalWindow2['default'].WebKitMediaSource = _globalWindow2['default'].MediaSource;
|
||
}
|
||
});
|
||
|
||
_qunit2['default'].test('constructs a native MediaSource', function () {
|
||
_qunit2['default'].ok(new _videoJs2['default'].MediaSource().nativeMediaSource_.isNative, 'constructed a MediaSource');
|
||
});
|
||
|
||
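// helper that fakes the transmuxer's WebWorker 'data' message for a segment of
// the given type, backed by `typedArray`; any properties on `extraObject`
// (e.g. captions or an initSegment override) are merged onto the segment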
var createDataMessage = function createDataMessage(type, typedArray, extraObject) {
  var message = {
    data: {
      action: 'data',
      segment: {
        type: type,
        data: typedArray.buffer,
        initSegment: {
          data: typedArray.buffer,
          byteOffset: typedArray.byteOffset,
          byteLength: typedArray.byteLength
        }
      },
      byteOffset: typedArray.byteOffset,
      byteLength: typedArray.byteLength
    }
  };

  return Object.keys(extraObject || {}).reduce(function (obj, key) {
    obj.data.segment[key] = extraObject[key];
    return obj;
  }, message);
};

// Create a WebWorker-style message that signals the transmuxer is done
var doneMessage = {
  data: {
    action: 'done'
  }
};

// send fake data to the transmuxer to trigger the creation of the
// native source buffers
var initializeNativeSourceBuffers = function initializeNativeSourceBuffers(sourceBuffer) {
  // initialize an audio source buffer
  sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', new Uint8Array(1)));

  // initialize a video source buffer
  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', new Uint8Array(1)));

  // instruct the transmuxer to flush the "data" it has buffered so
  // far
  sourceBuffer.transmuxer_.onmessage(doneMessage);
};

_qunit2['default'].test('creates mp4 source buffers for mp2t segments', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');

  initializeNativeSourceBuffers(sourceBuffer);

  _qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
  _qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.4d400d"', 'video buffer has the default codec');

  _qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
  _qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.2"', 'audio buffer has the default codec');
  _qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
  _qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
  _qunit2['default'].ok(sourceBuffer.transmuxer_, 'created a transmuxer');
});

_qunit2['default'].test('the terminate is called on the transmuxer when the media source is killed', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var terminates = 0;
|
||
|
||
sourceBuffer.transmuxer_ = {
|
||
terminate: function terminate() {
|
||
terminates++;
|
||
}
|
||
};
|
||
|
||
mediaSource.trigger('sourceclose');
|
||
|
||
_qunit2['default'].equal(terminates, 1, 'called terminate on transmux web worker');
|
||
});
|
||
|
||
_qunit2['default'].test('duration is faked when playing a live stream', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
mediaSource.duration = Infinity;
|
||
mediaSource.nativeMediaSource_.duration = 100;
|
||
_qunit2['default'].equal(mediaSource.nativeMediaSource_.duration, 100, 'native duration was not set to infinity');
|
||
_qunit2['default'].equal(mediaSource.duration, Infinity, 'the MediaSource wrapper pretends it has an infinite duration');
|
||
});
|
||
|
||
_qunit2['default'].test('duration uses the underlying MediaSource\'s duration when not live', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
mediaSource.duration = 100;
|
||
mediaSource.nativeMediaSource_.duration = 120;
|
||
_qunit2['default'].equal(mediaSource.duration, 120, 'the MediaSource wrapper returns the native duration');
|
||
});
|
||
|
||
_qunit2['default'].test('abort on the fake source buffer calls abort on the real ones', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var messages = [];
|
||
var aborts = 0;
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
sourceBuffer.transmuxer_.postMessage = function (message) {
|
||
messages.push(message);
|
||
};
|
||
sourceBuffer.bufferUpdating_ = true;
|
||
sourceBuffer.videoBuffer_.abort = function () {
|
||
aborts++;
|
||
};
|
||
sourceBuffer.audioBuffer_.abort = function () {
|
||
aborts++;
|
||
};
|
||
|
||
sourceBuffer.abort();
|
||
|
||
_qunit2['default'].equal(aborts, 2, 'called abort on both');
|
||
_qunit2['default'].equal(sourceBuffer.bufferUpdating_, false, 'set updating to false');
|
||
_qunit2['default'].equal(messages.length, 1, 'has one message');
|
||
_qunit2['default'].equal(messages[0].action, 'reset', 'reset called on transmuxer');
|
||
});
|
||
|
||
_qunit2['default'].test('calling remove deletes cues and invokes remove on any extant source buffers', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var removedCue = [];
|
||
var removes = 0;
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
sourceBuffer.inbandTextTracks_ = {
|
||
CC1: {
|
||
removeCue: function removeCue(cue) {
|
||
removedCue.push(cue);
|
||
this.cues.splice(this.cues.indexOf(cue), 1);
|
||
},
|
||
cues: [{ startTime: 10, endTime: 20, text: 'delete me' }, { startTime: 0, endTime: 2, text: 'save me' }]
|
||
}
|
||
};
|
||
mediaSource.videoBuffer_.remove = function (start, end) {
|
||
if (start === 3 && end === 10) {
|
||
removes++;
|
||
}
|
||
};
|
||
mediaSource.audioBuffer_.remove = function (start, end) {
|
||
if (start === 3 && end === 10) {
|
||
removes++;
|
||
}
|
||
};
|
||
|
||
sourceBuffer.remove(3, 10);
|
||
|
||
_qunit2['default'].equal(removes, 2, 'called remove on both sourceBuffers');
|
||
_qunit2['default'].equal(sourceBuffer.inbandTextTracks_.CC1.cues.length, 1, 'one cue remains after remove');
|
||
_qunit2['default'].equal(removedCue[0].text, 'delete me', 'the cue that overlapped the remove region was removed');
|
||
});
|
||
|
||
_qunit2['default'].test('calling remove properly handles absence of cues (null)', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
sourceBuffer.inbandTextTracks_ = {
|
||
CC1: {
|
||
cues: null
|
||
}
|
||
};
|
||
|
||
mediaSource.videoBuffer_.remove = function (start, end) {
|
||
// pass
|
||
};
|
||
mediaSource.audioBuffer_.remove = function (start, end) {
|
||
// pass
|
||
};
|
||
|
||
// this call should not raise an exception
|
||
sourceBuffer.remove(3, 10);
|
||
|
||
_qunit2['default'].equal(sourceBuffer.inbandTextTracks_.CC1.cues, null, 'cues are still null');
|
||
});
|
||
|
||
_qunit2['default'].test('removing doesn\'t happen with audio disabled', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var muxedBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
// creating this audio buffer disables audio in the muxed one
|
||
var audioBuffer = mediaSource.addSourceBuffer('audio/mp2t; codecs="mp4a.40.2"');
|
||
var removedCue = [];
|
||
var removes = 0;
|
||
|
||
initializeNativeSourceBuffers(muxedBuffer);
|
||
|
||
muxedBuffer.inbandTextTracks_ = {
|
||
CC1: {
|
||
removeCue: function removeCue(cue) {
|
||
removedCue.push(cue);
|
||
this.cues.splice(this.cues.indexOf(cue), 1);
|
||
},
|
||
cues: [{ startTime: 10, endTime: 20, text: 'delete me' }, { startTime: 0, endTime: 2, text: 'save me' }]
|
||
}
|
||
};
|
||
mediaSource.videoBuffer_.remove = function (start, end) {
|
||
if (start === 3 && end === 10) {
|
||
removes++;
|
||
}
|
||
};
|
||
mediaSource.audioBuffer_.remove = function (start, end) {
|
||
if (start === 3 && end === 10) {
|
||
removes++;
|
||
}
|
||
};
|
||
|
||
muxedBuffer.remove(3, 10);
|
||
|
||
_qunit2['default'].equal(removes, 1, 'called remove on only one source buffer');
|
||
_qunit2['default'].equal(muxedBuffer.inbandTextTracks_.CC1.cues.length, 1, 'one cue remains after remove');
|
||
_qunit2['default'].equal(removedCue[0].text, 'delete me', 'the cue that overlapped the remove region was removed');
|
||
});
|
||
|
||
_qunit2['default'].test('readyState delegates to the native implementation', function () {
|
||
var mediaSource = new _srcHtmlMediaSource2['default']();
|
||
|
||
_qunit2['default'].equal(mediaSource.readyState, mediaSource.nativeMediaSource_.readyState, 'readyStates are equal');
|
||
|
||
mediaSource.nativeMediaSource_.readyState = 'nonsense stuff';
|
||
_qunit2['default'].equal(mediaSource.readyState, mediaSource.nativeMediaSource_.readyState, 'readyStates are equal');
|
||
});
|
||
|
||
_qunit2['default'].test('addSeekableRange_ throws an error for media with known duration', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
|
||
mediaSource.duration = 100;
|
||
_qunit2['default'].throws(function () {
|
||
mediaSource.addSeekableRange_(0, 100);
|
||
}, 'cannot add seekable range');
|
||
});
|
||
|
||
_qunit2['default'].test('addSeekableRange_ adds to the native MediaSource duration', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
|
||
mediaSource.duration = Infinity;
|
||
mediaSource.addSeekableRange_(120, 240);
|
||
_qunit2['default'].equal(mediaSource.nativeMediaSource_.duration, 240, 'set native duration');
|
||
_qunit2['default'].equal(mediaSource.duration, Infinity, 'emulated duration');
|
||
|
||
mediaSource.addSeekableRange_(120, 220);
|
||
_qunit2['default'].equal(mediaSource.nativeMediaSource_.duration, 240, 'ignored the smaller range');
|
||
_qunit2['default'].equal(mediaSource.duration, Infinity, 'emulated duration');
|
||
});
|
||
|
||
_qunit2['default'].test('appendBuffer error triggers on the player', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var error = false;
|
||
|
||
mediaSource.player_ = this.player;
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
sourceBuffer.videoBuffer_.appendBuffer = function () {
|
||
throw new Error();
|
||
};
|
||
|
||
this.player.on('error', function () {
|
||
return error = true;
|
||
});
|
||
|
||
// send fake data to the source buffer from the transmuxer to append to native buffer
|
||
// initializeNativeSourceBuffers does the same thing to trigger the creation of
|
||
// native source buffers.
|
||
var fakeTransmuxerMessage = initializeNativeSourceBuffers;
|
||
|
||
fakeTransmuxerMessage(sourceBuffer);
|
||
|
||
this.clock.tick(1);
|
||
|
||
_qunit2['default'].ok(error, 'error triggered on player');
|
||
});
|
||
|
||
_qunit2['default'].test('transmuxes mp2t segments', function () {
|
||
var mp2tSegments = [];
|
||
var mp4Segments = [];
|
||
var data = new Uint8Array(1);
|
||
var mediaSource = undefined;
|
||
var sourceBuffer = undefined;
|
||
|
||
mediaSource = new _videoJs2['default'].MediaSource();
|
||
sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = function (segment) {
|
||
if (segment.action === 'push') {
|
||
var buffer = new Uint8Array(segment.data, segment.byteOffset, segment.byteLength);
|
||
|
||
mp2tSegments.push(buffer);
|
||
}
|
||
};
|
||
|
||
sourceBuffer.concatAndAppendSegments_ = function (segmentObj, destinationBuffer) {
|
||
mp4Segments.push(segmentObj);
|
||
};
|
||
|
||
sourceBuffer.appendBuffer(data);
|
||
_qunit2['default'].equal(mp2tSegments.length, 1, 'transmuxed one segment');
|
||
_qunit2['default'].equal(mp2tSegments[0].length, 1, 'did not alter the segment');
|
||
_qunit2['default'].equal(mp2tSegments[0][0], data[0], 'did not alter the segment');
|
||
|
||
// an init segment
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('video', new Uint8Array(1)));
|
||
|
||
// a media segment
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', new Uint8Array(1)));
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 0, 'segments are not appended until after the `done` message');
|
||
|
||
// send `done` message
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 2, 'appended the segments');
|
||
});
|
||
|
||
_qunit2['default'].test('handles typed-arrays that are subsets of their underlying buffer', function () {
|
||
var mp2tSegments = [];
|
||
var mp4Segments = [];
|
||
var dataBuffer = new Uint8Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
|
||
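// take a two-byte view into the middle of the backing buffer so the append
// has to respect byteOffset/byteLength rather than the whole ArrayBuffer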
var data = dataBuffer.subarray(5, 7);
|
||
var mediaSource = undefined;
|
||
var sourceBuffer = undefined;
|
||
|
||
mediaSource = new _videoJs2['default'].MediaSource();
|
||
sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
sourceBuffer.transmuxer_.postMessage = function (segment) {
|
||
if (segment.action === 'push') {
|
||
var buffer = new Uint8Array(segment.data, segment.byteOffset, segment.byteLength);
|
||
|
||
mp2tSegments.push(buffer);
|
||
}
|
||
};
|
||
|
||
sourceBuffer.concatAndAppendSegments_ = function (segmentObj, destinationBuffer) {
|
||
mp4Segments.push(segmentObj.segments[0]);
|
||
};
|
||
|
||
sourceBuffer.appendBuffer(data);
|
||
|
||
_qunit2['default'].equal(mp2tSegments.length, 1, 'emitted the fragment');
|
||
_qunit2['default'].equal(mp2tSegments[0].length, 2, 'correctly handled a typed-array that is a subset');
|
||
_qunit2['default'].equal(mp2tSegments[0][0], 5, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp2tSegments[0][1], 6, 'fragment contains the correct second byte');
|
||
|
||
// an init segment
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('video', data));
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 0, 'segments are not appended until after the `done` message');
|
||
|
||
// send `done` message
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 1, 'emitted the fragment');
|
||
_qunit2['default'].equal(mp4Segments[0].length, 2, 'correctly handled a typed-array that is a subset');
|
||
_qunit2['default'].equal(mp4Segments[0][0], 5, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[0][1], 6, 'fragment contains the correct second byte');
|
||
});
|
||
|
||
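// this test walks the init-segment bookkeeping end to end: the first audio
// segment gets the init segment prepended, the next one does not, an audio
// track change re-arms appendAudioInitSegment_, and a rendition switch
// ('mediachange') re-arms it again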
_qunit2['default'].test('only appends audio init segment for first segment or on audio/media changes', function () {
|
||
var mp4Segments = [];
|
||
var initBuffer = new Uint8Array([0, 1]);
|
||
var dataBuffer = new Uint8Array([2, 3]);
|
||
var mediaSource = undefined;
|
||
var sourceBuffer = undefined;
|
||
|
||
mediaSource = new _videoJs2['default'].MediaSource();
|
||
sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
sourceBuffer.audioDisabled_ = false;
|
||
mediaSource.player_ = this.player;
|
||
mediaSource.url_ = this.url;
|
||
mediaSource.trigger('sourceopen');
|
||
|
||
sourceBuffer.concatAndAppendSegments_ = function (segmentObj, destinationBuffer) {
|
||
var segment = segmentObj.segments.reduce(function (seg, arr) {
|
||
return seg.concat(Array.from(arr));
|
||
}, []);
|
||
|
||
mp4Segments.push(segment);
|
||
};
|
||
|
||
_qunit2['default'].ok(sourceBuffer.appendAudioInitSegment_, 'will append init segment next');
|
||
|
||
// an init segment
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 0, 'segments are not appended until after the `done` message');
|
||
|
||
// send `done` message
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 1, 'emitted the fragment');
|
||
// Contains init segment on first segment
|
||
_qunit2['default'].equal(mp4Segments[0][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[0][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[0][2], 2, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[0][3], 3, 'fragment contains the correct fourth byte');
|
||
_qunit2['default'].ok(!sourceBuffer.appendAudioInitSegment_, 'will not append init segment next');
|
||
|
||
dataBuffer = new Uint8Array([4, 5]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 2, 'emitted the fragment');
|
||
// does not contain init segment on next segment
|
||
_qunit2['default'].equal(mp4Segments[1][0], 4, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[1][1], 5, 'fragment contains the correct second byte');
|
||
|
||
// audio track change
|
||
this.player.audioTracks().trigger('change');
|
||
sourceBuffer.audioDisabled_ = false;
|
||
_qunit2['default'].ok(sourceBuffer.appendAudioInitSegment_, 'audio change sets appendAudioInitSegment_');
|
||
dataBuffer = new Uint8Array([6, 7]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 3, 'emitted the fragment');
|
||
// contains init segment after audio track change
|
||
_qunit2['default'].equal(mp4Segments[2][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[2][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[2][2], 6, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[2][3], 7, 'fragment contains the correct fourth byte');
|
||
_qunit2['default'].ok(!sourceBuffer.appendAudioInitSegment_, 'will not append init segment next');
|
||
|
||
dataBuffer = new Uint8Array([8, 9]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 4, 'emitted the fragment');
|
||
// does not contain init segment in next segment
|
||
_qunit2['default'].equal(mp4Segments[3][0], 8, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[3][1], 9, 'fragment contains the correct second byte');
|
||
_qunit2['default'].ok(!sourceBuffer.appendAudioInitSegment_, 'will not append init segment next');
|
||
|
||
// rendition switch
|
||
this.player.trigger('mediachange');
|
||
_qunit2['default'].ok(sourceBuffer.appendAudioInitSegment_, 'media change sets appendAudioInitSegment_');
|
||
dataBuffer = new Uint8Array([10, 11]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 5, 'emitted the fragment');
|
||
// contains init segment again after the rendition switch
|
||
_qunit2['default'].equal(mp4Segments[4][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[4][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[4][2], 10, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[4][3], 11, 'fragment contains the correct fourth byte');
|
||
_qunit2['default'].ok(!sourceBuffer.appendAudioInitSegment_, 'will not append init segment next');
|
||
});
|
||
|
||
_qunit2['default'].test('appends video init segment for every segment', function () {
|
||
var mp4Segments = [];
|
||
var initBuffer = new Uint8Array([0, 1]);
|
||
var dataBuffer = new Uint8Array([2, 3]);
|
||
var mediaSource = undefined;
|
||
var sourceBuffer = undefined;
|
||
|
||
mediaSource = new _videoJs2['default'].MediaSource();
|
||
sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
mediaSource.player_ = this.player;
|
||
mediaSource.url_ = this.url;
|
||
mediaSource.trigger('sourceopen');
|
||
|
||
sourceBuffer.concatAndAppendSegments_ = function (segmentObj, destinationBuffer) {
|
||
var segment = segmentObj.segments.reduce(function (seg, arr) {
|
||
return seg.concat(Array.from(arr));
|
||
}, []);
|
||
|
||
mp4Segments.push(segment);
|
||
};
|
||
|
||
// an init segment
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('video', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 0, 'segments are not appended until after the `done` message');
|
||
|
||
// send `done` message
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
|
||
// Segments are concatenated
|
||
_qunit2['default'].equal(mp4Segments.length, 1, 'emitted the fragment');
|
||
// Contains init segment on first segment
|
||
_qunit2['default'].equal(mp4Segments[0][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[0][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[0][2], 2, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[0][3], 3, 'fragment contains the correct fourth byte');
|
||
|
||
dataBuffer = new Uint8Array([4, 5]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('video', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 2, 'emitted the fragment');
|
||
_qunit2['default'].equal(mp4Segments[1][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[1][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[1][2], 4, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[1][3], 5, 'fragment contains the correct fourth byte');
|
||
|
||
dataBuffer = new Uint8Array([6, 7]);
|
||
sourceBuffer.transmuxer_.onmessage(createDataMessage('video', dataBuffer, {
|
||
initSegment: {
|
||
data: initBuffer.buffer,
|
||
byteOffset: initBuffer.byteOffset,
|
||
byteLength: initBuffer.byteLength
|
||
}
|
||
}));
|
||
sourceBuffer.transmuxer_.onmessage(doneMessage);
|
||
_qunit2['default'].equal(mp4Segments.length, 3, 'emitted the fragment');
|
||
// contains the init segment again on this segment
|
||
_qunit2['default'].equal(mp4Segments[2][0], 0, 'fragment contains the correct first byte');
|
||
_qunit2['default'].equal(mp4Segments[2][1], 1, 'fragment contains the correct second byte');
|
||
_qunit2['default'].equal(mp4Segments[2][2], 6, 'fragment contains the correct third byte');
|
||
_qunit2['default'].equal(mp4Segments[2][3], 7, 'fragment contains the correct fourth byte');
|
||
});
|
||
|
||
_qunit2['default'].test('handles empty codec string value', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs=""');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.4d400d"', 'video buffer has the default codec');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.2"', 'audio buffer has the default codec');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('can create an audio buffer by itself', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="mp4a.40.2"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(!mediaSource.videoBuffer_, 'did not create a video buffer');
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.2"', 'audio buffer has the default codec');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('can create a video buffer by itself', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.4d400d"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(!mediaSource.audioBuffer_, 'did not create an audio buffer');
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.4d400d"', 'video buffer has the codec that was passed');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('handles invalid codec string', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="nope"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.4d400d"', 'video buffer has the default codec');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.2"', 'audio buffer has the default codec');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
|
||
});
|
||
|
||
_qunit2['default'].test('handles codec strings in reverse order', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="mp4a.40.5,avc1.64001f"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.64001f"', 'video buffer has the passed codec');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.5"', 'audio buffer has the passed codec');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers.length, 1, 'created one virtual buffer');
|
||
_qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
|
||
_qunit2['default'].ok(sourceBuffer.transmuxer_, 'created a transmuxer');
|
||
});
|
||
|
||
_qunit2['default'].test('forwards codec strings to native buffers when specified', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.64001f,mp4a.40.5"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.64001f"', 'passed the video codec along');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.5"', 'passed the audio codec along');
|
||
});
|
||
|
||
_qunit2['default'].test('parses old-school apple codec strings to the modern standard', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.100.31,mp4a.40.5"');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.64001f"', 'passed the video codec along');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.5"', 'passed the audio codec along');
|
||
});
|
||
|
||
_qunit2['default'].test('specifies reasonable codecs if none are specified', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
_qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
|
||
_qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.4d400d"', 'passed the video codec along');
|
||
|
||
_qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
|
||
_qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.2"', 'passed the audio codec along');
|
||
});
|
||
|
||
_qunit2['default'].test('virtual buffers are updating if either native buffer is', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
mediaSource.videoBuffer_.updating = true;
|
||
mediaSource.audioBuffer_.updating = false;
|
||
_qunit2['default'].equal(sourceBuffer.updating, true, 'virtual buffer is updating');
|
||
|
||
mediaSource.audioBuffer_.updating = true;
|
||
_qunit2['default'].equal(sourceBuffer.updating, true, 'virtual buffer is updating');
|
||
|
||
mediaSource.videoBuffer_.updating = false;
|
||
_qunit2['default'].equal(sourceBuffer.updating, true, 'virtual buffer is updating');
|
||
|
||
mediaSource.audioBuffer_.updating = false;
|
||
_qunit2['default'].equal(sourceBuffer.updating, false, 'virtual buffer is not updating');
|
||
});
|
||
|
||
_qunit2['default'].test('virtual buffers have a position buffered if both native buffers do', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
mediaSource.videoBuffer_.buffered = _videoJs2['default'].createTimeRanges([[0, 10], [20, 30]]);
|
||
mediaSource.audioBuffer_.buffered = _videoJs2['default'].createTimeRanges([[0, 7], [11, 15], [16, 40]]);
|
||
|
||
_qunit2['default'].equal(sourceBuffer.buffered.length, 2, 'two buffered ranges');
|
||
_qunit2['default'].equal(sourceBuffer.buffered.start(0), 0, 'first starts at zero');
|
||
_qunit2['default'].equal(sourceBuffer.buffered.end(0), 7, 'first ends at seven');
|
||
_qunit2['default'].equal(sourceBuffer.buffered.start(1), 20, 'second starts at twenty');
|
||
_qunit2['default'].equal(sourceBuffer.buffered.end(1), 30, 'second ends at 30');
|
||
});
|
||
|
||
_qunit2['default'].test('disabled audio does not affect buffered property', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var muxedBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
// creating a separate audio buffer disables audio on the muxed one
|
||
var audioBuffer = mediaSource.addSourceBuffer('audio/mp2t; codecs="mp4a.40.2"');
|
||
|
||
initializeNativeSourceBuffers(muxedBuffer);
|
||
|
||
mediaSource.videoBuffer_.buffered = _videoJs2['default'].createTimeRanges([[1, 10]]);
|
||
mediaSource.audioBuffer_.buffered = _videoJs2['default'].createTimeRanges([[2, 11]]);
|
||
|
||
_qunit2['default'].equal(audioBuffer.buffered.length, 1, 'one buffered range');
|
||
_qunit2['default'].equal(audioBuffer.buffered.start(0), 2, 'starts at two');
|
||
_qunit2['default'].equal(audioBuffer.buffered.end(0), 11, 'ends at eleven');
|
||
_qunit2['default'].equal(muxedBuffer.buffered.length, 1, 'one buffered range');
|
||
_qunit2['default'].equal(muxedBuffer.buffered.start(0), 1, 'starts at one');
|
||
_qunit2['default'].equal(muxedBuffer.buffered.end(0), 10, 'ends at ten');
|
||
});
|
||
|
||
_qunit2['default'].test('sets transmuxer baseMediaDecodeTime on appends', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var resets = [];
|
||
|
||
sourceBuffer.transmuxer_.postMessage = function (message) {
|
||
if (message.action === 'setTimestampOffset') {
|
||
resets.push(message.timestampOffset);
|
||
}
|
||
};
|
||
|
||
sourceBuffer.timestampOffset = 42;
|
||
|
||
_qunit2['default'].equal(resets.length, 1, 'reset called');
|
||
_qunit2['default'].equal(resets[0], 42, 'set the baseMediaDecodeTime based on timestampOffset');
|
||
});
|
||
|
||
_qunit2['default'].test('aggregates source buffer update events', function () {
|
||
var mediaSource = new _videoJs2['default'].MediaSource();
|
||
var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
|
||
var updates = 0;
|
||
var updateends = 0;
|
||
var updatestarts = 0;
|
||
|
||
initializeNativeSourceBuffers(sourceBuffer);
|
||
|
||
mediaSource.player_ = this.player;
|
||
|
||
sourceBuffer.addEventListener('updatestart', function () {
|
||
updatestarts++;
|
||
});
|
||
sourceBuffer.addEventListener('update', function () {
|
||
updates++;
|
||
});
|
||
sourceBuffer.addEventListener('updateend', function () {
|
||
updateends++;
|
||
});
|
||
|
||
_qunit2['default'].equal(updatestarts, 0, 'no updatestarts before a `done` message is received');
|
||
_qunit2['default'].equal(updates, 0, 'no updates before a `done` message is received');
|
||
_qunit2['default'].equal(updateends, 0, 'no updateends before a `done` message is received');
|
||
|
||
// the video buffer begins updating first:
|
||
sourceBuffer.videoBuffer_.updating = true;
|
||
sourceBuffer.audioBuffer_.updating = false;
|
||
sourceBuffer.videoBuffer_.trigger('updatestart');
|
||
_qunit2['default'].equal(updatestarts, 1, 'aggregated updatestart');
|
||
sourceBuffer.audioBuffer_.updating = true;
|
||
sourceBuffer.audioBuffer_.trigger('updatestart');
|
||
_qunit2['default'].equal(updatestarts, 1, 'aggregated updatestart');
|
||
|
||
// the audio buffer finishes first:
|
||
sourceBuffer.audioBuffer_.updating = false;
|
||
sourceBuffer.videoBuffer_.updating = true;
|
||
sourceBuffer.audioBuffer_.trigger('update');
|
||
_qunit2['default'].equal(updates, 0, 'waited for the second update');
|
||
sourceBuffer.videoBuffer_.updating = false;
|
||
sourceBuffer.videoBuffer_.trigger('update');
|
||
_qunit2['default'].equal(updates, 1, 'aggregated update');
|
||
|
||
// audio finishes first:
|
||
sourceBuffer.videoBuffer_.updating = true;
|
||
sourceBuffer.audioBuffer_.updating = false;
|
||
sourceBuffer.audioBuffer_.trigger('updateend');
|
||
_qunit2['default'].equal(updateends, 0, 'waited for the second updateend');
|
||
sourceBuffer.videoBuffer_.updating = false;
|
||
sourceBuffer.videoBuffer_.trigger('updateend');
|
||
_qunit2['default'].equal(updateends, 1, 'aggregated updateend');
|
||
});
|
||
|
||
_qunit2['default'].test('translates caption events into WebVTT cues', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  var types = [];
  var hls608 = 0;

  mediaSource.player_ = {
    addRemoteTextTrack: function addRemoteTextTrack(options) {
      types.push(options.kind);
      return {
        track: {
          kind: options.kind,
          label: options.label,
          cues: [],
          addCue: function addCue(cue) {
            this.cues.push(cue);
          }
        }
      };
    },
    textTracks: function textTracks() {
      return {
        getTrackById: function getTrackById() {}
      };
    },
    remoteTextTracks: function remoteTextTracks() {},
    tech_: new _videoJs2['default'].EventTarget()
  };
  mediaSource.player_.tech_.on('usage', function (event) {
    if (event.name === 'hls-608') {
      hls608++;
    }
  });
  sourceBuffer.timestampOffset = 10;
  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', new Uint8Array(1), {
    captions: [{
      startTime: 1,
      endTime: 3,
      text: 'This is an in-band caption in CC1',
      stream: 'CC1'
    }],
    captionStreams: { CC1: true }
  }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);
  var cues = sourceBuffer.inbandTextTracks_.CC1.cues;

  _qunit2['default'].equal(hls608, 1, 'one hls-608 event was triggered');
  _qunit2['default'].equal(types.length, 1, 'created one text track');
  _qunit2['default'].equal(types[0], 'captions', 'the type was captions');
  _qunit2['default'].equal(cues.length, 1, 'created one cue');
  _qunit2['default'].equal(cues[0].text, 'This is an in-band caption in CC1', 'included the text');
  _qunit2['default'].equal(cues[0].startTime, 11, 'started at eleven');
  _qunit2['default'].equal(cues[0].endTime, 13, 'ended at thirteen');
});

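// Hedged sketch of the translation checked above: each in-band caption becomes a cue whose start
// and end times are shifted by the buffer's timestampOffset (1 + 10 = 11 and 3 + 10 = 13) and is
// added to a track keyed by its CEA-608 channel ('CC1'). The unused helper below assumes a VTTCue
// constructor is available on window in the test environment; it is illustrative only.
var exampleCaptionToCue = function (caption, timestampOffset) {
  return new window.VTTCue(
    caption.startTime + timestampOffset,
    caption.endTime + timestampOffset,
    caption.text
  );
};
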
_qunit2['default'].test('captions use existing tracks with id equal to CC#', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  var addTrackCalled = 0;
  var tracks = {
    CC1: {
      kind: 'captions',
      label: 'CC1',
      id: 'CC1',
      cues: [],
      addCue: function addCue(cue) {
        this.cues.push(cue);
      }
    },
    CC2: {
      kind: 'captions',
      label: 'CC2',
      id: 'CC2',
      cues: [],
      addCue: function addCue(cue) {
        this.cues.push(cue);
      }
    }
  };

  mediaSource.player_ = {
    addRemoteTextTrack: function addRemoteTextTrack(options) {
      addTrackCalled++;
    },
    textTracks: function textTracks() {
      return {
        getTrackById: function getTrackById(id) {
          return tracks[id];
        }
      };
    },
    remoteTextTracks: function remoteTextTracks() {},
    tech_: new _videoJs2['default'].EventTarget()
  };
  sourceBuffer.timestampOffset = 10;
  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', new Uint8Array(1), {
    captions: [{
      stream: 'CC1',
      startTime: 1,
      endTime: 3,
      text: 'This is an in-band caption in CC1'
    }, {
      stream: 'CC2',
      startTime: 1,
      endTime: 3,
      text: 'This is an in-band caption in CC2'
    }],
    captionStreams: { CC1: true, CC2: true }
  }));

  sourceBuffer.transmuxer_.onmessage(doneMessage);
  var cues = sourceBuffer.inbandTextTracks_.CC1.cues;

  _qunit2['default'].equal(addTrackCalled, 0, 'no tracks were created');
  _qunit2['default'].equal(tracks.CC1.cues.length, 1, 'CC1 contains 1 cue');
  _qunit2['default'].equal(tracks.CC2.cues.length, 1, 'CC2 contains 1 cue');

  _qunit2['default'].equal(tracks.CC1.cues[0].text, 'This is an in-band caption in CC1', 'CC1 contains the right cue');
  _qunit2['default'].equal(tracks.CC2.cues[0].text, 'This is an in-band caption in CC2', 'CC2 contains the right cue');
});

_qunit2['default'].test('translates metadata events into WebVTT cues', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');

  mediaSource.duration = Infinity;
  mediaSource.nativeMediaSource_.duration = 60;

  var types = [];
  var metadata = [{
    cueTime: 2,
    frames: [{
      url: 'This is a url tag'
    }, {
      value: 'This is a text tag'
    }]
  }, {
    cueTime: 12,
    frames: [{
      data: 'This is a priv tag'
    }]
  }];

  metadata.dispatchType = 0x10;
  mediaSource.player_ = {
    addRemoteTextTrack: function addRemoteTextTrack(options) {
      types.push(options.kind);
      return {
        track: {
          kind: options.kind,
          label: options.label,
          cues: [],
          addCue: function addCue(cue) {
            this.cues.push(cue);
          }
        }
      };
    },
    remoteTextTracks: function remoteTextTracks() {}
  };
  sourceBuffer.timestampOffset = 10;

  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', new Uint8Array(1), {
    metadata: metadata
  }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);

  _qunit2['default'].equal(sourceBuffer.metadataTrack_.inBandMetadataTrackDispatchType, 16, 'in-band metadata track dispatch type correctly set');
  var cues = sourceBuffer.metadataTrack_.cues;

  _qunit2['default'].equal(types.length, 1, 'created one text track');
  _qunit2['default'].equal(types[0], 'metadata', 'the type was metadata');
  _qunit2['default'].equal(cues.length, 3, 'created three cues');
  _qunit2['default'].equal(cues[0].text, 'This is a url tag', 'included the text');
  _qunit2['default'].equal(cues[0].startTime, 12, 'started at twelve');
  _qunit2['default'].equal(cues[0].endTime, 22, 'ended at the startTime of the next cue (22)');
  _qunit2['default'].equal(cues[1].text, 'This is a text tag', 'included the text');
  _qunit2['default'].equal(cues[1].startTime, 12, 'started at twelve');
  _qunit2['default'].equal(cues[1].endTime, 22, 'ended at the startTime of the next cue (22)');
  _qunit2['default'].equal(cues[2].text, 'This is a priv tag', 'included the text');
  _qunit2['default'].equal(cues[2].startTime, 22, 'started at twenty-two');
  _qunit2['default'].equal(cues[2].endTime, Number.MAX_VALUE, 'ended at the maximum value');
  mediaSource.duration = 100;
  mediaSource.trigger('sourceended');
  _qunit2['default'].equal(cues[2].endTime, mediaSource.duration, 'last cue endTime updated to the duration after sourceended');
});

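// To summarize the assertions above: ID3 metadata frames become cues on a 'metadata' text track.
// Each cue starts at the tag's cueTime plus the timestampOffset (2 + 10 = 12, 12 + 10 = 22); a
// cue's endTime is the startTime of the next cue, and the final cue's endTime stays at
// Number.MAX_VALUE until 'sourceended' fires, at which point it is set to the media source's
// duration.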
_qunit2['default'].test('does not wrap mp4 source buffers', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();

  mediaSource.addSourceBuffer('video/mp4;codecs=avc1.4d400d');
  mediaSource.addSourceBuffer('audio/mp4;codecs=mp4a.40.2');
  _qunit2['default'].equal(mediaSource.sourceBuffers.length, mediaSource.nativeMediaSource_.sourceBuffers.length, 'did not need virtual buffers');
  _qunit2['default'].equal(mediaSource.sourceBuffers.length, 2, 'created native buffers');
});

_qunit2['default'].test('can get activeSourceBuffers', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();

  // although activeSourceBuffers should technically be a SourceBufferList, we are
  // returning it as an array, and users may expect it to behave as such
  _qunit2['default'].ok(Array.isArray(mediaSource.activeSourceBuffers));
});

_qunit2['default'].test('active source buffers are updated on each buffer\'s updateend', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var updateCallCount = 0;
  var sourceBuffer = undefined;

  mediaSource.updateActiveSourceBuffers_ = function () {
    updateCallCount++;
  };

  sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  mediaSource.player_ = this.player;
  mediaSource.url_ = this.url;
  mediaSource.trigger('sourceopen');
  _qunit2['default'].equal(updateCallCount, 0, 'active source buffers not updated on adding source buffer');

  mediaSource.player_.audioTracks().trigger('addtrack');
  _qunit2['default'].equal(updateCallCount, 1, 'active source buffers updated after addtrack');

  sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  _qunit2['default'].equal(updateCallCount, 1, 'active source buffers not updated on adding second source buffer');

  mediaSource.player_.audioTracks().trigger('removetrack');
  _qunit2['default'].equal(updateCallCount, 2, 'active source buffers updated after removetrack');

  mediaSource.player_.audioTracks().trigger('change');
  _qunit2['default'].equal(updateCallCount, 3, 'active source buffers updated after change');
});

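// In short (as asserted above, and assuming the wiring happens on 'sourceopen'): the media source
// recomputes its active source buffers in response to the player's audio track list events
// ('addtrack', 'removetrack', 'change'), not when source buffers are added.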
_qunit2['default'].test('combined buffer is the only active buffer when main track enabled', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBufferAudio = undefined;
  var sourceBufferCombined = undefined;
  var audioTracks = [{
    enabled: true,
    kind: 'main',
    label: 'main'
  }, {
    enabled: false,
    kind: 'alternative',
    label: 'English (UK)'
  }];

  this.player.audioTracks = function () {
    return audioTracks;
  };

  mediaSource.player_ = this.player;

  sourceBufferCombined = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferCombined.videoCodec_ = true;
  sourceBufferCombined.audioCodec_ = true;
  sourceBufferAudio = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferAudio.videoCodec_ = false;
  sourceBufferAudio.audioCodec_ = true;

  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 1, 'active source buffers starts with one source buffer');
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[0], sourceBufferCombined, 'active source buffers starts with combined source buffer');
});

_qunit2['default'].test('combined & audio buffers are active when alternative track enabled', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBufferAudio = undefined;
  var sourceBufferCombined = undefined;
  var audioTracks = [{
    enabled: false,
    kind: 'main',
    label: 'main'
  }, {
    enabled: true,
    kind: 'alternative',
    label: 'English (UK)'
  }];

  this.player.audioTracks = function () {
    return audioTracks;
  };

  mediaSource.player_ = this.player;

  sourceBufferCombined = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferCombined.videoCodec_ = true;
  sourceBufferCombined.audioCodec_ = true;
  sourceBufferAudio = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferAudio.videoCodec_ = false;
  sourceBufferAudio.audioCodec_ = true;

  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 2, 'active source buffers includes both source buffers');
  // maintains same order as source buffers were created
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[0], sourceBufferCombined, 'active source buffers starts with combined source buffer');
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[1], sourceBufferAudio, 'active source buffers ends with audio source buffer');
});

_qunit2['default'].test('video only & audio only buffers are always active', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBufferAudio = undefined;
  var sourceBufferCombined = undefined;
  var audioTracks = [{
    enabled: false,
    kind: 'main',
    label: 'main'
  }, {
    enabled: true,
    kind: 'alternative',
    label: 'English (UK)'
  }];

  this.player.audioTracks = function () {
    return audioTracks;
  };

  mediaSource.player_ = this.player;

  sourceBufferCombined = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferCombined.videoCodec_ = true;
  sourceBufferCombined.audioCodec_ = false;
  sourceBufferAudio = mediaSource.addSourceBuffer('video/m2pt');
  sourceBufferAudio.videoCodec_ = false;
  sourceBufferAudio.audioCodec_ = true;

  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 2, 'active source buffers includes both source buffers');
  // maintains same order as source buffers were created
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[0], sourceBufferCombined, 'active source buffers starts with combined source buffer');
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[1], sourceBufferAudio, 'active source buffers ends with audio source buffer');

  audioTracks[0].enabled = true;
  audioTracks[1].enabled = false;
  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 2, 'active source buffers includes both source buffers');
  // maintains same order as source buffers were created
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[0], sourceBufferCombined, 'active source buffers starts with combined source buffer');
  _qunit2['default'].equal(mediaSource.activeSourceBuffers[1], sourceBufferAudio, 'active source buffers ends with audio source buffer');
});

_qunit2['default'].test('Single buffer always active. Audio disabled depends on audio codec', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var audioTracks = [{
    enabled: true,
    kind: 'main',
    label: 'main'
  }];

  this.player.audioTracks = function () {
    return audioTracks;
  };

  mediaSource.player_ = this.player;

  var sourceBuffer = mediaSource.addSourceBuffer('video/m2pt');

  // video only
  sourceBuffer.videoCodec_ = true;
  sourceBuffer.audioCodec_ = false;

  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 1, 'sourceBuffer is active');
  _qunit2['default'].ok(mediaSource.activeSourceBuffers[0].audioDisabled_, 'audio is disabled on video only active sourceBuffer');

  // audio only
  sourceBuffer.videoCodec_ = false;
  sourceBuffer.audioCodec_ = true;

  mediaSource.updateActiveSourceBuffers_();

  _qunit2['default'].equal(mediaSource.activeSourceBuffers.length, 1, 'sourceBuffer is active');
  _qunit2['default'].notOk(mediaSource.activeSourceBuffers[0].audioDisabled_, 'audio not disabled on audio only active sourceBuffer');
});

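// Summary of the selection rules the four tests above assert (a restatement, not new behavior):
// a combined audio/video buffer stays active on its own while the main audio track is enabled;
// an audio-only buffer becomes active alongside it when an alternative audio track is enabled;
// video-only and audio-only buffers are always active; and on a single active buffer,
// audioDisabled_ is true when the buffer declares no audio codec and false when it carries audio.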
_qunit2['default'].test('video segments with info trigger videoinfo event', function () {
  var data = new Uint8Array(1);
  var infoEvents = [];
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  var info = { width: 100 };
  var newinfo = { width: 225 };

  mediaSource.on('videoinfo', function (e) {
    return infoEvents.push(e);
  });

  // send a video segment with info, then send done
  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', data, { info: info }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);

  _qunit2['default'].equal(infoEvents.length, 1, 'video info should trigger');
  _qunit2['default'].deepEqual(infoEvents[0].info, info, 'video info = muxed info');

  // send another video segment with new info, then send done
  sourceBuffer.transmuxer_.onmessage(createDataMessage('video', data, { info: newinfo }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);

  _qunit2['default'].equal(infoEvents.length, 2, 'video info should trigger');
  _qunit2['default'].deepEqual(infoEvents[1].info, newinfo, 'video info = muxed info');
});

_qunit2['default'].test('audio segments with info trigger audioinfo event', function () {
  var data = new Uint8Array(1);
  var infoEvents = [];
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t');
  var info = { width: 100 };
  var newinfo = { width: 225 };

  mediaSource.on('audioinfo', function (e) {
    return infoEvents.push(e);
  });

  // send an audio segment with info, then send done
  sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', data, { info: info }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);

  _qunit2['default'].equal(infoEvents.length, 1, 'audio info should trigger');
  _qunit2['default'].deepEqual(infoEvents[0].info, info, 'audio info = muxed info');

  // send another audio segment with new info, then send done
  sourceBuffer.transmuxer_.onmessage(createDataMessage('audio', data, { info: newinfo }));
  sourceBuffer.transmuxer_.onmessage(doneMessage);

  _qunit2['default'].equal(infoEvents.length, 2, 'audio info should trigger');
  _qunit2['default'].deepEqual(infoEvents[1].info, newinfo, 'audio info = muxed info');
});

_qunit2['default'].test('creates native SourceBuffers immediately if a second ' + 'VirtualSourceBuffer is created', function () {
  var mediaSource = new _videoJs2['default'].MediaSource();
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp2t; codecs="avc1.64001f,mp4a.40.5"');
  var sourceBuffer2 = mediaSource.addSourceBuffer('video/mp2t; codecs="mp4a.40.5"');

  _qunit2['default'].ok(mediaSource.videoBuffer_, 'created a video buffer');
  _qunit2['default'].equal(mediaSource.videoBuffer_.type, 'video/mp4;codecs="avc1.64001f"', 'video buffer has the specified codec');

  _qunit2['default'].ok(mediaSource.audioBuffer_, 'created an audio buffer');
  _qunit2['default'].equal(mediaSource.audioBuffer_.type, 'audio/mp4;codecs="mp4a.40.5"', 'audio buffer has the specified codec');
  _qunit2['default'].equal(mediaSource.sourceBuffers.length, 2, 'created two virtual buffers');
  _qunit2['default'].equal(mediaSource.sourceBuffers[0], sourceBuffer, 'returned the virtual buffer');
  _qunit2['default'].equal(mediaSource.sourceBuffers[1], sourceBuffer2, 'returned the virtual buffer');
  _qunit2['default'].equal(sourceBuffer.audioDisabled_, true, 'first source buffer\'s audio is automatically disabled');
  _qunit2['default'].ok(sourceBuffer2.audioBuffer_, 'second source buffer has an audio source buffer');
});

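// Restating what the test above asserts: once a second virtual source buffer is added, the
// underlying native MP4 SourceBuffers are created right away from the declared MPEG-2 TS codec
// strings ('avc1.64001f' for video, 'mp4a.40.5' for audio); the first virtual buffer's audio is
// automatically disabled while the second one is given the audio source buffer.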
_qunit2['default'].module('VirtualSourceBuffer - Isolated Functions');

_qunit2['default'].test('gopsSafeToAlignWith returns correct list', function () {
  // gopsSafeToAlignWith uses a 3 second safetyNet so that gops very close to the playhead
  // are not considered safe to append to
  var safetyNet = 3;
  var pts = function pts(time) {
    return Math.ceil(time * 90000);
  };
  var mapping = 0;
  var _currentTime = 0;
  var buffer = [];
  var player = undefined;
  var actual = undefined;
  var expected = undefined;

  expected = [];
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'empty array when player is undefined');

  player = { currentTime: function currentTime() {
    return _currentTime;
  } };
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'empty array when buffer is empty');

  buffer = expected = [{ pts: pts(_currentTime + safetyNet + 1) }, { pts: pts(_currentTime + safetyNet + 2) }, { pts: pts(_currentTime + safetyNet + 3) }];
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'entire buffer considered safe when all gops come after currentTime + safetyNet');

  buffer = [{ pts: pts(_currentTime + safetyNet) }, { pts: pts(_currentTime + safetyNet + 1) }, { pts: pts(_currentTime + safetyNet + 2) }];
  expected = [{ pts: pts(_currentTime + safetyNet + 1) }, { pts: pts(_currentTime + safetyNet + 2) }];
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'safetyNet comparison is not inclusive');

  _currentTime = 10;
  mapping = -5;
  buffer = [{ pts: pts(_currentTime - mapping + safetyNet - 2) }, { pts: pts(_currentTime - mapping + safetyNet - 1) }, { pts: pts(_currentTime - mapping + safetyNet) }, { pts: pts(_currentTime - mapping + safetyNet + 1) }, { pts: pts(_currentTime - mapping + safetyNet + 2) }];
  expected = [{ pts: pts(_currentTime - mapping + safetyNet + 1) }, { pts: pts(_currentTime - mapping + safetyNet + 2) }];
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'uses mapping to shift currentTime');

  _currentTime = 20;
  expected = [];
  actual = (0, _srcVirtualSourceBuffer.gopsSafeToAlignWith)(buffer, player, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'empty array when no gops in buffer come after currentTime');
});

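// Illustrative sketch (an assumption, not the shipped src/virtual-source-buffer.js code) of the
// filtering behavior pinned down above. GOP pts values are on the 90kHz clock, the player's
// currentTime is shifted by the display-to-stream mapping, a 3 second safety net is added, and
// only gops strictly after that threshold are kept. The helper is never called by these tests.
var exampleGopsSafeToAlignWith = function (gopBuffer, player, mapping) {
  var safetyNet = 3;

  if (!player || !gopBuffer.length) {
    return [];
  }
  var thresholdPts = Math.ceil((player.currentTime() - mapping + safetyNet) * 90000);

  return gopBuffer.filter(function (gop) {
    return gop.pts > thresholdPts;
  });
};
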
_qunit2['default'].test('updateGopBuffer correctly processes new gop information', function () {
  var buffer = [];
  var gops = [];
  var replace = true;
  var actual = undefined;
  var expected = undefined;

  buffer = expected = [{ pts: 100 }, { pts: 200 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'returns buffer when no new gops');

  gops = expected = [{ pts: 300 }, { pts: 400 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'returns only new gops when replace is true');

  replace = false;
  buffer = [];
  gops = [{ pts: 100 }];
  expected = [{ pts: 100 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'appends new gops to empty buffer');

  buffer = [{ pts: 100 }, { pts: 200 }];
  gops = [{ pts: 300 }, { pts: 400 }];
  expected = [{ pts: 100 }, { pts: 200 }, { pts: 300 }, { pts: 400 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'appends new gops at end of buffer when no overlap');

  buffer = [{ pts: 100 }, { pts: 200 }, { pts: 300 }, { pts: 400 }];
  gops = [{ pts: 250 }, { pts: 300 }, { pts: 350 }];
  expected = [{ pts: 100 }, { pts: 200 }, { pts: 250 }, { pts: 300 }, { pts: 350 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'slices buffer at point of overlap and appends new gops');

  buffer = [{ pts: 100 }, { pts: 200 }, { pts: 300 }, { pts: 400 }];
  gops = [{ pts: 200 }, { pts: 300 }, { pts: 350 }];
  expected = [{ pts: 100 }, { pts: 200 }, { pts: 300 }, { pts: 350 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'overlap slice is inclusive');

  buffer = [{ pts: 300 }, { pts: 400 }, { pts: 500 }, { pts: 600 }];
  gops = [{ pts: 100 }, { pts: 200 }, { pts: 250 }];
  expected = [{ pts: 100 }, { pts: 200 }, { pts: 250 }];
  actual = (0, _srcVirtualSourceBuffer.updateGopBuffer)(buffer, gops, replace);
  _qunit2['default'].deepEqual(actual, expected, 'completely replaces buffer with new gops when all gops come before buffer');
});

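// Unused sketch (an assumption) of the contract the assertions above describe: with no new gops
// the buffer is returned untouched; in replace mode only the new gops survive; otherwise the
// buffered gop list is truncated at the first gop whose pts is >= the first new gop's pts (an
// inclusive overlap) and the new gops are appended.
var exampleUpdateGopBuffer = function (buffer, gops, replace) {
  if (!gops.length) {
    return buffer;
  }
  if (replace) {
    return gops;
  }
  var sliceIndex = buffer.length;

  for (var i = 0; i < buffer.length; i++) {
    if (buffer[i].pts >= gops[0].pts) {
      sliceIndex = i;
      break;
    }
  }
  return buffer.slice(0, sliceIndex).concat(gops);
};
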
_qunit2['default'].test('removeGopBuffer correctly removes range from buffer', function () {
  var pts = function pts(time) {
    return Math.ceil(time * 90000);
  };
  var buffer = [];
  var start = 0;
  var end = 0;
  var mapping = -5;
  var actual = undefined;
  var expected = undefined;

  expected = [];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'returns empty array when buffer empty');

  start = 0;
  end = 8;
  buffer = expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'no removal when remove range comes before start of buffer');

  start = 22;
  end = 30;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes last gop when remove range is after end of buffer');

  start = 0;
  end = 10;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps start range to beginning of buffer');

  start = 0;
  end = 12;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps start range to beginning of buffer');

  start = 0;
  end = 14;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps start range to beginning of buffer');

  start = 15;
  end = 30;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps end range to end of buffer');

  start = 17;
  end = 30;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps end range to end of buffer');

  start = 20;
  end = 30;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'clamps end range to end of buffer');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 12;
  end = 15;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 12;
  end = 14;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 13;
  end = 14;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 13;
  end = 15;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 12;
  end = 17;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  start = 13;
  end = 16;
  expected = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes gops that the remove range intersects with');

  start = 10;
  end = 20;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes entire buffer when buffer inside remove range');

  start = 0;
  end = 30;
  buffer = [{ pts: pts(10 - mapping) }, { pts: pts(11 - mapping) }, { pts: pts(12 - mapping) }, { pts: pts(15 - mapping) }, { pts: pts(18 - mapping) }, { pts: pts(20 - mapping) }];
  expected = [];
  actual = (0, _srcVirtualSourceBuffer.removeGopBuffer)(buffer, start, end, mapping);
  _qunit2['default'].deepEqual(actual, expected, 'removes entire buffer when buffer inside remove range');
});

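// Summarizing the behavior asserted above (a restatement, not new functionality): removeGopBuffer
// maps the [start, end] removal range onto the gop pts clock using the mapping, clamps the range
// to the buffered gops, and drops every gop the range touches; a range entirely before the buffer
// removes nothing, while a range entirely after it still drops the final gop.
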
}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
},{"../src/html-media-source":42,"../src/videojs-contrib-media-sources.js":45,"../src/virtual-source-buffer":46,"global/document":2,"global/window":3}]},{},[47,48,49,50,51]);