{\n return Promise.reject(\n new Error(\n `[${this}] This demuxer does not support Sample-AES decryption`,\n ),\n );\n }\n\n flush(timeOffset: number): DemuxerResult {\n // Parse cache in case of remaining frames.\n const cachedData = this.cachedData;\n if (cachedData) {\n this.cachedData = null;\n this.demux(cachedData, 0);\n }\n\n return {\n audioTrack: this._audioTrack,\n videoTrack: dummyTrack() as DemuxedVideoTrackBase,\n id3Track: this._id3Track,\n textTrack: dummyTrack() as DemuxedUserdataTrack,\n };\n }\n\n destroy() {}\n}\n\n/**\n * Initialize PTS\n *\n * Use the frame timestamp unless it is undefined, NaN or Infinity.\n *
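\n * Examples (derived from the implementation below; finite timestamps are scaled to the 90 kHz clock):\n * initPTSFn(5000, 0, null) === 450000\n * initPTSFn(undefined, 2, { baseTime: 9000, timescale: 90000 }) === 189000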
\n */\nexport const initPTSFn = (\n timestamp: number | undefined,\n timeOffset: number,\n initPTS: RationalTimestamp | null,\n): number => {\n if (Number.isFinite(timestamp as number)) {\n return timestamp! * 90;\n }\n const init90kHz = initPTS\n ? (initPTS.baseTime * 90000) / initPTS.timescale\n : 0;\n return timeOffset * 90000 + init90kHz;\n};\nexport default BaseAudioDemuxer;\n\n// ---- adts.ts ----\n\n/**\n * ADTS parser helper\n * @link https://wiki.multimedia.cx/index.php?title=ADTS\n */\nimport { logger } from '../../utils/logger';\nimport { ErrorTypes, ErrorDetails } from '../../errors';\nimport type { HlsEventEmitter } from '../../events';\nimport { Events } from '../../events';\nimport type {\n DemuxedAudioTrack,\n AudioFrame,\n AudioSample,\n} from '../../types/demuxer';\n\ntype AudioConfig = {\n config: number[];\n samplerate: number;\n channelCount: number;\n codec: string;\n manifestCodec: string;\n};\n\ntype FrameHeader = {\n headerLength: number;\n frameLength: number;\n};\n\nexport function getAudioConfig(\n observer: HlsEventEmitter,\n data: Uint8Array,\n offset: number,\n audioCodec: string,\n): AudioConfig | void {\n let adtsObjectType: number;\n let adtsExtensionSamplingIndex: number;\n let adtsChannelConfig: number;\n let config: number[];\n const userAgent = navigator.userAgent.toLowerCase();\n const manifestCodec = audioCodec;\n const adtsSamplingRates = [\n 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025,\n 8000, 7350,\n ];\n // byte 2\n adtsObjectType = ((data[offset + 2] & 0xc0) >>> 6) + 1;\n const adtsSamplingIndex = (data[offset + 2] & 0x3c) >>> 2;\n if (adtsSamplingIndex > adtsSamplingRates.length - 1) {\n const error = new Error(`invalid ADTS sampling index:${adtsSamplingIndex}`);\n observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: true,\n error,\n reason: error.message,\n });\n return;\n }\n adtsChannelConfig = (data[offset + 2] & 0x01) << 2;\n // byte 3\n adtsChannelConfig |= (data[offset + 3] & 0xc0) >>> 6;\n logger.log(\n `manifest codec:${audioCodec}, ADTS type:${adtsObjectType}, samplingIndex:${adtsSamplingIndex}`,\n );\n // firefox: freq less than 24kHz = AAC SBR (HE-AAC)\n if (/firefox/i.test(userAgent)) {\n if (adtsSamplingIndex >= 6) {\n adtsObjectType = 5;\n config = new Array(4);\n // HE-AAC uses SBR (Spectral Band Replication); high frequencies are constructed from low frequencies\n // there is a factor 2 between frame sample rate and output sample rate\n // multiply frequency by 2 (see table below, equivalent to subtracting 3)\n adtsExtensionSamplingIndex = adtsSamplingIndex - 3;\n } else {\n adtsObjectType = 2;\n config = new Array(2);\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n }\n // Android: always use AAC\n } else if (userAgent.indexOf('android') !== -1) {\n adtsObjectType = 2;\n config = new Array(2);\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n } else {\n /* for other browsers (Chrome/Vivaldi/Opera ...)\n always force audio type to be HE-AAC SBR, as some browsers do not support audio codec switch properly (like Chrome ...)\n */\n adtsObjectType = 5;\n config = new Array(4);\n // if (manifest codec is HE-AAC or HE-AACv2) OR (manifest codec not specified AND frequency less than 24kHz)\n if (\n (audioCodec &&\n (audioCodec.indexOf('mp4a.40.29') !== -1 ||\n audioCodec.indexOf('mp4a.40.5') !== -1)) ||\n (!audioCodec && adtsSamplingIndex >= 6)\n ) {\n // HE-AAC uses SBR (Spectral Band Replication); high frequencies are constructed from low frequencies\n // there is a factor 2 between frame sample rate and output sample rate\n // multiply frequency by 2 (see table below, equivalent to subtracting 3)\n adtsExtensionSamplingIndex = adtsSamplingIndex - 3;\n } else {\n // if (manifest codec is AAC) AND (frequency less than 24kHz AND nb channel is 1) OR (manifest codec not specified and mono audio)\n // Chrome fails to play back with low frequency AAC LC mono when initialized with HE-AAC. This is not a problem with stereo.\n if (\n (audioCodec &&\n audioCodec.indexOf('mp4a.40.2') !== -1 &&\n ((adtsSamplingIndex >= 6 && adtsChannelConfig === 1) ||\n /vivaldi/i.test(userAgent))) ||\n (!audioCodec && adtsChannelConfig === 1)\n ) {\n adtsObjectType = 2;\n config = new Array(2);\n }\n adtsExtensionSamplingIndex = adtsSamplingIndex;\n }\n }\n /* refer to http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio#Audio_Specific_Config\n ISO 14496-3 (AAC).pdf - Table 1.13 — Syntax of AudioSpecificConfig()\n Audio Profile / Audio Object Type\n 0: Null\n 1: AAC Main\n 2: AAC LC (Low Complexity)\n 3: AAC SSR (Scalable Sample Rate)\n 4: AAC LTP (Long Term Prediction)\n 5: SBR (Spectral Band Replication)\n 6: AAC Scalable\n sampling freq\n 0: 96000 Hz\n 1: 88200 Hz\n 2: 64000 Hz\n 3: 48000 Hz\n 4: 44100 Hz\n 5: 32000 Hz\n 6: 24000 Hz\n 7: 22050 Hz\n 8: 16000 Hz\n 9: 12000 Hz\n 10: 11025 Hz\n 11: 8000 Hz\n 12: 7350 Hz\n 13: Reserved\n 14: Reserved\n 15: frequency is written explicitly\n Channel Configurations\n These are the channel configurations:\n 0: Defined in AOT Specific Config\n 1: 1 channel: front-center\n 2: 2 channels: front-left, front-right\n */\n // audioObjectType = profile => profile, the MPEG-4 Audio Object Type minus 1\n config[0] = adtsObjectType << 3;\n // samplingFrequencyIndex\n config[0] |= (adtsSamplingIndex & 0x0e) >> 1;\n config[1] |= (adtsSamplingIndex & 0x01) << 7;\n // channelConfiguration\n config[1] |= adtsChannelConfig << 3;\n if (adtsObjectType === 5) {\n // adtsExtensionSamplingIndex\n config[1] |= (adtsExtensionSamplingIndex & 0x0e) >> 1;\n config[2] = (adtsExtensionSamplingIndex & 0x01) << 7;\n // adtsObjectType (force to 2; Chrome checks that the object type is less than 5)\n // https://chromium.googlesource.com/chromium/src.git/+/master/media/formats/mp4/aac.cc\n config[2] |= 2 << 2;\n config[3] = 0;\n }\n return {\n config,\n samplerate: adtsSamplingRates[adtsSamplingIndex],\n channelCount: adtsChannelConfig,\n codec: 'mp4a.40.' + adtsObjectType,\n manifestCodec,\n };\n}\n\nexport function isHeaderPattern(data: Uint8Array, offset: number): boolean {\n return data[offset] === 0xff && (data[offset + 1] & 0xf6) === 0xf0;\n}\n\nexport function getHeaderLength(data: Uint8Array, offset: number): number {\n return data[offset + 1] & 0x01 ? 
7 : 9;\n}\n\nexport function getFullFrameLength(data: Uint8Array, offset: number): number {\n return (\n ((data[offset + 3] & 0x03) << 11) |\n (data[offset + 4] << 3) |\n ((data[offset + 5] & 0xe0) >>> 5)\n );\n}\n\nexport function canGetFrameLength(data: Uint8Array, offset: number): boolean {\n return offset + 5 < data.length;\n}\n\nexport function isHeader(data: Uint8Array, offset: number): boolean {\n // Look for ADTS header | 1111 1111 | 1111 X00X | where X can be either 0 or 1\n // Layer bits (position 14 and 15) in header should be always 0 for ADTS\n // More info https://wiki.multimedia.cx/index.php?title=ADTS\n return offset + 1 < data.length && isHeaderPattern(data, offset);\n}\n\nexport function canParse(data: Uint8Array, offset: number): boolean {\n return (\n canGetFrameLength(data, offset) &&\n isHeaderPattern(data, offset) &&\n getFullFrameLength(data, offset) <= data.length - offset\n );\n}\n\nexport function probe(data: Uint8Array, offset: number): boolean {\n // same as isHeader but we also check that ADTS frame follows last ADTS frame\n // or end of data is reached\n if (isHeader(data, offset)) {\n // ADTS header Length\n const headerLength = getHeaderLength(data, offset);\n if (offset + headerLength >= data.length) {\n return false;\n }\n // ADTS frame Length\n const frameLength = getFullFrameLength(data, offset);\n if (frameLength <= headerLength) {\n return false;\n }\n\n const newOffset = offset + frameLength;\n return newOffset === data.length || isHeader(data, newOffset);\n }\n return false;\n}\n\nexport function initTrackConfig(\n track: DemuxedAudioTrack,\n observer: HlsEventEmitter,\n data: Uint8Array,\n offset: number,\n audioCodec: string,\n) {\n if (!track.samplerate) {\n const config = getAudioConfig(observer, data, offset, audioCodec);\n if (!config) {\n return;\n }\n track.config = config.config;\n track.samplerate = config.samplerate;\n track.channelCount = config.channelCount;\n track.codec = config.codec;\n track.manifestCodec = config.manifestCodec;\n logger.log(\n `parsed codec:${track.codec}, rate:${config.samplerate}, channels:${config.channelCount}`,\n );\n }\n}\n\nexport function getFrameDuration(samplerate: number): number {\n return (1024 * 90000) / samplerate;\n}\n\nexport function parseFrameHeader(\n data: Uint8Array,\n offset: number,\n): FrameHeader | void {\n // The protection skip bit tells us if we have 2 bytes of CRC data at the end of the ADTS header\n const headerLength = getHeaderLength(data, offset);\n if (offset + headerLength <= data.length) {\n // retrieve frame size\n const frameLength = getFullFrameLength(data, offset) - headerLength;\n if (frameLength > 0) {\n // logger.log(`AAC frame, offset/length/total/pts:${offset+headerLength}/${frameLength}/${data.byteLength}`);\n return { headerLength, frameLength };\n }\n }\n}\n\nexport function appendFrame(\n track: DemuxedAudioTrack,\n data: Uint8Array,\n offset: number,\n pts: number,\n frameIndex: number,\n): AudioFrame {\n const frameDuration = getFrameDuration(track.samplerate as number);\n const stamp = pts + frameIndex * frameDuration;\n const header = parseFrameHeader(data, offset);\n let unit: Uint8Array;\n if (header) {\n const { frameLength, headerLength } = header;\n const length = headerLength + frameLength;\n const missing = Math.max(0, offset + length - data.length);\n // logger.log(`AAC frame ${frameIndex}, pts:${stamp} length@offset/total: ${frameLength}@${offset+headerLength}/${data.byteLength} missing: ${missing}`);\n if (missing) {\n unit = new Uint8Array(length - 
headerLength);\n unit.set(data.subarray(offset + headerLength, data.length), 0);\n } else {\n unit = data.subarray(offset + headerLength, offset + length);\n }\n\n const sample: AudioSample = {\n unit,\n pts: stamp,\n };\n if (!missing) {\n track.samples.push(sample as AudioSample);\n }\n\n return { sample, length, missing };\n }\n // overflow incomplete header\n const length = data.length - offset;\n unit = new Uint8Array(length);\n unit.set(data.subarray(offset, data.length), 0);\n const sample: AudioSample = {\n unit,\n pts: stamp,\n };\n return { sample, length, missing: -1 };\n}\n", "/**\n * MPEG parser helper\n */\nimport { DemuxedAudioTrack } from '../../types/demuxer';\n\nlet chromeVersion: number | null = null;\n\nconst BitratesMap = [\n 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 32, 48, 56,\n 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 32, 40, 48, 56, 64, 80,\n 96, 112, 128, 160, 192, 224, 256, 320, 32, 48, 56, 64, 80, 96, 112, 128, 144,\n 160, 176, 192, 224, 256, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144,\n 160,\n];\n\nconst SamplingRateMap = [\n 44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000,\n];\n\nconst SamplesCoefficients = [\n // MPEG 2.5\n [\n 0, // Reserved\n 72, // Layer3\n 144, // Layer2\n 12, // Layer1\n ],\n // Reserved\n [\n 0, // Reserved\n 0, // Layer3\n 0, // Layer2\n 0, // Layer1\n ],\n // MPEG 2\n [\n 0, // Reserved\n 72, // Layer3\n 144, // Layer2\n 12, // Layer1\n ],\n // MPEG 1\n [\n 0, // Reserved\n 144, // Layer3\n 144, // Layer2\n 12, // Layer1\n ],\n];\n\nconst BytesInSlot = [\n 0, // Reserved\n 1, // Layer3\n 1, // Layer2\n 4, // Layer1\n];\n\nexport function appendFrame(\n track: DemuxedAudioTrack,\n data: Uint8Array,\n offset: number,\n pts: number,\n frameIndex: number,\n) {\n // Using http://www.datavoyage.com/mpgscript/mpeghdr.htm as a reference\n if (offset + 24 > data.length) {\n return;\n }\n\n const header = parseHeader(data, offset);\n if (header && offset + header.frameLength <= data.length) {\n const frameDuration = (header.samplesPerFrame * 90000) / header.sampleRate;\n const stamp = pts + frameIndex * frameDuration;\n const sample = {\n unit: data.subarray(offset, offset + header.frameLength),\n pts: stamp,\n dts: stamp,\n };\n\n track.config = [];\n track.channelCount = header.channelCount;\n track.samplerate = header.sampleRate;\n track.samples.push(sample);\n\n return { sample, length: header.frameLength, missing: 0 };\n }\n}\n\nexport function parseHeader(data: Uint8Array, offset: number) {\n const mpegVersion = (data[offset + 1] >> 3) & 3;\n const mpegLayer = (data[offset + 1] >> 1) & 3;\n const bitRateIndex = (data[offset + 2] >> 4) & 15;\n const sampleRateIndex = (data[offset + 2] >> 2) & 3;\n if (\n mpegVersion !== 1 &&\n bitRateIndex !== 0 &&\n bitRateIndex !== 15 &&\n sampleRateIndex !== 3\n ) {\n const paddingBit = (data[offset + 2] >> 1) & 1;\n const channelMode = data[offset + 3] >> 6;\n const columnInBitrates =\n mpegVersion === 3 ? 3 - mpegLayer : mpegLayer === 3 ? 3 : 4;\n const bitRate =\n BitratesMap[columnInBitrates * 14 + bitRateIndex - 1] * 1000;\n const columnInSampleRates =\n mpegVersion === 3 ? 0 : mpegVersion === 2 ? 1 : 2;\n const sampleRate =\n SamplingRateMap[columnInSampleRates * 3 + sampleRateIndex];\n const channelCount = channelMode === 3 ? 
1 : 2; // If bits of channel mode are `11` then it is a single channel (Mono)\n const sampleCoefficient = SamplesCoefficients[mpegVersion][mpegLayer];\n const bytesInSlot = BytesInSlot[mpegLayer];\n const samplesPerFrame = sampleCoefficient * 8 * bytesInSlot;\n const frameLength =\n Math.floor((sampleCoefficient * bitRate) / sampleRate + paddingBit) *\n bytesInSlot;\n\n if (chromeVersion === null) {\n const userAgent = navigator.userAgent || '';\n const result = userAgent.match(/Chrome\\/(\\d+)/i);\n chromeVersion = result ? parseInt(result[1]) : 0;\n }\n const needChromeFix = !!chromeVersion && chromeVersion <= 87;\n\n if (\n needChromeFix &&\n mpegLayer === 2 &&\n bitRate >= 224000 &&\n channelMode === 0\n ) {\n // Work around bug in Chromium by setting channelMode to dual-channel (01) instead of stereo (00)\n data[offset + 3] = data[offset + 3] | 0x80;\n }\n\n return { sampleRate, channelCount, frameLength, samplesPerFrame };\n }\n}\n\nexport function isHeaderPattern(data: Uint8Array, offset: number): boolean {\n return (\n data[offset] === 0xff &&\n (data[offset + 1] & 0xe0) === 0xe0 &&\n (data[offset + 1] & 0x06) !== 0x00\n );\n}\n\nexport function isHeader(data: Uint8Array, offset: number): boolean {\n // Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1\n // Layer bits (position 14 and 15) in header should be always different from 0 (Layer I or Layer II or Layer III)\n // More info http://www.mp3-tech.org/programmer/frame_header.html\n return offset + 1 < data.length && isHeaderPattern(data, offset);\n}\n\nexport function canParse(data: Uint8Array, offset: number): boolean {\n const headerSize = 4;\n\n return isHeaderPattern(data, offset) && headerSize <= data.length - offset;\n}\n\nexport function probe(data: Uint8Array, offset: number): boolean {\n // same as isHeader but we also check that MPEG frame follows last MPEG frame\n // or end of data is reached\n if (offset + 1 < data.length && isHeaderPattern(data, offset)) {\n // MPEG header Length\n const headerLength = 4;\n // MPEG frame Length\n const header = parseHeader(data, offset);\n let frameLength = headerLength;\n if (header?.frameLength) {\n frameLength = header.frameLength;\n }\n\n const newOffset = offset + frameLength;\n return newOffset === data.length || isHeader(data, newOffset);\n }\n return false;\n}\n", "/**\n * AAC demuxer\n */\nimport BaseAudioDemuxer from './base-audio-demuxer';\nimport * as ADTS from './adts';\nimport * as MpegAudio from './mpegaudio';\nimport { logger } from '../../utils/logger';\nimport * as ID3 from '../id3';\nimport type { HlsEventEmitter } from '../../events';\nimport type { HlsConfig } from '../../config';\n\nclass AACDemuxer extends BaseAudioDemuxer {\n private readonly observer: HlsEventEmitter;\n private readonly config: HlsConfig;\n\n constructor(observer, config) {\n super();\n this.observer = observer;\n this.config = config;\n }\n\n resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n trackDuration: number,\n ) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/adts',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'aac',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0,\n };\n }\n\n // Source for probe info - https://wiki.multimedia.cx/index.php?title=ADTS\n static probe(data: Uint8Array | undefined): 
boolean {\n if (!data) {\n return false;\n }\n\n // Check for the ADTS sync word\n // Look for ADTS header | 1111 1111 | 1111 X00X | where X can be either 0 or 1\n // Layer bits (position 14 and 15) in header should be always 0 for ADTS\n // More info https://wiki.multimedia.cx/index.php?title=ADTS\n const id3Data = ID3.getID3Data(data, 0);\n let offset = id3Data?.length || 0;\n\n if (MpegAudio.probe(data, offset)) {\n return false;\n }\n\n for (let length = data.length; offset < length; offset++) {\n if (ADTS.probe(data, offset)) {\n logger.log('ADTS sync word found !');\n return true;\n }\n }\n return false;\n }\n\n canParse(data, offset) {\n return ADTS.canParse(data, offset);\n }\n\n appendFrame(track, data, offset) {\n ADTS.initTrackConfig(\n track,\n this.observer,\n data,\n offset,\n track.manifestCodec,\n );\n const frame = ADTS.appendFrame(\n track,\n data,\n offset,\n this.basePTS as number,\n this.frameIndex,\n );\n if (frame && frame.missing === 0) {\n return frame;\n }\n }\n}\n\nexport default AACDemuxer;\n", "/**\n * MP4 demuxer\n */\nimport {\n Demuxer,\n DemuxerResult,\n PassthroughTrack,\n DemuxedAudioTrack,\n DemuxedUserdataTrack,\n DemuxedMetadataTrack,\n KeyData,\n MetadataSchema,\n} from '../types/demuxer';\nimport {\n findBox,\n segmentValidRange,\n appendUint8Array,\n parseEmsg,\n parseSamples,\n parseInitSegment,\n RemuxerTrackIdConfig,\n hasMoofData,\n} from '../utils/mp4-tools';\nimport { dummyTrack } from './dummy-demuxed-track';\nimport type { HlsEventEmitter } from '../events';\nimport type { HlsConfig } from '../config';\n\nconst emsgSchemePattern = /\\/emsg[-/]ID3/i;\n\nclass MP4Demuxer implements Demuxer {\n private remainderData: Uint8Array | null = null;\n private timeOffset: number = 0;\n private config: HlsConfig;\n private videoTrack?: PassthroughTrack;\n private audioTrack?: DemuxedAudioTrack;\n private id3Track?: DemuxedMetadataTrack;\n private txtTrack?: DemuxedUserdataTrack;\n\n constructor(observer: HlsEventEmitter, config: HlsConfig) {\n this.config = config;\n }\n\n public resetTimeStamp() {}\n\n public resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n trackDuration: number,\n ) {\n const videoTrack = (this.videoTrack = dummyTrack(\n 'video',\n 1,\n ) as PassthroughTrack);\n const audioTrack = (this.audioTrack = dummyTrack(\n 'audio',\n 1,\n ) as DemuxedAudioTrack);\n const captionTrack = (this.txtTrack = dummyTrack(\n 'text',\n 1,\n ) as DemuxedUserdataTrack);\n\n this.id3Track = dummyTrack('id3', 1) as DemuxedMetadataTrack;\n this.timeOffset = 0;\n\n if (!initSegment?.byteLength) {\n return;\n }\n const initData = parseInitSegment(initSegment);\n\n if (initData.video) {\n const { id, timescale, codec } = initData.video;\n videoTrack.id = id;\n videoTrack.timescale = captionTrack.timescale = timescale;\n videoTrack.codec = codec;\n }\n\n if (initData.audio) {\n const { id, timescale, codec } = initData.audio;\n audioTrack.id = id;\n audioTrack.timescale = timescale;\n audioTrack.codec = codec;\n }\n\n captionTrack.id = RemuxerTrackIdConfig.text;\n videoTrack.sampleDuration = 0;\n videoTrack.duration = audioTrack.duration = trackDuration;\n }\n\n public resetContiguity(): void {\n this.remainderData = null;\n }\n\n static probe(data: Uint8Array) {\n return hasMoofData(data);\n }\n\n public demux(data: Uint8Array, timeOffset: number): DemuxerResult {\n this.timeOffset = timeOffset;\n // Load all data into the avc track. 
The CMAF remuxer will look for the data in the samples object; the rest of the fields do not matter\n let videoSamples = data;\n const videoTrack = this.videoTrack as PassthroughTrack;\n const textTrack = this.txtTrack as DemuxedUserdataTrack;\n if (this.config.progressive) {\n // Split the bytestream into two ranges: one encompassing all data up until the start of the last moof, and everything else.\n // This is done to guarantee that we're sending valid data to MSE - when demuxing progressively, we have no guarantee\n // that the fetch loader gives us flush moof+mdat pairs. If we push jagged data to MSE, it will throw an exception.\n if (this.remainderData) {\n videoSamples = appendUint8Array(this.remainderData, data);\n }\n const segmentedData = segmentValidRange(videoSamples);\n this.remainderData = segmentedData.remainder;\n videoTrack.samples = segmentedData.valid || new Uint8Array();\n } else {\n videoTrack.samples = videoSamples;\n }\n\n const id3Track = this.extractID3Track(videoTrack, timeOffset);\n textTrack.samples = parseSamples(timeOffset, videoTrack);\n\n return {\n videoTrack,\n audioTrack: this.audioTrack as DemuxedAudioTrack,\n id3Track,\n textTrack: this.txtTrack as DemuxedUserdataTrack,\n };\n }\n\n public flush() {\n const timeOffset = this.timeOffset;\n const videoTrack = this.videoTrack as PassthroughTrack;\n const textTrack = this.txtTrack as DemuxedUserdataTrack;\n videoTrack.samples = this.remainderData || new Uint8Array();\n this.remainderData = null;\n\n const id3Track = this.extractID3Track(videoTrack, this.timeOffset);\n textTrack.samples = parseSamples(timeOffset, videoTrack);\n\n return {\n videoTrack,\n audioTrack: dummyTrack() as DemuxedAudioTrack,\n id3Track,\n textTrack: dummyTrack() as DemuxedUserdataTrack,\n };\n }\n\n private extractID3Track(\n videoTrack: PassthroughTrack,\n timeOffset: number,\n ): DemuxedMetadataTrack {\n const id3Track = this.id3Track as DemuxedMetadataTrack;\n if (videoTrack.samples.length) {\n const emsgs = findBox(videoTrack.samples, ['emsg']);\n if (emsgs) {\n emsgs.forEach((data: Uint8Array) => {\n const emsgInfo = parseEmsg(data);\n if (emsgSchemePattern.test(emsgInfo.schemeIdUri)) {\n const pts = Number.isFinite(emsgInfo.presentationTime)\n ? emsgInfo.presentationTime! / emsgInfo.timeScale\n : timeOffset +\n emsgInfo.presentationTimeDelta! / emsgInfo.timeScale;\n let duration =\n emsgInfo.eventDuration === 0xffffffff\n ? 
Number.POSITIVE_INFINITY\n : emsgInfo.eventDuration / emsgInfo.timeScale;\n // Safari takes anything <= 0.001 seconds and maps it to Infinity\n if (duration <= 0.001) {\n duration = Number.POSITIVE_INFINITY;\n }\n const payload = emsgInfo.payload;\n id3Track.samples.push({\n data: payload,\n len: payload.byteLength,\n dts: pts,\n pts: pts,\n type: MetadataSchema.emsg,\n duration: duration,\n });\n }\n });\n }\n }\n return id3Track;\n }\n\n demuxSampleAes(\n data: Uint8Array,\n keyData: KeyData,\n timeOffset: number,\n ): Promise<DemuxerResult> {\n return Promise.reject(\n new Error('The MP4 demuxer does not support SAMPLE-AES decryption'),\n );\n }\n\n destroy() {}\n}\n\nexport default MP4Demuxer;\n\n// ---- dolby.ts ----\n\nexport const getAudioBSID = (data: Uint8Array, offset: number): number => {\n // check the bsid to confirm ac-3 | ec-3\n let bsid = 0;\n let numBits = 5;\n offset += numBits;\n const temp = new Uint32Array(1); // unsigned 32 bit for temporary storage\n const mask = new Uint32Array(1); // unsigned 32 bit mask value\n const byte = new Uint8Array(1); // unsigned 8 bit for temporary storage\n while (numBits > 0) {\n byte[0] = data[offset];\n // read remaining bits, up to 8 bits at a time\n const bits = Math.min(numBits, 8);\n const shift = 8 - bits;\n mask[0] = (0xff000000 >>> (24 + shift)) << shift;\n temp[0] = (byte[0] & mask[0]) >> shift;\n bsid = !bsid ? temp[0] : (bsid << bits) | temp[0];\n offset += 1;\n numBits -= bits;\n }\n return bsid;\n};\n\n// ---- ac3-demuxer.ts ----\n\nimport BaseAudioDemuxer from './base-audio-demuxer';\nimport { getID3Data, getTimeStamp } from '../id3';\nimport { getAudioBSID } from './dolby';\nimport type { HlsEventEmitter } from '../../events';\nimport type { AudioFrame, DemuxedAudioTrack } from '../../types/demuxer';\n\nexport class AC3Demuxer extends BaseAudioDemuxer {\n private readonly observer: HlsEventEmitter;\n\n constructor(observer) {\n super();\n this.observer = observer;\n }\n\n resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n trackDuration: number,\n ) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/ac-3',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'ac3',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0,\n };\n }\n\n canParse(data: Uint8Array, offset: number): boolean {\n return offset + 64 < data.length;\n }\n\n appendFrame(\n track: DemuxedAudioTrack,\n data: Uint8Array,\n offset: number,\n ): AudioFrame | void {\n const frameLength = appendFrame(\n track,\n data,\n offset,\n this.basePTS as number,\n this.frameIndex,\n );\n if (frameLength !== -1) {\n const sample = track.samples[track.samples.length - 1];\n return { sample, length: frameLength, missing: 0 };\n }\n }\n\n static probe(data: Uint8Array | undefined): boolean {\n if (!data) {\n return false;\n }\n\n const id3Data = getID3Data(data, 0);\n if (!id3Data) {\n return false;\n }\n\n // look for the ac-3 sync bytes\n const offset = id3Data.length;\n if (\n data[offset] === 0x0b &&\n data[offset + 1] === 0x77 &&\n getTimeStamp(id3Data) !== undefined &&\n // check the bsid to confirm ac-3\n getAudioBSID(data, offset) < 16\n ) {\n return true;\n }\n return false;\n }\n}\n
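\n// Illustration (not part of the library): appendFrame() below derives its frame\n// duration on the 90 kHz clock from the 1536 PCM samples carried by every AC-3\n// syncframe. A hypothetical helper spelling out that arithmetic:\nexport function ac3FrameDuration90k(sampleRate: 48000 | 44100 | 32000): number {\n // e.g. (1536 / 48000) * 90000 === 2880 ticks, i.e. 32 ms per frame\n return (1536 / sampleRate) * 90000;\n}\n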
\nexport function appendFrame(\n track: DemuxedAudioTrack,\n data: Uint8Array,\n start: number,\n pts: number,\n frameIndex: number,\n): number {\n if (start + 8 > data.length) {\n return -1; // not enough bytes left\n }\n\n if (data[start] !== 0x0b || data[start + 1] !== 0x77) {\n return -1; // invalid magic\n }\n\n // get sample rate\n const samplingRateCode = data[start + 4] >> 6;\n if (samplingRateCode >= 3) {\n return -1; // invalid sampling rate\n }\n\n const samplingRateMap = [48000, 44100, 32000];\n const sampleRate = samplingRateMap[samplingRateCode];\n\n // get frame size\n const frameSizeCode = data[start + 4] & 0x3f;\n const frameSizeMap = [\n 64, 69, 96, 64, 70, 96, 80, 87, 120, 80, 88, 120, 96, 104, 144, 96, 105,\n 144, 112, 121, 168, 112, 122, 168, 128, 139, 192, 128, 140, 192, 160, 174,\n 240, 160, 175, 240, 192, 208, 288, 192, 209, 288, 224, 243, 336, 224, 244,\n 336, 256, 278, 384, 256, 279, 384, 320, 348, 480, 320, 349, 480, 384, 417,\n 576, 384, 418, 576, 448, 487, 672, 448, 488, 672, 512, 557, 768, 512, 558,\n 768, 640, 696, 960, 640, 697, 960, 768, 835, 1152, 768, 836, 1152, 896, 975,\n 1344, 896, 976, 1344, 1024, 1114, 1536, 1024, 1115, 1536, 1152, 1253, 1728,\n 1152, 1254, 1728, 1280, 1393, 1920, 1280, 1394, 1920,\n ];\n\n const frameLength = frameSizeMap[frameSizeCode * 3 + samplingRateCode] * 2;\n if (start + frameLength > data.length) {\n return -1;\n }\n\n // get channel count\n const channelMode = data[start + 6] >> 5;\n let skipCount = 0;\n if (channelMode === 2) {\n skipCount += 2;\n } else {\n if (channelMode & 1 && channelMode !== 1) {\n skipCount += 2;\n }\n if (channelMode & 4) {\n skipCount += 2;\n }\n }\n\n const lfeon =\n (((data[start + 6] << 8) | data[start + 7]) >> (12 - skipCount)) & 1;\n\n const channelsMap = [2, 1, 2, 3, 3, 4, 4, 5];\n const channelCount = channelsMap[channelMode] + lfeon;\n\n // build dac3 box\n const bsid = data[start + 5] >> 3;\n const bsmod = data[start + 5] & 7;\n\n const config = new Uint8Array([\n (samplingRateCode << 6) | (bsid << 1) | (bsmod >> 2),\n ((bsmod & 3) << 6) |\n (channelMode << 3) |\n (lfeon << 2) |\n (frameSizeCode >> 4),\n (frameSizeCode << 4) & 0xe0,\n ]);\n\n const frameDuration = (1536 / sampleRate) * 90000;\n const stamp = pts + frameIndex * frameDuration;\n const unit = data.subarray(start, start + frameLength);\n\n track.config = config;\n track.channelCount = channelCount;\n track.samplerate = sampleRate;\n track.samples.push({ unit, pts: stamp });\n\n return frameLength;\n}\n\n// ---- base-video-parser.ts ----\n\nimport type { ParsedVideoSample } from '../tsdemuxer';\nimport {\n DemuxedVideoTrack,\n VideoSample,\n VideoSampleUnit,\n} from '../../types/demuxer';\nimport { logger } from '../../utils/logger';\n\nclass BaseVideoParser {\n protected VideoSample: ParsedVideoSample | null = null;\n\n protected createVideoSample(\n key: boolean,\n pts: number | undefined,\n dts: number | undefined,\n debug: string,\n ): ParsedVideoSample {\n return {\n key,\n frame: false,\n pts,\n dts,\n units: [],\n debug,\n length: 0,\n };\n }\n\n protected getLastNalUnit(\n samples: VideoSample[],\n ): VideoSampleUnit | undefined {\n let VideoSample = this.VideoSample;\n let lastUnit: VideoSampleUnit | undefined;\n // try to fall back to the previous sample if the current one is empty\n if (!VideoSample || VideoSample.units.length === 0) {\n VideoSample = samples[samples.length - 1];\n }\n if (VideoSample?.units) {\n const units = VideoSample.units;\n lastUnit = units[units.length - 1];\n }\n return lastUnit;\n }\n\n protected pushAccessUnit(\n VideoSample: ParsedVideoSample,\n videoTrack: DemuxedVideoTrack,\n ) {\n if (VideoSample.units.length && VideoSample.frame) {\n // if sample does not have PTS/DTS, patch with last sample PTS/DTS\n if (VideoSample.pts === undefined) {\n const 
samples = videoTrack.samples;\n const nbSamples = samples.length;\n if (nbSamples) {\n const lastSample = samples[nbSamples - 1];\n VideoSample.pts = lastSample.pts;\n VideoSample.dts = lastSample.dts;\n } else {\n // dropping samples, no timestamp found\n videoTrack.dropped++;\n return;\n }\n }\n videoTrack.samples.push(VideoSample as VideoSample);\n }\n if (VideoSample.debug.length) {\n logger.log(\n VideoSample.pts + '/' + VideoSample.dts + ':' + VideoSample.debug,\n );\n }\n }\n}\n\nexport default BaseVideoParser;\n", "/**\n * Parser for exponential Golomb codes, a variable-bitwidth number encoding scheme used by h264.\n */\n\nimport { logger } from '../../utils/logger';\n\nclass ExpGolomb {\n private data: Uint8Array;\n public bytesAvailable: number;\n private word: number;\n private bitsAvailable: number;\n\n constructor(data: Uint8Array) {\n this.data = data;\n // the number of bytes left to examine in this.data\n this.bytesAvailable = data.byteLength;\n // the current word being examined\n this.word = 0; // :uint\n // the number of bits left to examine in the current word\n this.bitsAvailable = 0; // :uint\n }\n\n // ():void\n loadWord(): void {\n const data = this.data;\n const bytesAvailable = this.bytesAvailable;\n const position = data.byteLength - bytesAvailable;\n const workingBytes = new Uint8Array(4);\n const availableBytes = Math.min(4, bytesAvailable);\n if (availableBytes === 0) {\n throw new Error('no bytes available');\n }\n\n workingBytes.set(data.subarray(position, position + availableBytes));\n this.word = new DataView(workingBytes.buffer).getUint32(0);\n // track the amount of this.data that has been processed\n this.bitsAvailable = availableBytes * 8;\n this.bytesAvailable -= availableBytes;\n }\n\n // (count:int):void\n skipBits(count: number): void {\n let skipBytes; // :int\n count = Math.min(count, this.bytesAvailable * 8 + this.bitsAvailable);\n if (this.bitsAvailable > count) {\n this.word <<= count;\n this.bitsAvailable -= count;\n } else {\n count -= this.bitsAvailable;\n skipBytes = count >> 3;\n count -= skipBytes << 3;\n this.bytesAvailable -= skipBytes;\n this.loadWord();\n this.word <<= count;\n this.bitsAvailable -= count;\n }\n }\n\n // (size:int):uint\n readBits(size: number): number {\n let bits = Math.min(this.bitsAvailable, size); // :uint\n const valu = this.word >>> (32 - bits); // :uint\n if (size > 32) {\n logger.error('Cannot read more than 32 bits at a time');\n }\n\n this.bitsAvailable -= bits;\n if (this.bitsAvailable > 0) {\n this.word <<= bits;\n } else if (this.bytesAvailable > 0) {\n this.loadWord();\n } else {\n throw new Error('no bits available');\n }\n\n bits = size - bits;\n if (bits > 0 && this.bitsAvailable) {\n return (valu << bits) | this.readBits(bits);\n } else {\n return valu;\n }\n }\n\n // ():uint\n skipLZ(): number {\n let leadingZeroCount; // :uint\n for (\n leadingZeroCount = 0;\n leadingZeroCount < this.bitsAvailable;\n ++leadingZeroCount\n ) {\n if ((this.word & (0x80000000 >>> leadingZeroCount)) !== 0) {\n // the first bit of working word is 1\n this.word <<= leadingZeroCount;\n this.bitsAvailable -= leadingZeroCount;\n return leadingZeroCount;\n }\n }\n // we exhausted word and still have not found a 1\n this.loadWord();\n return leadingZeroCount + this.skipLZ();\n }\n\n // ():void\n skipUEG(): void {\n this.skipBits(1 + this.skipLZ());\n }\n\n // ():void\n skipEG(): void {\n this.skipBits(1 + this.skipLZ());\n }\n\n // ():uint\n readUEG(): number {\n const clz = this.skipLZ(); // :uint\n return this.readBits(clz 
+ 1) - 1;\n }\n\n // ():int\n readEG(): number {\n const valu = this.readUEG(); // :int\n if (0x01 & valu) {\n // the number is odd if the low order bit is set\n return (1 + valu) >>> 1; // add 1 to make it even, and divide by 2\n } else {\n return -1 * (valu >>> 1); // divide by two then make it negative\n }\n }\n\n // Some convenience functions\n // :Boolean\n readBoolean(): boolean {\n return this.readBits(1) === 1;\n }\n\n // ():int\n readUByte(): number {\n return this.readBits(8);\n }\n\n // ():int\n readUShort(): number {\n return this.readBits(16);\n }\n\n // ():int\n readUInt(): number {\n return this.readBits(32);\n }\n\n /**\n * Advance the ExpGolomb decoder past a scaling list. The scaling\n * list is optionally transmitted as part of a sequence parameter\n * set and is not relevant to transmuxing.\n * @param count the number of entries in this scaling list\n * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1\n */\n skipScalingList(count: number): void {\n let lastScale = 8;\n let nextScale = 8;\n let deltaScale;\n for (let j = 0; j < count; j++) {\n if (nextScale !== 0) {\n deltaScale = this.readEG();\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n }\n\n /**\n * Read a sequence parameter set and return some interesting video\n * properties. A sequence parameter set is the H264 metadata that\n * describes the properties of upcoming video frames.\n * @returns an object with configuration parsed from the\n * sequence parameter set, including the dimensions of the\n * associated video frames.\n */\n readSPS(): {\n width: number;\n height: number;\n pixelRatio: [number, number];\n } {\n let frameCropLeftOffset = 0;\n let frameCropRightOffset = 0;\n let frameCropTopOffset = 0;\n let frameCropBottomOffset = 0;\n let numRefFramesInPicOrderCntCycle;\n let scalingListCount;\n let i;\n const readUByte = this.readUByte.bind(this);\n const readBits = this.readBits.bind(this);\n const readUEG = this.readUEG.bind(this);\n const readBoolean = this.readBoolean.bind(this);\n const skipBits = this.skipBits.bind(this);\n const skipEG = this.skipEG.bind(this);\n const skipUEG = this.skipUEG.bind(this);\n const skipScalingList = this.skipScalingList.bind(this);\n\n readUByte();\n const profileIdc = readUByte(); // profile_idc\n readBits(5); // profileCompat constraint_set[0-4]_flag, u(5)\n skipBits(3); // reserved_zero_3bits u(3),\n readUByte(); // level_idc u(8)\n skipUEG(); // seq_parameter_set_id\n // some profiles have more optional data we don't need\n if (\n profileIdc === 100 ||\n profileIdc === 110 ||\n profileIdc === 122 ||\n profileIdc === 244 ||\n profileIdc === 44 ||\n profileIdc === 83 ||\n profileIdc === 86 ||\n profileIdc === 118 ||\n profileIdc === 128\n ) {\n const chromaFormatIdc = readUEG();\n if (chromaFormatIdc === 3) {\n skipBits(1);\n } // separate_colour_plane_flag\n\n skipUEG(); // bit_depth_luma_minus8\n skipUEG(); // bit_depth_chroma_minus8\n skipBits(1); // qpprime_y_zero_transform_bypass_flag\n if (readBoolean()) {\n // seq_scaling_matrix_present_flag\n scalingListCount = chromaFormatIdc !== 3 ? 
8 : 12;\n for (i = 0; i < scalingListCount; i++) {\n if (readBoolean()) {\n // seq_scaling_list_present_flag[ i ]\n if (i < 6) {\n skipScalingList(16);\n } else {\n skipScalingList(64);\n }\n }\n }\n }\n }\n skipUEG(); // log2_max_frame_num_minus4\n const picOrderCntType = readUEG();\n if (picOrderCntType === 0) {\n readUEG(); // log2_max_pic_order_cnt_lsb_minus4\n } else if (picOrderCntType === 1) {\n skipBits(1); // delta_pic_order_always_zero_flag\n skipEG(); // offset_for_non_ref_pic\n skipEG(); // offset_for_top_to_bottom_field\n numRefFramesInPicOrderCntCycle = readUEG();\n for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {\n skipEG();\n } // offset_for_ref_frame[ i ]\n }\n skipUEG(); // max_num_ref_frames\n skipBits(1); // gaps_in_frame_num_value_allowed_flag\n const picWidthInMbsMinus1 = readUEG();\n const picHeightInMapUnitsMinus1 = readUEG();\n const frameMbsOnlyFlag = readBits(1);\n if (frameMbsOnlyFlag === 0) {\n skipBits(1);\n } // mb_adaptive_frame_field_flag\n\n skipBits(1); // direct_8x8_inference_flag\n if (readBoolean()) {\n // frame_cropping_flag\n frameCropLeftOffset = readUEG();\n frameCropRightOffset = readUEG();\n frameCropTopOffset = readUEG();\n frameCropBottomOffset = readUEG();\n }\n let pixelRatio: [number, number] = [1, 1];\n if (readBoolean()) {\n // vui_parameters_present_flag\n if (readBoolean()) {\n // aspect_ratio_info_present_flag\n const aspectRatioIdc = readUByte();\n switch (aspectRatioIdc) {\n case 1:\n pixelRatio = [1, 1];\n break;\n case 2:\n pixelRatio = [12, 11];\n break;\n case 3:\n pixelRatio = [10, 11];\n break;\n case 4:\n pixelRatio = [16, 11];\n break;\n case 5:\n pixelRatio = [40, 33];\n break;\n case 6:\n pixelRatio = [24, 11];\n break;\n case 7:\n pixelRatio = [20, 11];\n break;\n case 8:\n pixelRatio = [32, 11];\n break;\n case 9:\n pixelRatio = [80, 33];\n break;\n case 10:\n pixelRatio = [18, 11];\n break;\n case 11:\n pixelRatio = [15, 11];\n break;\n case 12:\n pixelRatio = [64, 33];\n break;\n case 13:\n pixelRatio = [160, 99];\n break;\n case 14:\n pixelRatio = [4, 3];\n break;\n case 15:\n pixelRatio = [3, 2];\n break;\n case 16:\n pixelRatio = [2, 1];\n break;\n case 255: {\n pixelRatio = [\n (readUByte() << 8) | readUByte(),\n (readUByte() << 8) | readUByte(),\n ];\n break;\n }\n }\n }\n }\n return {\n width: Math.ceil(\n (picWidthInMbsMinus1 + 1) * 16 -\n frameCropLeftOffset * 2 -\n frameCropRightOffset * 2,\n ),\n height:\n (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 -\n (frameMbsOnlyFlag ? 
2 : 4) *\n (frameCropTopOffset + frameCropBottomOffset),\n pixelRatio: pixelRatio,\n };\n }\n\n readSliceType() {\n // skip NALu type\n this.readUByte();\n // discard first_mb_in_slice\n this.readUEG();\n // return slice_type\n return this.readUEG();\n }\n}\n\nexport default ExpGolomb;\n", "import BaseVideoParser from './base-video-parser';\nimport {\n DemuxedVideoTrack,\n DemuxedUserdataTrack,\n VideoSampleUnit,\n} from '../../types/demuxer';\nimport {\n appendUint8Array,\n parseSEIMessageFromNALu,\n} from '../../utils/mp4-tools';\nimport ExpGolomb from './exp-golomb';\nimport type { PES } from '../tsdemuxer';\n\nclass AvcVideoParser extends BaseVideoParser {\n public parseAVCPES(\n track: DemuxedVideoTrack,\n textTrack: DemuxedUserdataTrack,\n pes: PES,\n last: boolean,\n duration: number,\n ) {\n const units = this.parseAVCNALu(track, pes.data);\n const debug = false;\n let VideoSample = this.VideoSample;\n let push: boolean;\n let spsfound = false;\n // free pes.data to save up some memory\n (pes as any).data = null;\n\n // if new NAL units found and last sample still there, let's push ...\n // this helps parsing streams with missing AUD (only do this if AUD never found)\n if (VideoSample && units.length && !track.audFound) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = this.createVideoSample(\n false,\n pes.pts,\n pes.dts,\n '',\n );\n }\n\n units.forEach((unit) => {\n switch (unit.type) {\n // NDR\n case 1: {\n let iskey = false;\n push = true;\n const data = unit.data;\n // only check slice type to detect KF in case SPS found in same packet (any keyframe is preceded by SPS ...)\n if (spsfound && data.length > 4) {\n // retrieve slice type by parsing beginning of NAL unit (follow H264 spec, slice_header definition) to detect keyframe embedded in NDR\n const sliceType = new ExpGolomb(data).readSliceType();\n // 2 : I slice, 4 : SI slice, 7 : I slice, 9: SI slice\n // SI slice : A slice that is coded using intra prediction only and using quantisation of the prediction samples.\n // An SI slice can be coded such that its decoded samples can be constructed identically to an SP slice.\n // I slice: A slice that is not an SI slice that is decoded using intra prediction only.\n // if (sliceType === 2 || sliceType === 7) {\n if (\n sliceType === 2 ||\n sliceType === 4 ||\n sliceType === 7 ||\n sliceType === 9\n ) {\n iskey = true;\n }\n }\n\n if (iskey) {\n // if we have non-keyframe data already, that cannot belong to the same frame as a keyframe, so force a push\n if (VideoSample?.frame && !VideoSample.key) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = null;\n }\n }\n\n if (!VideoSample) {\n VideoSample = this.VideoSample = this.createVideoSample(\n true,\n pes.pts,\n pes.dts,\n '',\n );\n }\n\n if (debug) {\n VideoSample.debug += 'NDR ';\n }\n\n VideoSample.frame = true;\n VideoSample.key = iskey;\n\n break;\n // IDR\n }\n case 5:\n push = true;\n // handle PES not starting with AUD\n // if we have frame data already, that cannot belong to the same frame, so force a push\n if (VideoSample?.frame && !VideoSample.key) {\n this.pushAccessUnit(VideoSample, track);\n VideoSample = this.VideoSample = null;\n }\n if (!VideoSample) {\n VideoSample = this.VideoSample = this.createVideoSample(\n true,\n pes.pts,\n pes.dts,\n '',\n );\n }\n\n if (debug) {\n VideoSample.debug += 'IDR ';\n }\n\n VideoSample.key = true;\n VideoSample.frame = true;\n break;\n // SEI\n case 6: {\n push = true;\n if (debug && VideoSample) {\n 
VideoSample.debug += 'SEI ';\n }\n parseSEIMessageFromNALu(\n unit.data,\n 1,\n pes.pts as number,\n textTrack.samples,\n );\n break;\n // SPS\n }\n case 7: {\n push = true;\n spsfound = true;\n if (debug && VideoSample) {\n VideoSample.debug += 'SPS ';\n }\n const sps = unit.data;\n const expGolombDecoder = new ExpGolomb(sps);\n const config = expGolombDecoder.readSPS();\n\n if (\n !track.sps ||\n track.width !== config.width ||\n track.height !== config.height ||\n track.pixelRatio?.[0] !== config.pixelRatio[0] ||\n track.pixelRatio?.[1] !== config.pixelRatio[1]\n ) {\n track.width = config.width;\n track.height = config.height;\n track.pixelRatio = config.pixelRatio;\n track.sps = [sps];\n track.duration = duration;\n const codecarray = sps.subarray(1, 4);\n let codecstring = 'avc1.';\n for (let i = 0; i < 3; i++) {\n let h = codecarray[i].toString(16);\n if (h.length < 2) {\n h = '0' + h;\n }\n\n codecstring += h;\n }\n track.codec = codecstring;\n }\n\n break;\n }\n // PPS\n case 8:\n push = true;\n if (debug && VideoSample) {\n VideoSample.debug += 'PPS ';\n }\n\n track.pps = [unit.data];\n\n break;\n // AUD\n case 9:\n push = true;\n track.audFound = true;\n if (VideoSample) {\n this.pushAccessUnit(VideoSample, track);\n }\n\n VideoSample = this.VideoSample = this.createVideoSample(\n false,\n pes.pts,\n pes.dts,\n debug ? 'AUD ' : '',\n );\n break;\n // Filler Data\n case 12:\n push = true;\n break;\n default:\n push = false;\n if (VideoSample) {\n VideoSample.debug += 'unknown NAL ' + unit.type + ' ';\n }\n\n break;\n }\n if (VideoSample && push) {\n const units = VideoSample.units;\n units.push(unit);\n }\n });\n // if last PES packet, push samples\n if (last && VideoSample) {\n this.pushAccessUnit(VideoSample, track);\n this.VideoSample = null;\n }\n }\n\n private parseAVCNALu(\n track: DemuxedVideoTrack,\n array: Uint8Array,\n ): Array<{\n data: Uint8Array;\n type: number;\n state?: number;\n }> {\n const len = array.byteLength;\n let state = track.naluState || 0;\n const lastState = state;\n const units: VideoSampleUnit[] = [];\n let i = 0;\n let value: number;\n let overflow: number;\n let unitType: number;\n let lastUnitStart = -1;\n let lastUnitType: number = 0;\n // logger.log('PES:' + Hex.hexDump(array));\n\n if (state === -1) {\n // special use case where we found 3 or 4-byte start codes exactly at the end of previous PES packet\n lastUnitStart = 0;\n // NALu type is value read from offset 0\n lastUnitType = array[0] & 0x1f;\n state = 0;\n i = 1;\n }\n\n while (i < len) {\n value = array[i++];\n // optimization. state 0 and 1 are the predominant case. let's handle them outside of the switch/case\n if (!state) {\n state = value ? 0 : 1;\n continue;\n }\n if (state === 1) {\n state = value ? 
0 : 2;\n continue;\n }\n // here we have state either equal to 2 or 3\n if (!value) {\n state = 3;\n } else if (value === 1) {\n overflow = i - state - 1;\n if (lastUnitStart >= 0) {\n const unit: VideoSampleUnit = {\n data: array.subarray(lastUnitStart, overflow),\n type: lastUnitType,\n };\n // logger.log('pushing NALU, type/size:' + unit.type + '/' + unit.data.byteLength);\n units.push(unit);\n } else {\n // lastUnitStart is undefined => this is the first start code found in this PES packet\n // first check if start code delimiter is overlapping between 2 PES packets,\n // ie it started in last packet (lastState not zero)\n // and ended at the beginning of this PES packet (i <= 4 - lastState)\n const lastUnit = this.getLastNalUnit(track.samples);\n if (lastUnit) {\n if (lastState && i <= 4 - lastState) {\n // start delimiter overlapping between PES packets\n // strip start delimiter bytes from the end of last NAL unit\n // check if lastUnit had a state different from zero\n if (lastUnit.state) {\n // strip last bytes\n lastUnit.data = lastUnit.data.subarray(\n 0,\n lastUnit.data.byteLength - lastState,\n );\n }\n }\n // If NAL units are not starting right at the beginning of the PES packet, push preceding data into previous NAL unit.\n\n if (overflow > 0) {\n // logger.log('first NALU found with overflow:' + overflow);\n lastUnit.data = appendUint8Array(\n lastUnit.data,\n array.subarray(0, overflow),\n );\n lastUnit.state = 0;\n }\n }\n }\n // check if we can read unit type\n if (i < len) {\n unitType = array[i] & 0x1f;\n // logger.log('find NALU @ offset:' + i + ',type:' + unitType);\n lastUnitStart = i;\n lastUnitType = unitType;\n state = 0;\n } else {\n // not enough bytes to read unit type. let's read it on next PES parsing\n state = -1;\n }\n } else {\n state = 0;\n }\n }\n if (lastUnitStart >= 0 && state >= 0) {\n const unit: VideoSampleUnit = {\n data: array.subarray(lastUnitStart, len),\n type: lastUnitType,\n state: state,\n };\n units.push(unit);\n // logger.log('pushing NALU, type/size/state:' + unit.type + '/' + unit.data.byteLength + '/' + state);\n }\n // no NALu found\n if (units.length === 0) {\n // append pes.data to previous NAL unit\n const lastUnit = this.getLastNalUnit(track.samples);\n if (lastUnit) {\n lastUnit.data = appendUint8Array(lastUnit.data, array);\n }\n }\n track.naluState = state;\n return units;\n }\n}\n\nexport default AvcVideoParser;\n\n// ---- sample-aes.ts ----\n\n/**\n * SAMPLE-AES decrypter\n */\n\nimport { HlsConfig } from '../config';\nimport Decrypter from '../crypt/decrypter';\nimport { HlsEventEmitter } from '../events';\nimport type {\n AudioSample,\n VideoSample,\n VideoSampleUnit,\n DemuxedVideoTrackBase,\n KeyData,\n} from '../types/demuxer';\nimport { discardEPB } from '../utils/mp4-tools';\n\nclass SampleAesDecrypter {\n private keyData: KeyData;\n private decrypter: Decrypter;\n\n constructor(observer: HlsEventEmitter, config: HlsConfig, keyData: KeyData) {\n this.keyData = keyData;\n this.decrypter = new Decrypter(config, {\n removePKCS7Padding: false,\n });\n }\n\n decryptBuffer(encryptedData: Uint8Array | ArrayBuffer): Promise<ArrayBuffer> {\n return this.decrypter.decrypt(\n encryptedData,\n this.keyData.key.buffer,\n this.keyData.iv.buffer,\n );\n }\n\n // AAC - the encrypted portion is all full 16-byte blocks starting at offset 16\n private decryptAacSample(\n samples: AudioSample[],\n sampleIndex: number,\n callback: () => void,\n ) {\n const curUnit = samples[sampleIndex].unit;\n if (curUnit.length <= 16) {\n // No encrypted portion in this sample (the first 16 bytes are not\n // encrypted, see https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/HLS_Sample_Encryption/Encryption/Encryption.html).\n return;\n }\n const encryptedData = curUnit.subarray(\n 16,\n curUnit.length - (curUnit.length % 16),\n );\n const encryptedBuffer = encryptedData.buffer.slice(\n encryptedData.byteOffset,\n encryptedData.byteOffset + encryptedData.length,\n );\n\n this.decryptBuffer(encryptedBuffer).then((decryptedBuffer: ArrayBuffer) => {\n const decryptedData = new Uint8Array(decryptedBuffer);\n curUnit.set(decryptedData, 16);\n\n if (!this.decrypter.isSync()) {\n this.decryptAacSamples(samples, sampleIndex + 1, callback);\n }\n });\n }\n\n decryptAacSamples(\n samples: AudioSample[],\n sampleIndex: number,\n callback: () => void,\n ) {\n for (; ; sampleIndex++) {\n if (sampleIndex >= samples.length) {\n callback();\n return;\n }\n\n if (samples[sampleIndex].unit.length < 32) {\n continue;\n }\n\n this.decryptAacSample(samples, sampleIndex, callback);\n\n if (!this.decrypter.isSync()) {\n return;\n }\n }\n }\n\n // AVC - one 16-byte block out of every ten is encrypted, starting at offset 32\n getAvcEncryptedData(decodedData: Uint8Array) {\n const encryptedDataLen =\n Math.floor((decodedData.length - 48) / 160) * 16 + 16;\n const encryptedData = new Int8Array(encryptedDataLen);\n let outputPos = 0;\n for (\n let inputPos = 32;\n inputPos < decodedData.length - 16;\n inputPos += 160, outputPos += 16\n ) {\n encryptedData.set(\n decodedData.subarray(inputPos, inputPos + 16),\n outputPos,\n );\n }\n\n return encryptedData;\n }\n\n getAvcDecryptedUnit(\n decodedData: Uint8Array,\n decryptedData: ArrayLike<number> | ArrayBuffer | SharedArrayBuffer,\n ) {\n const uint8DecryptedData = new Uint8Array(decryptedData);\n let inputPos = 0;\n for (\n let outputPos = 32;\n outputPos < decodedData.length - 16;\n outputPos += 160, inputPos += 16\n ) {\n decodedData.set(\n uint8DecryptedData.subarray(inputPos, inputPos + 16),\n outputPos,\n );\n }\n\n return decodedData;\n }\n\n decryptAvcSample(\n samples: VideoSample[],\n sampleIndex: number,\n unitIndex: number,\n callback: () => void,\n curUnit: VideoSampleUnit,\n ) {\n const decodedData = discardEPB(curUnit.data);\n const encryptedData = this.getAvcEncryptedData(decodedData);\n\n this.decryptBuffer(encryptedData.buffer).then(\n (decryptedBuffer: ArrayBuffer) => {\n curUnit.data = this.getAvcDecryptedUnit(decodedData, decryptedBuffer);\n\n if (!this.decrypter.isSync()) {\n this.decryptAvcSamples(samples, sampleIndex, unitIndex + 1, callback);\n }\n },\n );\n }\n\n decryptAvcSamples(\n samples: DemuxedVideoTrackBase['samples'],\n sampleIndex: number,\n unitIndex: number,\n callback: () => void,\n ) {\n if (samples instanceof Uint8Array) {\n throw new Error('Cannot decrypt samples of type Uint8Array');\n }\n\n for (; ; sampleIndex++, unitIndex = 0) {\n if (sampleIndex >= samples.length) {\n callback();\n return;\n }\n\n const curUnits = samples[sampleIndex].units;\n for (; ; unitIndex++) {\n if (unitIndex >= curUnits.length) {\n break;\n }\n\n const curUnit = curUnits[unitIndex];\n if (\n curUnit.data.length <= 48 ||\n (curUnit.type !== 1 && curUnit.type !== 5)\n ) {\n continue;\n }\n\n this.decryptAvcSample(\n samples,\n sampleIndex,\n unitIndex,\n callback,\n curUnit,\n );\n\n if (!this.decrypter.isSync()) {\n return;\n }\n }\n }\n }\n}\n\nexport default SampleAesDecrypter;\n
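\n// Illustration (not part of the library): the AVC pattern implemented above\n// leaves one 16-byte block out of every 160 encrypted, starting at offset 32.\n// This hypothetical helper lists the encrypted byte ranges of a NAL unit,\n// mirroring the traversal in getAvcEncryptedData()/getAvcDecryptedUnit():\nexport function sampleAesAvcEncryptedRanges(\n unitLength: number,\n): Array<[number, number]> {\n const ranges: Array<[number, number]> = [];\n for (let pos = 32; pos < unitLength - 16; pos += 160) {\n ranges.push([pos, pos + 16]);\n }\n return ranges;\n}\n// e.g. sampleAesAvcEncryptedRanges(400) -> [[32, 48], [192, 208], [352, 368]]\n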
\n// ---- tsdemuxer.ts ----\n\n/**\n * highly optimized TS demuxer:\n * parse PAT, PMT\n * extract PES packets from audio and video PIDs\n * extract AVC/H264 NAL units and AAC/ADTS samples from PES packets\n * trigger the remuxer upon parsing completion\n * it also tries to work around audio codec switches (HE-AAC to AAC and vice versa) as best it can, without having to restart the MediaSource.\n * it also controls the remuxing process:\n * upon discontinuity or level switch detection, it notifies the remuxer so that it can reset its state.\n */\n\nimport * as ADTS from './audio/adts';\nimport * as MpegAudio from './audio/mpegaudio';\nimport * as AC3 from './audio/ac3-demuxer';\nimport AvcVideoParser from './video/avc-video-parser';\nimport SampleAesDecrypter from './sample-aes';\nimport { Events } from '../events';\nimport { appendUint8Array, RemuxerTrackIdConfig } from '../utils/mp4-tools';\nimport { logger } from '../utils/logger';\nimport { ErrorTypes, ErrorDetails } from '../errors';\nimport type { HlsConfig } from '../config';\nimport type { HlsEventEmitter } from '../events';\nimport {\n DemuxedVideoTrack,\n DemuxedAudioTrack,\n DemuxedTrack,\n Demuxer,\n DemuxerResult,\n VideoSample,\n DemuxedMetadataTrack,\n DemuxedUserdataTrack,\n ElementaryStreamData,\n KeyData,\n MetadataSchema,\n} from '../types/demuxer';\nimport { AudioFrame } from '../types/demuxer';\n\nexport type ParsedTimestamp = {\n pts?: number;\n dts?: number;\n};\n\nexport type PES = ParsedTimestamp & {\n data: Uint8Array;\n len: number;\n};\n\nexport type ParsedVideoSample = ParsedTimestamp &\n Omit<VideoSample, 'pts' | 'dts'>;\n\nexport interface TypeSupported {\n mpeg: boolean;\n mp3: boolean;\n ac3: boolean;\n}\n\nconst PACKET_LENGTH = 188;\n\nclass TSDemuxer implements Demuxer {\n private readonly observer: HlsEventEmitter;\n private readonly config: HlsConfig;\n private typeSupported: TypeSupported;\n\n private sampleAes: SampleAesDecrypter | null = null;\n private pmtParsed: boolean = false;\n private audioCodec?: string;\n private videoCodec?: string;\n private _duration: number = 0;\n private _pmtId: number = -1;\n\n private _videoTrack?: DemuxedVideoTrack;\n private _audioTrack?: DemuxedAudioTrack;\n private _id3Track?: DemuxedMetadataTrack;\n private _txtTrack?: DemuxedUserdataTrack;\n private aacOverFlow: AudioFrame | null = null;\n private remainderData: Uint8Array | null = null;\n private videoParser: AvcVideoParser;\n\n constructor(\n observer: HlsEventEmitter,\n config: HlsConfig,\n typeSupported: TypeSupported,\n ) {\n this.observer = observer;\n this.config = config;\n this.typeSupported = typeSupported;\n this.videoParser = new AvcVideoParser();\n }\n\n static probe(data: Uint8Array) {\n const syncOffset = TSDemuxer.syncOffset(data);\n if (syncOffset > 0) {\n logger.warn(\n `MPEG2-TS detected but first sync word found @ offset ${syncOffset}`,\n );\n }\n return syncOffset !== -1;\n }\n\n static syncOffset(data: Uint8Array): number {\n const length = data.length;\n let scanwindow = Math.min(PACKET_LENGTH * 5, length - PACKET_LENGTH) + 1;\n let i = 0;\n while (i < scanwindow) {\n // a TS init segment should contain at least 2 TS packets: PAT and PMT, each starting with 0x47\n let foundPat = false;\n let packetStart = -1;\n let tsPackets = 0;\n for (let j = i; j < length; j += PACKET_LENGTH) {\n if (\n data[j] === 0x47 &&\n (length - j === PACKET_LENGTH || data[j + PACKET_LENGTH] === 0x47)\n ) {\n tsPackets++;\n if (packetStart === -1) {\n packetStart = j;\n // First sync word found at offset, increase scan length (#5251)\n if (packetStart !== 0) {\n scanwindow =\n Math.min(\n packetStart + PACKET_LENGTH * 99,\n data.length - PACKET_LENGTH,\n ) + 1;\n }\n }\n if (!foundPat) {\n foundPat = parsePID(data, j) === 
0;\n }\n // Sync word found at 0 with 3 packets, or found at offset least 2 packets up to scanwindow (#5501)\n if (\n foundPat &&\n tsPackets > 1 &&\n ((packetStart === 0 && tsPackets > 2) ||\n j + PACKET_LENGTH > scanwindow)\n ) {\n return packetStart;\n }\n } else if (tsPackets) {\n // Exit if sync word found, but does not contain contiguous packets\n return -1;\n } else {\n break;\n }\n }\n i++;\n }\n return -1;\n }\n\n /**\n * Creates a track model internal to demuxer used to drive remuxing input\n */\n static createTrack(\n type: 'audio' | 'video' | 'id3' | 'text',\n duration?: number,\n ): DemuxedTrack {\n return {\n container:\n type === 'video' || type === 'audio' ? 'video/mp2t' : undefined,\n type,\n id: RemuxerTrackIdConfig[type],\n pid: -1,\n inputTimeScale: 90000,\n sequenceNumber: 0,\n samples: [],\n dropped: 0,\n duration: type === 'audio' ? duration : undefined,\n };\n }\n\n /**\n * Initializes a new init segment on the demuxer/remuxer interface. Needed for discontinuities/track-switches (or at stream start)\n * Resets all internal track instances of the demuxer.\n */\n public resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string,\n videoCodec: string,\n trackDuration: number,\n ) {\n this.pmtParsed = false;\n this._pmtId = -1;\n\n this._videoTrack = TSDemuxer.createTrack('video') as DemuxedVideoTrack;\n this._audioTrack = TSDemuxer.createTrack(\n 'audio',\n trackDuration,\n ) as DemuxedAudioTrack;\n this._id3Track = TSDemuxer.createTrack('id3') as DemuxedMetadataTrack;\n this._txtTrack = TSDemuxer.createTrack('text') as DemuxedUserdataTrack;\n this._audioTrack.segmentCodec = 'aac';\n\n // flush any partial content\n this.aacOverFlow = null;\n this.remainderData = null;\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this._duration = trackDuration;\n }\n\n public resetTimeStamp() {}\n\n public resetContiguity(): void {\n const { _audioTrack, _videoTrack, _id3Track } = this;\n if (_audioTrack) {\n _audioTrack.pesData = null;\n }\n if (_videoTrack) {\n _videoTrack.pesData = null;\n }\n if (_id3Track) {\n _id3Track.pesData = null;\n }\n this.aacOverFlow = null;\n this.remainderData = null;\n }\n\n public demux(\n data: Uint8Array,\n timeOffset: number,\n isSampleAes = false,\n flush = false,\n ): DemuxerResult {\n if (!isSampleAes) {\n this.sampleAes = null;\n }\n\n let pes: PES | null;\n\n const videoTrack = this._videoTrack as DemuxedVideoTrack;\n const audioTrack = this._audioTrack as DemuxedAudioTrack;\n const id3Track = this._id3Track as DemuxedMetadataTrack;\n const textTrack = this._txtTrack as DemuxedUserdataTrack;\n\n let videoPid = videoTrack.pid;\n let videoData = videoTrack.pesData;\n let audioPid = audioTrack.pid;\n let id3Pid = id3Track.pid;\n let audioData = audioTrack.pesData;\n let id3Data = id3Track.pesData;\n let unknownPID: number | null = null;\n let pmtParsed = this.pmtParsed;\n let pmtId = this._pmtId;\n\n let len = data.length;\n if (this.remainderData) {\n data = appendUint8Array(this.remainderData, data);\n len = data.length;\n this.remainderData = null;\n }\n\n if (len < PACKET_LENGTH && !flush) {\n this.remainderData = data;\n return {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack,\n };\n }\n\n const syncOffset = Math.max(0, TSDemuxer.syncOffset(data));\n len -= (len - syncOffset) % PACKET_LENGTH;\n if (len < data.byteLength && !flush) {\n this.remainderData = new Uint8Array(\n data.buffer,\n len,\n data.buffer.byteLength - len,\n );\n }\n\n // loop through TS packets\n let tsPacketErrors = 0;\n for 
(let start = syncOffset; start < len; start += PACKET_LENGTH) {\n if (data[start] === 0x47) {\n const stt = !!(data[start + 1] & 0x40);\n const pid = parsePID(data, start);\n const atf = (data[start + 3] & 0x30) >> 4;\n\n // if an adaption field is present, its length is specified by the fifth byte of the TS packet header.\n let offset: number;\n if (atf > 1) {\n offset = start + 5 + data[start + 4];\n // continue if there is only adaptation field\n if (offset === start + PACKET_LENGTH) {\n continue;\n }\n } else {\n offset = start + 4;\n }\n switch (pid) {\n case videoPid:\n if (stt) {\n if (videoData && (pes = parsePES(videoData))) {\n this.videoParser.parseAVCPES(\n videoTrack,\n textTrack,\n pes,\n false,\n this._duration,\n );\n }\n\n videoData = { data: [], size: 0 };\n }\n if (videoData) {\n videoData.data.push(data.subarray(offset, start + PACKET_LENGTH));\n videoData.size += start + PACKET_LENGTH - offset;\n }\n break;\n case audioPid:\n if (stt) {\n if (audioData && (pes = parsePES(audioData))) {\n switch (audioTrack.segmentCodec) {\n case 'aac':\n this.parseAACPES(audioTrack, pes);\n break;\n case 'mp3':\n this.parseMPEGPES(audioTrack, pes);\n break;\n case 'ac3':\n if (__USE_M2TS_ADVANCED_CODECS__) {\n this.parseAC3PES(audioTrack, pes);\n }\n break;\n }\n }\n audioData = { data: [], size: 0 };\n }\n if (audioData) {\n audioData.data.push(data.subarray(offset, start + PACKET_LENGTH));\n audioData.size += start + PACKET_LENGTH - offset;\n }\n break;\n case id3Pid:\n if (stt) {\n if (id3Data && (pes = parsePES(id3Data))) {\n this.parseID3PES(id3Track, pes);\n }\n\n id3Data = { data: [], size: 0 };\n }\n if (id3Data) {\n id3Data.data.push(data.subarray(offset, start + PACKET_LENGTH));\n id3Data.size += start + PACKET_LENGTH - offset;\n }\n break;\n case 0:\n if (stt) {\n offset += data[offset] + 1;\n }\n\n pmtId = this._pmtId = parsePAT(data, offset);\n // logger.log('PMT PID:' + this._pmtId);\n break;\n case pmtId: {\n if (stt) {\n offset += data[offset] + 1;\n }\n\n const parsedPIDs = parsePMT(\n data,\n offset,\n this.typeSupported,\n isSampleAes,\n this.observer,\n );\n\n // only update track id if track PID found while parsing PMT\n // this is to avoid resetting the PID to -1 in case\n // track PID transiently disappears from the stream\n // this could happen in case of transient missing audio samples for example\n // NOTE this is only the PID of the track as found in TS,\n // but we are not using this for MP4 track IDs.\n videoPid = parsedPIDs.videoPid;\n if (videoPid > 0) {\n videoTrack.pid = videoPid;\n videoTrack.segmentCodec = parsedPIDs.segmentVideoCodec;\n }\n\n audioPid = parsedPIDs.audioPid;\n if (audioPid > 0) {\n audioTrack.pid = audioPid;\n audioTrack.segmentCodec = parsedPIDs.segmentAudioCodec;\n }\n id3Pid = parsedPIDs.id3Pid;\n if (id3Pid > 0) {\n id3Track.pid = id3Pid;\n }\n\n if (unknownPID !== null && !pmtParsed) {\n logger.warn(\n `MPEG-TS PMT found at ${start} after unknown PID '${unknownPID}'. 
Backtracking to sync byte @${syncOffset} to parse all TS packets.`,\n );\n unknownPID = null;\n // rewind so that the += 188 in the for loop resets start to syncOffset\n start = syncOffset - 188;\n }\n pmtParsed = this.pmtParsed = true;\n break;\n }\n case 0x11:\n case 0x1fff:\n break;\n default:\n unknownPID = pid;\n break;\n }\n } else {\n tsPacketErrors++;\n }\n }\n\n if (tsPacketErrors > 0) {\n emitParsingError(\n this.observer,\n new Error(\n `Found ${tsPacketErrors} TS packet/s that do not start with 0x47`,\n ),\n );\n }\n\n videoTrack.pesData = videoData;\n audioTrack.pesData = audioData;\n id3Track.pesData = id3Data;\n\n const demuxResult: DemuxerResult = {\n audioTrack,\n videoTrack,\n id3Track,\n textTrack,\n };\n\n if (flush) {\n this.extractRemainingSamples(demuxResult);\n }\n\n return demuxResult;\n }\n\n public flush(): DemuxerResult | Promise<DemuxerResult> {\n const { remainderData } = this;\n this.remainderData = null;\n let result: DemuxerResult;\n if (remainderData) {\n result = this.demux(remainderData, -1, false, true);\n } else {\n result = {\n videoTrack: this._videoTrack as DemuxedVideoTrack,\n audioTrack: this._audioTrack as DemuxedAudioTrack,\n id3Track: this._id3Track as DemuxedMetadataTrack,\n textTrack: this._txtTrack as DemuxedUserdataTrack,\n };\n }\n this.extractRemainingSamples(result);\n if (this.sampleAes) {\n return this.decrypt(result, this.sampleAes);\n }\n return result;\n }\n\n private extractRemainingSamples(demuxResult: DemuxerResult) {\n const { audioTrack, videoTrack, id3Track, textTrack } = demuxResult;\n const videoData = videoTrack.pesData;\n const audioData = audioTrack.pesData;\n const id3Data = id3Track.pesData;\n // try to parse last PES packets\n let pes: PES | null;\n if (videoData && (pes = parsePES(videoData))) {\n this.videoParser.parseAVCPES(\n videoTrack as DemuxedVideoTrack,\n textTrack as DemuxedUserdataTrack,\n pes,\n true,\n this._duration,\n );\n videoTrack.pesData = null;\n } else {\n // either avcData null or PES truncated, keep it for next frag parsing\n videoTrack.pesData = videoData;\n }\n\n if (audioData && (pes = parsePES(audioData))) {\n switch (audioTrack.segmentCodec) {\n case 'aac':\n this.parseAACPES(audioTrack, pes);\n break;\n case 'mp3':\n this.parseMPEGPES(audioTrack, pes);\n break;\n case 'ac3':\n if (__USE_M2TS_ADVANCED_CODECS__) {\n this.parseAC3PES(audioTrack, pes);\n }\n break;\n }\n audioTrack.pesData = null;\n } else {\n if (audioData?.size) {\n logger.log(\n 'last AAC PES packet truncated, might overlap between fragments',\n );\n }\n\n // either audioData null or PES truncated, keep it for next frag parsing\n audioTrack.pesData = audioData;\n }\n\n if (id3Data && (pes = parsePES(id3Data))) {\n this.parseID3PES(id3Track, pes);\n id3Track.pesData = null;\n } else {\n // either id3Data null or PES truncated, keep it for next frag parsing\n id3Track.pesData = id3Data;\n }\n }\n\n public demuxSampleAes(\n data: Uint8Array,\n keyData: KeyData,\n timeOffset: number,\n ): Promise<DemuxerResult> {\n const demuxResult = this.demux(\n data,\n timeOffset,\n true,\n !this.config.progressive,\n );\n const sampleAes = (this.sampleAes = new SampleAesDecrypter(\n this.observer,\n this.config,\n keyData,\n ));\n return this.decrypt(demuxResult, sampleAes);\n }\n\n private decrypt(\n demuxResult: DemuxerResult,\n sampleAes: SampleAesDecrypter,\n ): Promise<DemuxerResult> {\n return new Promise((resolve) => {\n const { audioTrack, videoTrack } = demuxResult;\n if (audioTrack.samples && audioTrack.segmentCodec === 'aac') {\n 
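/* Sketch of the control flow below (hypothetical helper names, for\n illustration only): Sample-AES decryption is callback-based, so the AVC\n pass is only started from the AAC completion callback, roughly:\n decryptAac(samples, () => decryptAvc(samples, () => resolve(result)));\n resolve() must therefore wait for both asynchronous passes. */\n 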
sampleAes.decryptAacSamples(audioTrack.samples, 0, () => {\n if (videoTrack.samples) {\n sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {\n resolve(demuxResult);\n });\n } else {\n resolve(demuxResult);\n }\n });\n } else if (videoTrack.samples) {\n sampleAes.decryptAvcSamples(videoTrack.samples, 0, 0, () => {\n resolve(demuxResult);\n });\n }\n });\n }\n\n public destroy() {\n this._duration = 0;\n }\n\n private parseAACPES(track: DemuxedAudioTrack, pes: PES) {\n let startOffset = 0;\n const aacOverFlow = this.aacOverFlow;\n let data = pes.data;\n if (aacOverFlow) {\n this.aacOverFlow = null;\n const frameMissingBytes = aacOverFlow.missing;\n const sampleLength = aacOverFlow.sample.unit.byteLength;\n // logger.log(`AAC: append overflowing ${sampleLength} bytes to beginning of new PES`);\n if (frameMissingBytes === -1) {\n data = appendUint8Array(aacOverFlow.sample.unit, data);\n } else {\n const frameOverflowBytes = sampleLength - frameMissingBytes;\n aacOverFlow.sample.unit.set(\n data.subarray(0, frameMissingBytes),\n frameOverflowBytes,\n );\n track.samples.push(aacOverFlow.sample);\n startOffset = aacOverFlow.missing;\n }\n }\n // look for ADTS header (0xFFFx)\n let offset: number;\n let len: number;\n for (offset = startOffset, len = data.length; offset < len - 1; offset++) {\n if (ADTS.isHeader(data, offset)) {\n break;\n }\n }\n // if ADTS header does not start straight from the beginning of the PES payload, raise an error\n if (offset !== startOffset) {\n let reason: string;\n const recoverable = offset < len - 1;\n if (recoverable) {\n reason = `AAC PES did not start with ADTS header,offset:${offset}`;\n } else {\n reason = 'No ADTS header found in AAC PES';\n }\n emitParsingError(this.observer, new Error(reason), recoverable);\n if (!recoverable) {\n return;\n }\n }\n\n ADTS.initTrackConfig(\n track,\n this.observer,\n data,\n offset,\n this.audioCodec as string,\n );\n\n let pts: number;\n if (pes.pts !== undefined) {\n pts = pes.pts;\n } else if (aacOverFlow) {\n // if last AAC frame is overflowing, we should ensure timestamps are contiguous:\n // first sample PTS should be equal to last sample PTS + frameDuration\n const frameDuration = ADTS.getFrameDuration(track.samplerate as number);\n pts = aacOverFlow.sample.pts + frameDuration;\n } else {\n logger.warn('[tsdemuxer]: AAC PES unknown PTS');\n return;\n }\n\n // scan for aac samples\n let frameIndex = 0;\n let frame;\n while (offset < len) {\n frame = ADTS.appendFrame(track, data, offset, pts, frameIndex);\n offset += frame.length;\n if (!frame.missing) {\n frameIndex++;\n for (; offset < len - 1; offset++) {\n if (ADTS.isHeader(data, offset)) {\n break;\n }\n }\n } else {\n this.aacOverFlow = frame;\n break;\n }\n }\n }\n\n private parseMPEGPES(track: DemuxedAudioTrack, pes: PES) {\n const data = pes.data;\n const length = data.length;\n let frameIndex = 0;\n let offset = 0;\n const pts = pes.pts;\n if (pts === undefined) {\n logger.warn('[tsdemuxer]: MPEG PES unknown PTS');\n return;\n }\n\n while (offset < length) {\n if (MpegAudio.isHeader(data, offset)) {\n const frame = MpegAudio.appendFrame(\n track,\n data,\n offset,\n pts,\n frameIndex,\n );\n if (frame) {\n offset += frame.length;\n frameIndex++;\n } else {\n // logger.log('Unable to parse Mpeg audio frame');\n break;\n }\n } else {\n // nothing found, keep looking\n offset++;\n }\n }\n }\n\n private parseAC3PES(track: DemuxedAudioTrack, pes: PES) {\n if (__USE_M2TS_ADVANCED_CODECS__) {\n const data = pes.data;\n const pts = pes.pts;\n if (pts === 
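\n /* Note on the AAC timing above: when a frame overflows a PES boundary,\n parseAACPES keeps PTS contiguous by adding one frame duration to the last\n sample PTS. As a sketch (assuming ADTS.getFrameDuration returns the frame\n length in 90 kHz ticks): an AAC frame carries 1024 samples, so at\n 44100 Hz one frame lasts (1024 * 90000) / 44100, roughly 2090 ticks,\n i.e. about 23.2 ms. */\n 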
undefined) {\n logger.warn('[tsdemuxer]: AC3 PES unknown PTS');\n return;\n }\n const length = data.length;\n let frameIndex = 0;\n let offset = 0;\n let parsed;\n\n while (\n offset < length &&\n (parsed = AC3.appendFrame(track, data, offset, pts, frameIndex++)) > 0\n ) {\n offset += parsed;\n }\n }\n }\n\n private parseID3PES(id3Track: DemuxedMetadataTrack, pes: PES) {\n if (pes.pts === undefined) {\n logger.warn('[tsdemuxer]: ID3 PES unknown PTS');\n return;\n }\n const id3Sample = Object.assign({}, pes as Required<PES>, {\n type: this._videoTrack ? MetadataSchema.emsg : MetadataSchema.audioId3,\n duration: Number.POSITIVE_INFINITY,\n });\n id3Track.samples.push(id3Sample);\n }\n}\n\nfunction parsePID(data: Uint8Array, offset: number): number {\n // pid is a 13-bit field starting at the last bit of TS[1]\n return ((data[offset + 1] & 0x1f) << 8) + data[offset + 2];\n}\n\nfunction parsePAT(data: Uint8Array, offset: number): number {\n // skip the PSI header and parse the first PMT entry\n return ((data[offset + 10] & 0x1f) << 8) | data[offset + 11];\n}\n\nfunction parsePMT(\n data: Uint8Array,\n offset: number,\n typeSupported: TypeSupported,\n isSampleAes: boolean,\n observer: HlsEventEmitter,\n) {\n const result = {\n audioPid: -1,\n videoPid: -1,\n id3Pid: -1,\n segmentVideoCodec: 'avc',\n segmentAudioCodec: 'aac',\n };\n const sectionLength = ((data[offset + 1] & 0x0f) << 8) | data[offset + 2];\n const tableEnd = offset + 3 + sectionLength - 4;\n // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n const programInfoLength =\n ((data[offset + 10] & 0x0f) << 8) | data[offset + 11];\n // advance the offset to the first entry in the mapping table\n offset += 12 + programInfoLength;\n while (offset < tableEnd) {\n const pid = parsePID(data, offset);\n const esInfoLength = ((data[offset + 3] & 0x0f) << 8) | data[offset + 4];\n switch (data[offset]) {\n case 0xcf: // SAMPLE-AES AAC\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('ADTS AAC');\n break;\n }\n /* falls through */\n case 0x0f: // ISO/IEC 13818-7 ADTS AAC (MPEG-2 lower bit-rate audio)\n // logger.log('AAC PID:' + pid);\n if (result.audioPid === -1) {\n result.audioPid = pid;\n }\n\n break;\n\n // Packetized metadata (ID3)\n case 0x15:\n // logger.log('ID3 PID:' + pid);\n if (result.id3Pid === -1) {\n result.id3Pid = pid;\n }\n\n break;\n\n case 0xdb: // SAMPLE-AES AVC\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('H.264');\n break;\n }\n /* falls through */\n case 0x1b: // ITU-T Rec. 
H.264 and ISO/IEC 14496-10 (lower bit-rate video)\n // logger.log('AVC PID:' + pid);\n if (result.videoPid === -1) {\n result.videoPid = pid;\n result.segmentVideoCodec = 'avc';\n }\n\n break;\n\n // ISO/IEC 11172-3 (MPEG-1 audio)\n // or ISO/IEC 13818-3 (MPEG-2 halved sample rate audio)\n case 0x03:\n case 0x04:\n // logger.log('MPEG PID:' + pid);\n if (!typeSupported.mpeg && !typeSupported.mp3) {\n logger.log('MPEG audio found, not supported in this browser');\n } else if (result.audioPid === -1) {\n result.audioPid = pid;\n result.segmentAudioCodec = 'mp3';\n }\n break;\n\n case 0xc1: // SAMPLE-AES AC3\n if (!isSampleAes) {\n logEncryptedSamplesFoundInUnencryptedStream('AC-3');\n break;\n }\n /* falls through */\n case 0x81:\n if (__USE_M2TS_ADVANCED_CODECS__) {\n if (!typeSupported.ac3) {\n logger.log('AC-3 audio found, not supported in this browser');\n } else if (result.audioPid === -1) {\n result.audioPid = pid;\n result.segmentAudioCodec = 'ac3';\n }\n } else {\n logger.warn('AC-3 in M2TS support not included in build');\n }\n break;\n\n case 0x06:\n // stream_type 6 can mean a lot of different things in case of DVB.\n // We need to look at the descriptors. Right now, we're only interested\n // in AC-3 audio, so we do the descriptor parsing only when we don't have\n // an audio PID yet.\n if (result.audioPid === -1 && esInfoLength > 0) {\n let parsePos = offset + 5;\n let remaining = esInfoLength;\n\n while (remaining > 2) {\n const descriptorId = data[parsePos];\n\n switch (descriptorId) {\n case 0x6a: // DVB Descriptor for AC-3\n if (__USE_M2TS_ADVANCED_CODECS__) {\n if (typeSupported.ac3 !== true) {\n logger.log(\n 'AC-3 audio found, not supported in this browser for now',\n );\n } else {\n result.audioPid = pid;\n result.segmentAudioCodec = 'ac3';\n }\n } else {\n logger.warn('AC-3 in M2TS support not included in build');\n }\n break;\n }\n\n const descriptorLen = data[parsePos + 1] + 2;\n parsePos += descriptorLen;\n remaining -= descriptorLen;\n }\n }\n break;\n\n case 0xc2: // SAMPLE-AES EC3\n /* falls through */\n case 0x87:\n emitParsingError(observer, new Error('Unsupported EC-3 in M2TS found'));\n return result;\n\n case 0x24:\n emitParsingError(observer, new Error('Unsupported HEVC in M2TS found'));\n return result;\n\n default:\n // logger.log('unknown stream type:' + data[offset]);\n break;\n }\n // move to the next table entry\n // skip past the elementary stream descriptors, if present\n offset += esInfoLength + 5;\n }\n return result;\n}\n\nfunction emitParsingError(\n observer: HlsEventEmitter,\n error: Error,\n levelRetry?: boolean,\n) {\n logger.warn(`parsing error: ${error.message}`);\n observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: false,\n levelRetry,\n error,\n reason: error.message,\n });\n}\n\nfunction logEncryptedSamplesFoundInUnencryptedStream(type: string) {\n logger.log(`${type} with AES-128-CBC encryption found in unencrypted stream`);\n}\n\nfunction parsePES(stream: ElementaryStreamData): PES | null {\n let i = 0;\n let frag: Uint8Array;\n let pesLen: number;\n let pesHdrLen: number;\n let pesPts: number | undefined;\n let pesDts: number | undefined;\n const data = stream.data;\n // safety check\n if (!stream || stream.size === 0) {\n return null;\n }\n\n // we might need up to 19 bytes to read PES header\n // if first chunk of data is less than 19 bytes, let's merge it with following ones until we get 19 bytes\n // usually only one merge is needed (and this is rare 
...)\n while (data[0].length < 19 && data.length > 1) {\n data[0] = appendUint8Array(data[0], data[1]);\n data.splice(1, 1);\n }\n // retrieve PTS/DTS from first fragment\n frag = data[0];\n const pesPrefix = (frag[0] << 16) + (frag[1] << 8) + frag[2];\n if (pesPrefix === 1) {\n pesLen = (frag[4] << 8) + frag[5];\n // if PES parsed length is not zero and greater than total received length, stop parsing. PES might be truncated\n // minus 6 : PES header size\n if (pesLen && pesLen > stream.size - 6) {\n return null;\n }\n\n const pesFlags = frag[7];\n if (pesFlags & 0xc0) {\n /* PES header described here : http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n as PTS / DTS is 33 bit we cannot use bitwise operator in JS,\n as Bitwise operators treat their operands as a sequence of 32 bits */\n pesPts =\n (frag[9] & 0x0e) * 536870912 + // 1 << 29\n (frag[10] & 0xff) * 4194304 + // 1 << 22\n (frag[11] & 0xfe) * 16384 + // 1 << 14\n (frag[12] & 0xff) * 128 + // 1 << 7\n (frag[13] & 0xfe) / 2;\n\n if (pesFlags & 0x40) {\n pesDts =\n (frag[14] & 0x0e) * 536870912 + // 1 << 29\n (frag[15] & 0xff) * 4194304 + // 1 << 22\n (frag[16] & 0xfe) * 16384 + // 1 << 14\n (frag[17] & 0xff) * 128 + // 1 << 7\n (frag[18] & 0xfe) / 2;\n\n if (pesPts - pesDts > 60 * 90000) {\n logger.warn(\n `${Math.round(\n (pesPts - pesDts) / 90000,\n )}s delta between PTS and DTS, align them`,\n );\n pesPts = pesDts;\n }\n } else {\n pesDts = pesPts;\n }\n }\n pesHdrLen = frag[8];\n // 9 bytes : 6 bytes for PES header + 3 bytes for PES extension\n let payloadStartOffset = pesHdrLen + 9;\n if (stream.size <= payloadStartOffset) {\n return null;\n }\n stream.size -= payloadStartOffset;\n // reassemble PES packet\n const pesData = new Uint8Array(stream.size);\n for (let j = 0, dataLen = data.length; j < dataLen; j++) {\n frag = data[j];\n let len = frag.byteLength;\n if (payloadStartOffset) {\n if (payloadStartOffset > len) {\n // trim full frag if PES header bigger than frag\n payloadStartOffset -= len;\n continue;\n } else {\n // trim partial frag if PES header smaller than frag\n frag = frag.subarray(payloadStartOffset);\n len -= payloadStartOffset;\n payloadStartOffset = 0;\n }\n }\n pesData.set(frag, i);\n i += len;\n }\n if (pesLen) {\n // payload size : remove PES header + PES extension\n pesLen -= pesHdrLen + 3;\n }\n return { data: pesData, pts: pesPts, dts: pesDts, len: pesLen };\n }\n return null;\n}\n\nexport default TSDemuxer;\n", "/**\n * MP3 demuxer\n */\nimport BaseAudioDemuxer from './base-audio-demuxer';\nimport { getID3Data, getTimeStamp } from '../id3';\nimport { getAudioBSID } from './dolby';\nimport { logger } from '../../utils/logger';\nimport * as MpegAudio from './mpegaudio';\n\nclass MP3Demuxer extends BaseAudioDemuxer {\n resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n trackDuration: number,\n ) {\n super.resetInitSegment(initSegment, audioCodec, videoCodec, trackDuration);\n this._audioTrack = {\n container: 'audio/mpeg',\n type: 'audio',\n id: 2,\n pid: -1,\n sequenceNumber: 0,\n segmentCodec: 'mp3',\n samples: [],\n manifestCodec: audioCodec,\n duration: trackDuration,\n inputTimeScale: 90000,\n dropped: 0,\n };\n }\n\n static probe(data: Uint8Array | undefined): boolean {\n if (!data) {\n return false;\n }\n\n // check if data contains ID3 timestamp and MPEG sync word\n // Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1\n // Layer bits (position 14 and 15) in header should be 
always different from 0 (Layer I or Layer II or Layer III)\n // More info http://www.mp3-tech.org/programmer/frame_header.html\n const id3Data = getID3Data(data, 0);\n let offset = id3Data?.length || 0;\n\n // Check for ac-3|ec-3 sync bytes and return false if present\n if (\n id3Data &&\n data[offset] === 0x0b &&\n data[offset + 1] === 0x77 &&\n getTimeStamp(id3Data) !== undefined &&\n // check the bsid to confirm ac-3 or ec-3 (not mp3)\n getAudioBSID(data, offset) <= 16\n ) {\n return false;\n }\n\n for (let length = data.length; offset < length; offset++) {\n if (MpegAudio.probe(data, offset)) {\n logger.log('MPEG Audio sync word found !');\n return true;\n }\n }\n return false;\n }\n\n canParse(data, offset) {\n return MpegAudio.canParse(data, offset);\n }\n\n appendFrame(track, data, offset) {\n if (this.basePTS === null) {\n return;\n }\n return MpegAudio.appendFrame(\n track,\n data,\n offset,\n this.basePTS,\n this.frameIndex,\n );\n }\n}\n\nexport default MP3Demuxer;\n", "/**\n * AAC helper\n */\n\nclass AAC {\n static getSilentFrame(\n codec?: string,\n channelCount?: number,\n ): Uint8Array | undefined {\n switch (codec) {\n case 'mp4a.40.2':\n if (channelCount === 1) {\n return new Uint8Array([0x00, 0xc8, 0x00, 0x80, 0x23, 0x80]);\n } else if (channelCount === 2) {\n return new Uint8Array([\n 0x21, 0x00, 0x49, 0x90, 0x02, 0x19, 0x00, 0x23, 0x80,\n ]);\n } else if (channelCount === 3) {\n return new Uint8Array([\n 0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64,\n 0x00, 0x8e,\n ]);\n } else if (channelCount === 4) {\n return new Uint8Array([\n 0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64,\n 0x00, 0x80, 0x2c, 0x80, 0x08, 0x02, 0x38,\n ]);\n } else if (channelCount === 5) {\n return new Uint8Array([\n 0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64,\n 0x00, 0x82, 0x30, 0x04, 0x99, 0x00, 0x21, 0x90, 0x02, 0x38,\n ]);\n } else if (channelCount === 6) {\n return new Uint8Array([\n 0x00, 0xc8, 0x00, 0x80, 0x20, 0x84, 0x01, 0x26, 0x40, 0x08, 0x64,\n 0x00, 0x82, 0x30, 0x04, 0x99, 0x00, 0x21, 0x90, 0x02, 0x00, 0xb2,\n 0x00, 0x20, 0x08, 0xe0,\n ]);\n }\n\n break;\n // handle HE-AAC below (mp4a.40.5 / mp4a.40.29)\n default:\n if (channelCount === 1) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0:d=0.05\" -c:a libfdk_aac -profile:a aac_he -b:a 4k output.aac && hexdump -v -e '16/1 \"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([\n 0x1, 0x40, 0x22, 0x80, 0xa3, 0x4e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0,\n 0x0, 0x1c, 0x6, 0xf1, 0xc1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5e,\n ]);\n } else if (channelCount === 2) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0|0:d=0.05\" -c:a libfdk_aac -profile:a aac_he_v2 -b:a 4k output.aac && hexdump -v -e '16/1 \"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([\n 0x1, 0x40, 0x22, 0x80, 0xa3, 0x5e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0,\n 0x0, 0x0, 0x95, 0x0, 0x6, 0xf1, 0xa1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5e,\n ]);\n } else if (channelCount === 3) {\n // ffmpeg -y -f lavfi -i \"aevalsrc=0|0|0:d=0.05\" -c:a libfdk_aac -profile:a aac_he_v2 -b:a 4k output.aac && hexdump -v -e '16/1 
\"0x%x,\" \"\\n\"' -v output.aac\n return new Uint8Array([\n 0x1, 0x40, 0x22, 0x80, 0xa3, 0x5e, 0xe6, 0x80, 0xba, 0x8, 0x0, 0x0,\n 0x0, 0x0, 0x95, 0x0, 0x6, 0xf1, 0xa1, 0xa, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a, 0x5a,\n 0x5a, 0x5e,\n ]);\n }\n break;\n }\n return undefined;\n }\n}\n\nexport default AAC;\n", "/**\n * Generate MP4 Box\n */\n\nimport { appendUint8Array } from '../utils/mp4-tools';\n\ntype HdlrTypes = {\n video: Uint8Array;\n audio: Uint8Array;\n};\n\nconst UINT32_MAX = Math.pow(2, 32) - 1;\n\nclass MP4 {\n public static types: Record;\n private static HDLR_TYPES: HdlrTypes;\n private static STTS: Uint8Array;\n private static STSC: Uint8Array;\n private static STCO: Uint8Array;\n private static STSZ: Uint8Array;\n private static VMHD: Uint8Array;\n private static SMHD: Uint8Array;\n private static STSD: Uint8Array;\n private static FTYP: Uint8Array;\n private static DINF: Uint8Array;\n\n static init() {\n MP4.types = {\n avc1: [], // codingname\n avcC: [],\n btrt: [],\n dinf: [],\n dref: [],\n esds: [],\n ftyp: [],\n hdlr: [],\n mdat: [],\n mdhd: [],\n mdia: [],\n mfhd: [],\n minf: [],\n moof: [],\n moov: [],\n mp4a: [],\n '.mp3': [],\n dac3: [],\n 'ac-3': [],\n mvex: [],\n mvhd: [],\n pasp: [],\n sdtp: [],\n stbl: [],\n stco: [],\n stsc: [],\n stsd: [],\n stsz: [],\n stts: [],\n tfdt: [],\n tfhd: [],\n traf: [],\n trak: [],\n trun: [],\n trex: [],\n tkhd: [],\n vmhd: [],\n smhd: [],\n };\n\n let i: string;\n for (i in MP4.types) {\n if (MP4.types.hasOwnProperty(i)) {\n MP4.types[i] = [\n i.charCodeAt(0),\n i.charCodeAt(1),\n i.charCodeAt(2),\n i.charCodeAt(3),\n ];\n }\n }\n\n const videoHdlr = new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00, // pre_defined\n 0x76,\n 0x69,\n 0x64,\n 0x65, // handler_type: 'vide'\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x56,\n 0x69,\n 0x64,\n 0x65,\n 0x6f,\n 0x48,\n 0x61,\n 0x6e,\n 0x64,\n 0x6c,\n 0x65,\n 0x72,\n 0x00, // name: 'VideoHandler'\n ]);\n\n const audioHdlr = new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00, // pre_defined\n 0x73,\n 0x6f,\n 0x75,\n 0x6e, // handler_type: 'soun'\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x53,\n 0x6f,\n 0x75,\n 0x6e,\n 0x64,\n 0x48,\n 0x61,\n 0x6e,\n 0x64,\n 0x6c,\n 0x65,\n 0x72,\n 0x00, // name: 'SoundHandler'\n ]);\n\n MP4.HDLR_TYPES = {\n video: videoHdlr,\n audio: audioHdlr,\n };\n\n const dref = new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x01, // entry_count\n 0x00,\n 0x00,\n 0x00,\n 0x0c, // entry_size\n 0x75,\n 0x72,\n 0x6c,\n 0x20, // 'url' type\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x01, // entry_flags\n ]);\n\n const stco = new Uint8Array([\n 0x00, // version\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00, // entry_count\n ]);\n\n MP4.STTS = MP4.STSC = MP4.STCO = stco;\n\n MP4.STSZ = new Uint8Array([\n 0x00, // version\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00, // sample_size\n 0x00,\n 0x00,\n 0x00,\n 0x00, // sample_count\n ]);\n MP4.VMHD = new Uint8Array([\n 0x00, // version\n 0x00,\n 0x00,\n 0x01, // flags\n 0x00,\n 0x00, // 
graphicsmode\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // opcolor\n ]);\n MP4.SMHD = new Uint8Array([\n 0x00, // version\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00, // balance\n 0x00,\n 0x00, // reserved\n ]);\n\n MP4.STSD = new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x01,\n ]); // entry_count\n\n const majorBrand = new Uint8Array([105, 115, 111, 109]); // isom\n const avc1Brand = new Uint8Array([97, 118, 99, 49]); // avc1\n const minorVersion = new Uint8Array([0, 0, 0, 1]);\n\n MP4.FTYP = MP4.box(\n MP4.types.ftyp,\n majorBrand,\n minorVersion,\n majorBrand,\n avc1Brand,\n );\n MP4.DINF = MP4.box(MP4.types.dinf, MP4.box(MP4.types.dref, dref));\n }\n\n static box(type, ...payload: Uint8Array[]) {\n let size = 8;\n let i = payload.length;\n const len = i;\n // calculate the total size we need to allocate\n while (i--) {\n size += payload[i].byteLength;\n }\n\n const result = new Uint8Array(size);\n result[0] = (size >> 24) & 0xff;\n result[1] = (size >> 16) & 0xff;\n result[2] = (size >> 8) & 0xff;\n result[3] = size & 0xff;\n result.set(type, 4);\n // copy the payload into the result\n for (i = 0, size = 8; i < len; i++) {\n // copy payload[i] array @ offset size\n result.set(payload[i], size);\n size += payload[i].byteLength;\n }\n return result;\n }\n\n static hdlr(type) {\n return MP4.box(MP4.types.hdlr, MP4.HDLR_TYPES[type]);\n }\n\n static mdat(data) {\n return MP4.box(MP4.types.mdat, data);\n }\n\n static mdhd(timescale, duration) {\n duration *= timescale;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n return MP4.box(\n MP4.types.mdhd,\n new Uint8Array([\n 0x01, // version 1\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x02, // creation_time\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x03, // modification_time\n (timescale >> 24) & 0xff,\n (timescale >> 16) & 0xff,\n (timescale >> 8) & 0xff,\n timescale & 0xff, // timescale\n upperWordDuration >> 24,\n (upperWordDuration >> 16) & 0xff,\n (upperWordDuration >> 8) & 0xff,\n upperWordDuration & 0xff,\n lowerWordDuration >> 24,\n (lowerWordDuration >> 16) & 0xff,\n (lowerWordDuration >> 8) & 0xff,\n lowerWordDuration & 0xff,\n 0x55,\n 0xc4, // 'und' language (undetermined)\n 0x00,\n 0x00,\n ]),\n );\n }\n\n static mdia(track) {\n return MP4.box(\n MP4.types.mdia,\n MP4.mdhd(track.timescale, track.duration),\n MP4.hdlr(track.type),\n MP4.minf(track),\n );\n }\n\n static mfhd(sequenceNumber) {\n return MP4.box(\n MP4.types.mfhd,\n new Uint8Array([\n 0x00,\n 0x00,\n 0x00,\n 0x00, // flags\n sequenceNumber >> 24,\n (sequenceNumber >> 16) & 0xff,\n (sequenceNumber >> 8) & 0xff,\n sequenceNumber & 0xff, // sequence_number\n ]),\n );\n }\n\n static minf(track) {\n if (track.type === 'audio') {\n return MP4.box(\n MP4.types.minf,\n MP4.box(MP4.types.smhd, MP4.SMHD),\n MP4.DINF,\n MP4.stbl(track),\n );\n } else {\n return MP4.box(\n MP4.types.minf,\n MP4.box(MP4.types.vmhd, MP4.VMHD),\n MP4.DINF,\n MP4.stbl(track),\n );\n }\n }\n\n static moof(sn, baseMediaDecodeTime, track) {\n return MP4.box(\n MP4.types.moof,\n MP4.mfhd(sn),\n MP4.traf(track, baseMediaDecodeTime),\n );\n }\n\n static moov(tracks) {\n let i = tracks.length;\n const boxes: Uint8Array[] = [];\n\n while (i--) {\n boxes[i] = MP4.trak(tracks[i]);\n }\n\n return MP4.box.apply(\n null,\n [MP4.types.moov, MP4.mvhd(tracks[0].timescale, tracks[0].duration)]\n 
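/* Resulting layout (sizes written big-endian by box() above):\n moov = [4-byte size][4-byte type 'moov'][mvhd][trak per track][mvex]\n e.g. a 100-byte payload yields size 108, serialized 0x00 0x00 0x00 0x6c. */\n 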
.concat(boxes)\n .concat(MP4.mvex(tracks)),\n );\n }\n\n static mvex(tracks) {\n let i = tracks.length;\n const boxes: Uint8Array[] = [];\n\n while (i--) {\n boxes[i] = MP4.trex(tracks[i]);\n }\n\n return MP4.box.apply(null, [MP4.types.mvex, ...boxes]);\n }\n\n static mvhd(timescale, duration) {\n duration *= timescale;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n const bytes = new Uint8Array([\n 0x01, // version 1\n 0x00,\n 0x00,\n 0x00, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x02, // creation_time\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x03, // modification_time\n (timescale >> 24) & 0xff,\n (timescale >> 16) & 0xff,\n (timescale >> 8) & 0xff,\n timescale & 0xff, // timescale\n upperWordDuration >> 24,\n (upperWordDuration >> 16) & 0xff,\n (upperWordDuration >> 8) & 0xff,\n upperWordDuration & 0xff,\n lowerWordDuration >> 24,\n (lowerWordDuration >> 16) & 0xff,\n (lowerWordDuration >> 8) & 0xff,\n lowerWordDuration & 0xff,\n 0x00,\n 0x01,\n 0x00,\n 0x00, // 1.0 rate\n 0x01,\n 0x00, // 1.0 volume\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x01,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x01,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x40,\n 0x00,\n 0x00,\n 0x00, // transformation: unity matrix\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // pre_defined\n 0xff,\n 0xff,\n 0xff,\n 0xff, // next_track_ID\n ]);\n return MP4.box(MP4.types.mvhd, bytes);\n }\n\n static sdtp(track) {\n const samples = track.samples || [];\n const bytes = new Uint8Array(4 + samples.length);\n let i;\n let flags;\n // leave the full box header (4 bytes) all zero\n // write the sample table\n for (i = 0; i < samples.length; i++) {\n flags = samples[i].flags;\n bytes[i + 4] =\n (flags.dependsOn << 4) |\n (flags.isDependedOn << 2) |\n flags.hasRedundancy;\n }\n\n return MP4.box(MP4.types.sdtp, bytes);\n }\n\n static stbl(track) {\n return MP4.box(\n MP4.types.stbl,\n MP4.stsd(track),\n MP4.box(MP4.types.stts, MP4.STTS),\n MP4.box(MP4.types.stsc, MP4.STSC),\n MP4.box(MP4.types.stsz, MP4.STSZ),\n MP4.box(MP4.types.stco, MP4.STCO),\n );\n }\n\n static avc1(track) {\n let sps: number[] = [];\n let pps: number[] = [];\n let i;\n let data;\n let len;\n // assemble the SPSs\n\n for (i = 0; i < track.sps.length; i++) {\n data = track.sps[i];\n len = data.byteLength;\n sps.push((len >>> 8) & 0xff);\n sps.push(len & 0xff);\n\n // SPS\n sps = sps.concat(Array.prototype.slice.call(data));\n }\n\n // assemble the PPSs\n for (i = 0; i < track.pps.length; i++) {\n data = track.pps[i];\n len = data.byteLength;\n pps.push((len >>> 8) & 0xff);\n pps.push(len & 0xff);\n\n pps = pps.concat(Array.prototype.slice.call(data));\n }\n\n const avcc = MP4.box(\n MP4.types.avcC,\n new Uint8Array(\n [\n 0x01, // version\n sps[3], // profile\n sps[4], // profile compat\n sps[5], // level\n 0xfc | 3, // lengthSizeMinusOne, hard-coded to 4 bytes\n 0xe0 | track.sps.length, // 3bit reserved (111) + numOfSequenceParameterSets\n ]\n .concat(sps)\n .concat([\n track.pps.length, // numOfPictureParameterSets\n ])\n .concat(pps),\n ),\n ); // 
\"PPS\"\n const width = track.width;\n const height = track.height;\n const hSpacing = track.pixelRatio[0];\n const vSpacing = track.pixelRatio[1];\n\n return MP4.box(\n MP4.types.avc1,\n new Uint8Array([\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x01, // data_reference_index\n 0x00,\n 0x00, // pre_defined\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // pre_defined\n (width >> 8) & 0xff,\n width & 0xff, // width\n (height >> 8) & 0xff,\n height & 0xff, // height\n 0x00,\n 0x48,\n 0x00,\n 0x00, // horizresolution\n 0x00,\n 0x48,\n 0x00,\n 0x00, // vertresolution\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x01, // frame_count\n 0x12,\n 0x64,\n 0x61,\n 0x69,\n 0x6c, // dailymotion/hls.js\n 0x79,\n 0x6d,\n 0x6f,\n 0x74,\n 0x69,\n 0x6f,\n 0x6e,\n 0x2f,\n 0x68,\n 0x6c,\n 0x73,\n 0x2e,\n 0x6a,\n 0x73,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // compressorname\n 0x00,\n 0x18, // depth = 24\n 0x11,\n 0x11,\n ]), // pre_defined = -1\n avcc,\n MP4.box(\n MP4.types.btrt,\n new Uint8Array([\n 0x00,\n 0x1c,\n 0x9c,\n 0x80, // bufferSizeDB\n 0x00,\n 0x2d,\n 0xc6,\n 0xc0, // maxBitrate\n 0x00,\n 0x2d,\n 0xc6,\n 0xc0,\n ]),\n ), // avgBitrate\n MP4.box(\n MP4.types.pasp,\n new Uint8Array([\n hSpacing >> 24, // hSpacing\n (hSpacing >> 16) & 0xff,\n (hSpacing >> 8) & 0xff,\n hSpacing & 0xff,\n vSpacing >> 24, // vSpacing\n (vSpacing >> 16) & 0xff,\n (vSpacing >> 8) & 0xff,\n vSpacing & 0xff,\n ]),\n ),\n );\n }\n\n static esds(track) {\n const configlen = track.config.length;\n return new Uint8Array(\n [\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n\n 0x03, // descriptor_type\n 0x17 + configlen, // length\n 0x00,\n 0x01, // es_id\n 0x00, // stream_priority\n\n 0x04, // descriptor_type\n 0x0f + configlen, // length\n 0x40, // codec : mpeg4_audio\n 0x15, // stream_type\n 0x00,\n 0x00,\n 0x00, // buffer_size\n 0x00,\n 0x00,\n 0x00,\n 0x00, // maxBitrate\n 0x00,\n 0x00,\n 0x00,\n 0x00, // avgBitrate\n\n 0x05, // descriptor_type\n ]\n .concat([configlen])\n .concat(track.config)\n .concat([0x06, 0x01, 0x02]),\n ); // GASpecificConfig)); // length + audio config descriptor\n }\n\n static audioStsd(track) {\n const samplerate = track.samplerate;\n return new Uint8Array([\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x01, // data_reference_index\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n track.channelCount, // channelcount\n 0x00,\n 0x10, // sampleSize:16bits\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved2\n (samplerate >> 8) & 0xff,\n samplerate & 0xff, //\n 0x00,\n 0x00,\n ]);\n }\n\n static mp4a(track) {\n return MP4.box(\n MP4.types.mp4a,\n MP4.audioStsd(track),\n MP4.box(MP4.types.esds, MP4.esds(track)),\n );\n }\n\n static mp3(track) {\n return MP4.box(MP4.types['.mp3'], MP4.audioStsd(track));\n }\n\n static ac3(track) {\n return MP4.box(\n MP4.types['ac-3'],\n MP4.audioStsd(track),\n MP4.box(MP4.types.dac3, track.config),\n );\n }\n\n static stsd(track) {\n if (track.type === 'audio') {\n if (track.segmentCodec === 'mp3' && track.codec === 'mp3') {\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.mp3(track));\n }\n if (track.segmentCodec === 'ac3') {\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.ac3(track));\n }\n return MP4.box(MP4.types.stsd, MP4.STSD, MP4.mp4a(track));\n } else {\n return MP4.box(MP4.types.stsd, MP4.STSD, 
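\n /* stsd wraps exactly one sample entry: 'mp4a' (AAC), '.mp3' or 'ac-3' for\n audio depending on track.segmentCodec above, and 'avc1' for video here. */\n 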
MP4.avc1(track));\n }\n }\n\n static tkhd(track) {\n const id = track.id;\n const duration = track.duration * track.timescale;\n const width = track.width;\n const height = track.height;\n const upperWordDuration = Math.floor(duration / (UINT32_MAX + 1));\n const lowerWordDuration = Math.floor(duration % (UINT32_MAX + 1));\n return MP4.box(\n MP4.types.tkhd,\n new Uint8Array([\n 0x01, // version 1\n 0x00,\n 0x00,\n 0x07, // flags\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x02, // creation_time\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x03, // modification_time\n (id >> 24) & 0xff,\n (id >> 16) & 0xff,\n (id >> 8) & 0xff,\n id & 0xff, // track_ID\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n upperWordDuration >> 24,\n (upperWordDuration >> 16) & 0xff,\n (upperWordDuration >> 8) & 0xff,\n upperWordDuration & 0xff,\n lowerWordDuration >> 24,\n (lowerWordDuration >> 16) & 0xff,\n (lowerWordDuration >> 8) & 0xff,\n lowerWordDuration & 0xff,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x00, // layer\n 0x00,\n 0x00, // alternate_group\n 0x00,\n 0x00, // non-audio track volume\n 0x00,\n 0x00, // reserved\n 0x00,\n 0x01,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x01,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x40,\n 0x00,\n 0x00,\n 0x00, // transformation: unity matrix\n (width >> 8) & 0xff,\n width & 0xff,\n 0x00,\n 0x00, // width\n (height >> 8) & 0xff,\n height & 0xff,\n 0x00,\n 0x00, // height\n ]),\n );\n }\n\n static traf(track, baseMediaDecodeTime) {\n const sampleDependencyTable = MP4.sdtp(track);\n const id = track.id;\n const upperWordBaseMediaDecodeTime = Math.floor(\n baseMediaDecodeTime / (UINT32_MAX + 1),\n );\n const lowerWordBaseMediaDecodeTime = Math.floor(\n baseMediaDecodeTime % (UINT32_MAX + 1),\n );\n return MP4.box(\n MP4.types.traf,\n MP4.box(\n MP4.types.tfhd,\n new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n id >> 24,\n (id >> 16) & 0xff,\n (id >> 8) & 0xff,\n id & 0xff, // track_ID\n ]),\n ),\n MP4.box(\n MP4.types.tfdt,\n new Uint8Array([\n 0x01, // version 1\n 0x00,\n 0x00,\n 0x00, // flags\n upperWordBaseMediaDecodeTime >> 24,\n (upperWordBaseMediaDecodeTime >> 16) & 0xff,\n (upperWordBaseMediaDecodeTime >> 8) & 0xff,\n upperWordBaseMediaDecodeTime & 0xff,\n lowerWordBaseMediaDecodeTime >> 24,\n (lowerWordBaseMediaDecodeTime >> 16) & 0xff,\n (lowerWordBaseMediaDecodeTime >> 8) & 0xff,\n lowerWordBaseMediaDecodeTime & 0xff,\n ]),\n ),\n MP4.trun(\n track,\n sampleDependencyTable.length +\n 16 + // tfhd\n 20 + // tfdt\n 8 + // traf header\n 16 + // mfhd\n 8 + // moof header\n 8,\n ), // mdat header\n sampleDependencyTable,\n );\n }\n\n /**\n * Generate a track box.\n * @param track a track definition\n */\n static trak(track) {\n track.duration = track.duration || 0xffffffff;\n return MP4.box(MP4.types.trak, MP4.tkhd(track), MP4.mdia(track));\n }\n\n static trex(track) {\n const id = track.id;\n return MP4.box(\n MP4.types.trex,\n new Uint8Array([\n 0x00, // version 0\n 0x00,\n 0x00,\n 0x00, // flags\n id >> 24,\n (id >> 16) & 0xff,\n (id >> 8) & 0xff,\n id & 0xff, // track_ID\n 0x00,\n 0x00,\n 0x00,\n 0x01, // default_sample_description_index\n 0x00,\n 0x00,\n 0x00,\n 0x00, // default_sample_duration\n 0x00,\n 0x00,\n 0x00,\n 0x00, // default_sample_size\n 0x00,\n 0x01,\n 0x00,\n 0x01, // default_sample_flags\n ]),\n );\n 
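/* The defaults above (sample_description_index 1, zero default duration\n and size, default_sample_flags 0x00010001) apply to every sample in a\n fragment unless the moof's tfhd/trun boxes override them, which is what\n MP4.trun() does per sample. */\n 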
}\n\n static trun(track, offset) {\n const samples = track.samples || [];\n const len = samples.length;\n const arraylen = 12 + 16 * len;\n const array = new Uint8Array(arraylen);\n let i;\n let sample;\n let duration;\n let size;\n let flags;\n let cts;\n offset += 8 + arraylen;\n array.set(\n [\n track.type === 'video' ? 0x01 : 0x00, // version 1 for video with signed-int sample_composition_time_offset\n 0x00,\n 0x0f,\n 0x01, // flags\n (len >>> 24) & 0xff,\n (len >>> 16) & 0xff,\n (len >>> 8) & 0xff,\n len & 0xff, // sample_count\n (offset >>> 24) & 0xff,\n (offset >>> 16) & 0xff,\n (offset >>> 8) & 0xff,\n offset & 0xff, // data_offset\n ],\n 0,\n );\n for (i = 0; i < len; i++) {\n sample = samples[i];\n duration = sample.duration;\n size = sample.size;\n flags = sample.flags;\n cts = sample.cts;\n array.set(\n [\n (duration >>> 24) & 0xff,\n (duration >>> 16) & 0xff,\n (duration >>> 8) & 0xff,\n duration & 0xff, // sample_duration\n (size >>> 24) & 0xff,\n (size >>> 16) & 0xff,\n (size >>> 8) & 0xff,\n size & 0xff, // sample_size\n (flags.isLeading << 2) | flags.dependsOn,\n (flags.isDependedOn << 6) |\n (flags.hasRedundancy << 4) |\n (flags.paddingValue << 1) |\n flags.isNonSync,\n flags.degradPrio & (0xf0 << 8),\n flags.degradPrio & 0x0f, // sample_flags\n (cts >>> 24) & 0xff,\n (cts >>> 16) & 0xff,\n (cts >>> 8) & 0xff,\n cts & 0xff, // sample_composition_time_offset\n ],\n 12 + 16 * i,\n );\n }\n return MP4.box(MP4.types.trun, array);\n }\n\n static initSegment(tracks) {\n if (!MP4.types) {\n MP4.init();\n }\n\n const movie = MP4.moov(tracks);\n const result = appendUint8Array(MP4.FTYP, movie);\n return result;\n }\n}\n\nexport default MP4;\n", "const MPEG_TS_CLOCK_FREQ_HZ = 90000;\n\nexport type RationalTimestamp = {\n baseTime: number; // ticks\n timescale: number; // ticks per second\n};\n\nexport function toTimescaleFromBase(\n baseTime: number,\n destScale: number,\n srcBase: number = 1,\n round: boolean = false,\n): number {\n const result = baseTime * destScale * srcBase; // equivalent to `(value * scale) / (1 / base)`\n return round ? 
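\n /* Usage sketch: toMsFromMpegTsClock below calls toTimescaleFromBase with\n destScale = 1000 and srcBase = 1 / 90000, so 90000 ticks map to exactly\n 1000 ms, and 123456 ticks to ~1371.73 ms (1372 once rounded here). */\n 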
Math.round(result) : result;\n}\n\nexport function toTimescaleFromScale(\n baseTime: number,\n destScale: number,\n srcScale: number = 1,\n round: boolean = false,\n): number {\n return toTimescaleFromBase(baseTime, destScale, 1 / srcScale, round);\n}\n\nexport function toMsFromMpegTsClock(\n baseTime: number,\n round: boolean = false,\n): number {\n return toTimescaleFromBase(baseTime, 1000, 1 / MPEG_TS_CLOCK_FREQ_HZ, round);\n}\n\nexport function toMpegTsClockFromTimescale(\n baseTime: number,\n srcScale: number = 1,\n): number {\n return toTimescaleFromBase(baseTime, MPEG_TS_CLOCK_FREQ_HZ, 1 / srcScale);\n}\n", "import AAC from './aac-helper';\nimport MP4 from './mp4-generator';\nimport type { HlsEventEmitter } from '../events';\nimport { Events } from '../events';\nimport { ErrorTypes, ErrorDetails } from '../errors';\nimport { logger } from '../utils/logger';\nimport {\n InitSegmentData,\n Remuxer,\n RemuxerResult,\n RemuxedMetadata,\n RemuxedTrack,\n RemuxedUserdata,\n} from '../types/remuxer';\nimport { PlaylistLevelType } from '../types/loader';\nimport {\n RationalTimestamp,\n toMsFromMpegTsClock,\n} from '../utils/timescale-conversion';\nimport type {\n AudioSample,\n VideoSample,\n DemuxedAudioTrack,\n DemuxedVideoTrack,\n DemuxedMetadataTrack,\n DemuxedUserdataTrack,\n} from '../types/demuxer';\nimport type { TrackSet } from '../types/track';\nimport type { SourceBufferName } from '../types/buffer';\nimport type { Fragment } from '../loader/fragment';\nimport type { HlsConfig } from '../config';\n\nconst MAX_SILENT_FRAME_DURATION = 10 * 1000; // 10 seconds\nconst AAC_SAMPLES_PER_FRAME = 1024;\nconst MPEG_AUDIO_SAMPLE_PER_FRAME = 1152;\nconst AC3_SAMPLES_PER_FRAME = 1536;\n\nlet chromeVersion: number | null = null;\nlet safariWebkitVersion: number | null = null;\n\nexport default class MP4Remuxer implements Remuxer {\n private observer: HlsEventEmitter;\n private config: HlsConfig;\n private typeSupported: any;\n private ISGenerated: boolean = false;\n private _initPTS: RationalTimestamp | null = null;\n private _initDTS: RationalTimestamp | null = null;\n private nextAvcDts: number | null = null;\n private nextAudioPts: number | null = null;\n private videoSampleDuration: number | null = null;\n private isAudioContiguous: boolean = false;\n private isVideoContiguous: boolean = false;\n private videoTrackConfig?: {\n width?: number;\n height?: number;\n pixelRatio?: [number, number];\n };\n\n constructor(\n observer: HlsEventEmitter,\n config: HlsConfig,\n typeSupported,\n vendor = '',\n ) {\n this.observer = observer;\n this.config = config;\n this.typeSupported = typeSupported;\n this.ISGenerated = false;\n\n if (chromeVersion === null) {\n const userAgent = navigator.userAgent || '';\n const result = userAgent.match(/Chrome\\/(\\d+)/i);\n chromeVersion = result ? parseInt(result[1]) : 0;\n }\n if (safariWebkitVersion === null) {\n const result = navigator.userAgent.match(/Safari\\/(\\d+)/i);\n safariWebkitVersion = result ? 
parseInt(result[1]) : 0;\n }\n }\n\n destroy() {\n // @ts-ignore\n this.config = this.videoTrackConfig = this._initPTS = this._initDTS = null;\n }\n\n resetTimeStamp(defaultTimeStamp: RationalTimestamp | null) {\n logger.log('[mp4-remuxer]: initPTS & initDTS reset');\n this._initPTS = this._initDTS = defaultTimeStamp;\n }\n\n resetNextTimestamp() {\n logger.log('[mp4-remuxer]: reset next timestamp');\n this.isVideoContiguous = false;\n this.isAudioContiguous = false;\n }\n\n resetInitSegment() {\n logger.log('[mp4-remuxer]: ISGenerated flag reset');\n this.ISGenerated = false;\n this.videoTrackConfig = undefined;\n }\n\n getVideoStartPts(videoSamples: VideoSample[]) {\n // Get the minimum PTS value relative to the first sample's PTS, normalized for 33-bit wrapping\n let rolloverDetected = false;\n const firstPts = videoSamples[0].pts;\n const startPTS = videoSamples.reduce((minPTS, sample) => {\n let pts = sample.pts;\n let delta = pts - minPTS;\n if (delta < -4294967296) {\n // 2^32, see PTSNormalize for reasoning, but we're hitting a rollover here, and we don't want that to impact the timeOffset calculation\n rolloverDetected = true;\n pts = normalizePts(pts, firstPts);\n delta = pts - minPTS;\n }\n if (delta > 0) {\n return minPTS;\n }\n return pts;\n }, firstPts);\n if (rolloverDetected) {\n logger.debug('PTS rollover detected');\n }\n return startPTS;\n }\n\n remux(\n audioTrack: DemuxedAudioTrack,\n videoTrack: DemuxedVideoTrack,\n id3Track: DemuxedMetadataTrack,\n textTrack: DemuxedUserdataTrack,\n timeOffset: number,\n accurateTimeOffset: boolean,\n flush: boolean,\n playlistType: PlaylistLevelType,\n ): RemuxerResult {\n let video: RemuxedTrack | undefined;\n let audio: RemuxedTrack | undefined;\n let initSegment: InitSegmentData | undefined;\n let text: RemuxedUserdata | undefined;\n let id3: RemuxedMetadata | undefined;\n let independent: boolean | undefined;\n let audioTimeOffset = timeOffset;\n let videoTimeOffset = timeOffset;\n\n // If we're remuxing audio and video progressively, wait until we've received enough samples for each track before proceeding.\n // This is done to synchronize the audio and video streams. We know if the current segment will have samples if the \"pid\"\n // parameter is greater than -1. 
The pid is set when the PMT is parsed, which contains the tracks list.\n // However, if the initSegment has already been generated, or we've reached the end of a segment (flush),\n // then we can remux one track without waiting for the other.\n const hasAudio = audioTrack.pid > -1;\n const hasVideo = videoTrack.pid > -1;\n const length = videoTrack.samples.length;\n const enoughAudioSamples = audioTrack.samples.length > 0;\n const enoughVideoSamples = (flush && length > 0) || length > 1;\n const canRemuxAvc =\n ((!hasAudio || enoughAudioSamples) &&\n (!hasVideo || enoughVideoSamples)) ||\n this.ISGenerated ||\n flush;\n\n if (canRemuxAvc) {\n if (this.ISGenerated) {\n const config = this.videoTrackConfig;\n if (\n config &&\n (videoTrack.width !== config.width ||\n videoTrack.height !== config.height ||\n videoTrack.pixelRatio?.[0] !== config.pixelRatio?.[0] ||\n videoTrack.pixelRatio?.[1] !== config.pixelRatio?.[1])\n ) {\n this.resetInitSegment();\n }\n } else {\n initSegment = this.generateIS(\n audioTrack,\n videoTrack,\n timeOffset,\n accurateTimeOffset,\n );\n }\n\n const isVideoContiguous = this.isVideoContiguous;\n let firstKeyFrameIndex = -1;\n let firstKeyFramePTS;\n\n if (enoughVideoSamples) {\n firstKeyFrameIndex = findKeyframeIndex(videoTrack.samples);\n if (!isVideoContiguous && this.config.forceKeyFrameOnDiscontinuity) {\n independent = true;\n if (firstKeyFrameIndex > 0) {\n logger.warn(\n `[mp4-remuxer]: Dropped ${firstKeyFrameIndex} out of ${length} video samples due to a missing keyframe`,\n );\n const startPTS = this.getVideoStartPts(videoTrack.samples);\n videoTrack.samples = videoTrack.samples.slice(firstKeyFrameIndex);\n videoTrack.dropped += firstKeyFrameIndex;\n videoTimeOffset +=\n (videoTrack.samples[0].pts - startPTS) /\n videoTrack.inputTimeScale;\n firstKeyFramePTS = videoTimeOffset;\n } else if (firstKeyFrameIndex === -1) {\n logger.warn(\n `[mp4-remuxer]: No keyframe found out of ${length} video samples`,\n );\n independent = false;\n }\n }\n }\n\n if (this.ISGenerated) {\n if (enoughAudioSamples && enoughVideoSamples) {\n // timeOffset is expected to be the offset of the first timestamp of this fragment (first DTS)\n // if first audio DTS is not aligned with first video DTS then we need to take that into account\n // when providing timeOffset to remuxAudio / remuxVideo. if we don't do that, there might be a permanent / small\n // drift between audio and video streams\n const startPTS = this.getVideoStartPts(videoTrack.samples);\n const tsDelta =\n normalizePts(audioTrack.samples[0].pts, startPTS) - startPTS;\n const audiovideoTimestampDelta = tsDelta / videoTrack.inputTimeScale;\n audioTimeOffset += Math.max(0, audiovideoTimestampDelta);\n videoTimeOffset += Math.max(0, -audiovideoTimestampDelta);\n }\n\n // Purposefully remuxing audio before video, so that remuxVideo can use nextAudioPts, which is calculated in remuxAudio.\n if (enoughAudioSamples) {\n // if initSegment was generated without audio samples, regenerate it again\n if (!audioTrack.samplerate) {\n logger.warn(\n '[mp4-remuxer]: regenerate InitSegment as audio detected',\n );\n initSegment = this.generateIS(\n audioTrack,\n videoTrack,\n timeOffset,\n accurateTimeOffset,\n );\n }\n audio = this.remuxAudio(\n audioTrack,\n audioTimeOffset,\n this.isAudioContiguous,\n accurateTimeOffset,\n hasVideo ||\n enoughVideoSamples ||\n playlistType === PlaylistLevelType.AUDIO\n ? videoTimeOffset\n : undefined,\n );\n if (enoughVideoSamples) {\n const audioTrackLength = audio ? 
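\n /* Example of the A/V alignment above: if the first audio PTS is 4500 ticks\n (50 ms at 90 kHz) greater than the first video PTS, audioTimeOffset grows\n by 0.05 s while videoTimeOffset is unchanged (and vice versa for a\n negative delta), avoiding a permanent audio/video drift. */\n 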
audio.endPTS - audio.startPTS : 0;\n // if initSegment was generated without video samples, regenerate it again\n if (!videoTrack.inputTimeScale) {\n logger.warn(\n '[mp4-remuxer]: regenerate InitSegment as video detected',\n );\n initSegment = this.generateIS(\n audioTrack,\n videoTrack,\n timeOffset,\n accurateTimeOffset,\n );\n }\n video = this.remuxVideo(\n videoTrack,\n videoTimeOffset,\n isVideoContiguous,\n audioTrackLength,\n );\n }\n } else if (enoughVideoSamples) {\n video = this.remuxVideo(\n videoTrack,\n videoTimeOffset,\n isVideoContiguous,\n 0,\n );\n }\n if (video) {\n video.firstKeyFrame = firstKeyFrameIndex;\n video.independent = firstKeyFrameIndex !== -1;\n video.firstKeyFramePTS = firstKeyFramePTS;\n }\n }\n }\n\n // Allow ID3 and text to remux, even if more audio/video samples are required\n if (this.ISGenerated && this._initPTS && this._initDTS) {\n if (id3Track.samples.length) {\n id3 = flushTextTrackMetadataCueSamples(\n id3Track,\n timeOffset,\n this._initPTS,\n this._initDTS,\n );\n }\n\n if (textTrack.samples.length) {\n text = flushTextTrackUserdataCueSamples(\n textTrack,\n timeOffset,\n this._initPTS,\n );\n }\n }\n\n return {\n audio,\n video,\n initSegment,\n independent,\n text,\n id3,\n };\n }\n\n generateIS(\n audioTrack: DemuxedAudioTrack,\n videoTrack: DemuxedVideoTrack,\n timeOffset: number,\n accurateTimeOffset: boolean,\n ): InitSegmentData | undefined {\n const audioSamples = audioTrack.samples;\n const videoSamples = videoTrack.samples;\n const typeSupported = this.typeSupported;\n const tracks: TrackSet = {};\n const _initPTS = this._initPTS;\n let computePTSDTS = !_initPTS || accurateTimeOffset;\n let container = 'audio/mp4';\n let initPTS: number | undefined;\n let initDTS: number | undefined;\n let timescale: number | undefined;\n\n if (computePTSDTS) {\n initPTS = initDTS = Infinity;\n }\n\n if (audioTrack.config && audioSamples.length) {\n // let's use audio sampling rate as MP4 time scale.\n // rationale is that there is a integer nb of audio frames per audio sample (1024 for AAC)\n // using audio sampling rate here helps having an integer MP4 frame duration\n // this avoids potential rounding issue and AV sync issue\n audioTrack.timescale = audioTrack.samplerate;\n switch (audioTrack.segmentCodec) {\n case 'mp3':\n if (typeSupported.mpeg) {\n // Chrome and Safari\n container = 'audio/mpeg';\n audioTrack.codec = '';\n } else if (typeSupported.mp3) {\n // Firefox\n audioTrack.codec = 'mp3';\n }\n break;\n\n case 'ac3':\n audioTrack.codec = 'ac-3';\n break;\n }\n tracks.audio = {\n id: 'audio',\n container: container,\n codec: audioTrack.codec,\n initSegment:\n audioTrack.segmentCodec === 'mp3' && typeSupported.mpeg\n ? new Uint8Array(0)\n : MP4.initSegment([audioTrack]),\n metadata: {\n channelCount: audioTrack.channelCount,\n },\n };\n if (computePTSDTS) {\n timescale = audioTrack.inputTimeScale;\n if (!_initPTS || timescale !== _initPTS.timescale) {\n // remember first PTS of this demuxing context. 
for audio, PTS = DTS\n initPTS = initDTS =\n audioSamples[0].pts - Math.round(timescale * timeOffset);\n } else {\n computePTSDTS = false;\n }\n }\n }\n\n if (videoTrack.sps && videoTrack.pps && videoSamples.length) {\n // let's use input time scale as MP4 video timescale\n // we use input time scale straight away to avoid rounding issues on frame duration / cts computation\n videoTrack.timescale = videoTrack.inputTimeScale;\n tracks.video = {\n id: 'main',\n container: 'video/mp4',\n codec: videoTrack.codec,\n initSegment: MP4.initSegment([videoTrack]),\n metadata: {\n width: videoTrack.width,\n height: videoTrack.height,\n },\n };\n if (computePTSDTS) {\n timescale = videoTrack.inputTimeScale;\n if (!_initPTS || timescale !== _initPTS.timescale) {\n const startPTS = this.getVideoStartPts(videoSamples);\n const startOffset = Math.round(timescale * timeOffset);\n initDTS = Math.min(\n initDTS as number,\n normalizePts(videoSamples[0].dts, startPTS) - startOffset,\n );\n initPTS = Math.min(initPTS as number, startPTS - startOffset);\n } else {\n computePTSDTS = false;\n }\n }\n this.videoTrackConfig = {\n width: videoTrack.width,\n height: videoTrack.height,\n pixelRatio: videoTrack.pixelRatio,\n };\n }\n\n if (Object.keys(tracks).length) {\n this.ISGenerated = true;\n if (computePTSDTS) {\n this._initPTS = {\n baseTime: initPTS as number,\n timescale: timescale as number,\n };\n this._initDTS = {\n baseTime: initDTS as number,\n timescale: timescale as number,\n };\n } else {\n initPTS = timescale = undefined;\n }\n\n return {\n tracks,\n initPTS,\n timescale,\n };\n }\n }\n\n remuxVideo(\n track: DemuxedVideoTrack,\n timeOffset: number,\n contiguous: boolean,\n audioTrackLength: number,\n ): RemuxedTrack | undefined {\n const timeScale: number = track.inputTimeScale;\n const inputSamples: Array<VideoSample> = track.samples;\n const outputSamples: Array<Mp4Sample> = [];\n const nbSamples = inputSamples.length;\n const initPTS = this._initPTS as RationalTimestamp;\n let nextAvcDts = this.nextAvcDts;\n let offset = 8;\n let mp4SampleDuration = this.videoSampleDuration;\n let firstDTS;\n let lastDTS;\n let minPTS: number = Number.POSITIVE_INFINITY;\n let maxPTS: number = Number.NEGATIVE_INFINITY;\n let sortSamples = false;\n\n // if parsed fragment is contiguous with last one, let's use last DTS value as reference\n if (!contiguous || nextAvcDts === null) {\n const pts = timeOffset * timeScale;\n const cts =\n inputSamples[0].pts -\n normalizePts(inputSamples[0].dts, inputSamples[0].pts);\n if (\n chromeVersion &&\n nextAvcDts !== null &&\n Math.abs(pts - cts - nextAvcDts) < 15000\n ) {\n // treat as contiguous to adjust samples that would otherwise produce video buffer gaps in Chrome\n contiguous = true;\n } else {\n // if not contiguous, let's use target timeOffset\n nextAvcDts = pts - cts;\n }\n }\n\n // PTS is coded on 33 bits and can wrap around\n // normalizePts will make PTS/DTS values monotonic; we use the last known DTS value as reference\n const initTime = (initPTS.baseTime * timeScale) / initPTS.timescale;\n for (let i = 0; i < nbSamples; i++) {\n const sample = inputSamples[i];\n sample.pts = normalizePts(sample.pts - initTime, nextAvcDts);\n sample.dts = normalizePts(sample.dts - initTime, nextAvcDts);\n if (sample.dts < inputSamples[i > 0 ? 
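\n /* Worked example of the 33-bit wrap handled here: with a reference DTS\n near the 2^33 limit (8589934592), a raw PTS of 1000 that wrapped past\n zero is normalized to 1000 + 2^33 = 8589935592, keeping timestamps\n monotonic (normalizePts applies +/- 2^33 while the delta to the\n reference exceeds 2^32). */\n 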
i - 1 : i].dts) {\n sortSamples = true;\n }\n }\n\n // sort video samples by DTS then PTS then demux id order\n if (sortSamples) {\n inputSamples.sort(function (a, b) {\n const deltadts = a.dts - b.dts;\n const deltapts = a.pts - b.pts;\n return deltadts || deltapts;\n });\n }\n\n // Get first/last DTS\n firstDTS = inputSamples[0].dts;\n lastDTS = inputSamples[inputSamples.length - 1].dts;\n\n // Sample duration (as expected by trun MP4 boxes), should be the delta between sample DTS\n // set this constant duration as being the avg delta between consecutive DTS.\n const inputDuration = lastDTS - firstDTS;\n const averageSampleDuration = inputDuration\n ? Math.round(inputDuration / (nbSamples - 1))\n : mp4SampleDuration || track.inputTimeScale / 30;\n\n // if fragment are contiguous, detect hole/overlapping between fragments\n if (contiguous) {\n // check timestamp continuity across consecutive fragments (this is to remove inter-fragment gap/hole)\n const delta = firstDTS - nextAvcDts;\n const foundHole = delta > averageSampleDuration;\n const foundOverlap = delta < -1;\n if (foundHole || foundOverlap) {\n if (foundHole) {\n logger.warn(\n `AVC: ${toMsFromMpegTsClock(\n delta,\n true,\n )} ms (${delta}dts) hole between fragments detected at ${timeOffset.toFixed(\n 3,\n )}`,\n );\n } else {\n logger.warn(\n `AVC: ${toMsFromMpegTsClock(\n -delta,\n true,\n )} ms (${delta}dts) overlapping between fragments detected at ${timeOffset.toFixed(\n 3,\n )}`,\n );\n }\n if (\n !foundOverlap ||\n nextAvcDts >= inputSamples[0].pts ||\n chromeVersion\n ) {\n firstDTS = nextAvcDts;\n const firstPTS = inputSamples[0].pts - delta;\n if (foundHole) {\n inputSamples[0].dts = firstDTS;\n inputSamples[0].pts = firstPTS;\n } else {\n for (let i = 0; i < inputSamples.length; i++) {\n if (inputSamples[i].dts > firstPTS) {\n break;\n }\n inputSamples[i].dts -= delta;\n inputSamples[i].pts -= delta;\n }\n }\n logger.log(\n `Video: Initial PTS/DTS adjusted: ${toMsFromMpegTsClock(\n firstPTS,\n true,\n )}/${toMsFromMpegTsClock(\n firstDTS,\n true,\n )}, delta: ${toMsFromMpegTsClock(delta, true)} ms`,\n );\n }\n }\n }\n\n firstDTS = Math.max(0, firstDTS);\n\n let nbNalu = 0;\n let naluLen = 0;\n let dtsStep = firstDTS;\n for (let i = 0; i < nbSamples; i++) {\n // compute total/avc sample length and nb of NAL units\n const sample = inputSamples[i];\n const units = sample.units;\n const nbUnits = units.length;\n let sampleLen = 0;\n for (let j = 0; j < nbUnits; j++) {\n sampleLen += units[j].data.length;\n }\n\n naluLen += sampleLen;\n nbNalu += nbUnits;\n sample.length = sampleLen;\n\n // ensure sample monotonic DTS\n if (sample.dts < dtsStep) {\n sample.dts = dtsStep;\n dtsStep += (averageSampleDuration / 4) | 0 || 1;\n } else {\n dtsStep = sample.dts;\n }\n\n minPTS = Math.min(sample.pts, minPTS);\n maxPTS = Math.max(sample.pts, maxPTS);\n }\n lastDTS = inputSamples[nbSamples - 1].dts;\n\n /* concatenate the video data and construct the mdat in place\n (need 8 more bytes to fill length and mpdat type) */\n const mdatSize = naluLen + 4 * nbNalu + 8;\n let mdat;\n try {\n mdat = new Uint8Array(mdatSize);\n } catch (err) {\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MUX_ERROR,\n details: ErrorDetails.REMUX_ALLOC_ERROR,\n fatal: false,\n error: err,\n bytes: mdatSize,\n reason: `fail allocating video mdat ${mdatSize}`,\n });\n return;\n }\n const view = new DataView(mdat.buffer);\n view.setUint32(0, mdatSize);\n mdat.set(MP4.types.mdat, 4);\n\n let stretchedLastFrame = false;\n let minDtsDelta = 
Number.POSITIVE_INFINITY;\n let minPtsDelta = Number.POSITIVE_INFINITY;\n let maxDtsDelta = Number.NEGATIVE_INFINITY;\n let maxPtsDelta = Number.NEGATIVE_INFINITY;\n for (let i = 0; i < nbSamples; i++) {\n const VideoSample = inputSamples[i];\n const VideoSampleUnits = VideoSample.units;\n let mp4SampleLength = 0;\n // convert NALU bitstream to MP4 format (prepend NALU with size field)\n for (let j = 0, nbUnits = VideoSampleUnits.length; j < nbUnits; j++) {\n const unit = VideoSampleUnits[j];\n const unitData = unit.data;\n const unitDataLen = unit.data.byteLength;\n view.setUint32(offset, unitDataLen);\n offset += 4;\n mdat.set(unitData, offset);\n offset += unitDataLen;\n mp4SampleLength += 4 + unitDataLen;\n }\n\n // expected sample duration is the Decoding Timestamp diff of consecutive samples\n let ptsDelta;\n if (i < nbSamples - 1) {\n mp4SampleDuration = inputSamples[i + 1].dts - VideoSample.dts;\n ptsDelta = inputSamples[i + 1].pts - VideoSample.pts;\n } else {\n const config = this.config;\n const lastFrameDuration =\n i > 0\n ? VideoSample.dts - inputSamples[i - 1].dts\n : averageSampleDuration;\n ptsDelta =\n i > 0\n ? VideoSample.pts - inputSamples[i - 1].pts\n : averageSampleDuration;\n if (config.stretchShortVideoTrack && this.nextAudioPts !== null) {\n // In some cases, a segment's audio track duration may exceed the video track duration.\n // Since we've already remuxed audio, and we know how long the audio track is, we look to\n // see if the delta to the next segment is longer than maxBufferHole.\n // If so, playback would potentially get stuck, so we artificially inflate\n // the duration of the last frame to minimize any potential gap between segments.\n const gapTolerance = Math.floor(config.maxBufferHole * timeScale);\n const deltaToFrameEnd =\n (audioTrackLength\n ? minPTS + audioTrackLength * timeScale\n : this.nextAudioPts) - VideoSample.pts;\n if (deltaToFrameEnd > gapTolerance) {\n // We subtract lastFrameDuration from deltaToFrameEnd to try to prevent any video\n // frame overlap. maxBufferHole should be >> lastFrameDuration anyway.\n mp4SampleDuration = deltaToFrameEnd - lastFrameDuration;\n if (mp4SampleDuration < 0) {\n mp4SampleDuration = lastFrameDuration;\n } else {\n stretchedLastFrame = true;\n }\n logger.log(\n `[mp4-remuxer]: It is approximately ${\n deltaToFrameEnd / 90\n } ms to the next segment; using duration ${\n mp4SampleDuration / 90\n } ms for the last video frame.`,\n );\n } else {\n mp4SampleDuration = lastFrameDuration;\n }\n } else {\n mp4SampleDuration = lastFrameDuration;\n }\n }\n const compositionTimeOffset = Math.round(\n VideoSample.pts - VideoSample.dts,\n );\n minDtsDelta = Math.min(minDtsDelta, mp4SampleDuration);\n maxDtsDelta = Math.max(maxDtsDelta, mp4SampleDuration);\n minPtsDelta = Math.min(minPtsDelta, ptsDelta);\n maxPtsDelta = Math.max(maxPtsDelta, ptsDelta);\n\n outputSamples.push(\n new Mp4Sample(\n VideoSample.key,\n mp4SampleDuration,\n mp4SampleLength,\n compositionTimeOffset,\n ),\n );\n }\n\n if (outputSamples.length) {\n if (chromeVersion) {\n if (chromeVersion < 70) {\n // Chrome workaround, mark first sample as being a Random Access Point (keyframe) to avoid sourcebuffer append issue\n // https://code.google.com/p/chromium/issues/detail?id=229412\n const flags = outputSamples[0].flags;\n flags.dependsOn = 2;\n flags.isNonSync = 0;\n }\n } else if (safariWebkitVersion) {\n // Fix for \"CNN special report, with CC\" in test-streams (Safari browser only)\n // Ignore DTS when frame durations are irregular. 
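// A minimal standalone sketch (editor's addition, not hls.js API) of the mdat
// packing loop above: each NAL unit is written with a 4-byte big-endian length
// prefix — the MP4/AVCC sample layout — in place of Annex-B start codes.
function packSampleUnits(units: Array<{ data: Uint8Array }>): Uint8Array {
  const total = units.reduce((len, u) => len + 4 + u.data.byteLength, 0);
  const out = new Uint8Array(total);
  const view = new DataView(out.buffer);
  let offset = 0;
  for (const unit of units) {
    view.setUint32(offset, unit.data.byteLength); // size field precedes payload
    offset += 4;
    out.set(unit.data, offset);
    offset += unit.data.byteLength;
  }
  return out;
}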
Safari MSE does not handle this leading to gaps.\n if (\n maxPtsDelta - minPtsDelta < maxDtsDelta - minDtsDelta &&\n averageSampleDuration / maxDtsDelta < 0.025 &&\n outputSamples[0].cts === 0\n ) {\n logger.warn(\n 'Found irregular gaps in sample duration. Using PTS instead of DTS to determine MP4 sample duration.',\n );\n let dts = firstDTS;\n for (let i = 0, len = outputSamples.length; i < len; i++) {\n const nextDts = dts + outputSamples[i].duration;\n const pts = dts + outputSamples[i].cts;\n if (i < len - 1) {\n const nextPts = nextDts + outputSamples[i + 1].cts;\n outputSamples[i].duration = nextPts - pts;\n } else {\n outputSamples[i].duration = i\n ? outputSamples[i - 1].duration\n : averageSampleDuration;\n }\n outputSamples[i].cts = 0;\n dts = nextDts;\n }\n }\n }\n }\n // next AVC sample DTS should be equal to last sample DTS + last sample duration (in PES timescale)\n mp4SampleDuration =\n stretchedLastFrame || !mp4SampleDuration\n ? averageSampleDuration\n : mp4SampleDuration;\n this.nextAvcDts = nextAvcDts = lastDTS + mp4SampleDuration;\n this.videoSampleDuration = mp4SampleDuration;\n this.isVideoContiguous = true;\n const moof = MP4.moof(\n track.sequenceNumber++,\n firstDTS,\n Object.assign({}, track, {\n samples: outputSamples,\n }),\n );\n const type: SourceBufferName = 'video';\n const data = {\n data1: moof,\n data2: mdat,\n startPTS: minPTS / timeScale,\n endPTS: (maxPTS + mp4SampleDuration) / timeScale,\n startDTS: firstDTS / timeScale,\n endDTS: (nextAvcDts as number) / timeScale,\n type,\n hasAudio: false,\n hasVideo: true,\n nb: outputSamples.length,\n dropped: track.dropped,\n };\n track.samples = [];\n track.dropped = 0;\n return data;\n }\n\n getSamplesPerFrame(track: DemuxedAudioTrack) {\n switch (track.segmentCodec) {\n case 'mp3':\n return MPEG_AUDIO_SAMPLE_PER_FRAME;\n case 'ac3':\n return AC3_SAMPLES_PER_FRAME;\n default:\n return AAC_SAMPLES_PER_FRAME;\n }\n }\n\n remuxAudio(\n track: DemuxedAudioTrack,\n timeOffset: number,\n contiguous: boolean,\n accurateTimeOffset: boolean,\n videoTimeOffset?: number,\n ): RemuxedTrack | undefined {\n const inputTimeScale: number = track.inputTimeScale;\n const mp4timeScale: number = track.samplerate\n ? track.samplerate\n : inputTimeScale;\n const scaleFactor: number = inputTimeScale / mp4timeScale;\n const mp4SampleDuration: number = this.getSamplesPerFrame(track);\n const inputSampleDuration: number = mp4SampleDuration * scaleFactor;\n const initPTS = this._initPTS as RationalTimestamp;\n const rawMPEG: boolean =\n track.segmentCodec === 'mp3' && this.typeSupported.mpeg;\n const outputSamples: Array = [];\n const alignedWithVideo = videoTimeOffset !== undefined;\n\n let inputSamples: Array = track.samples;\n let offset: number = rawMPEG ? 0 : 8;\n let nextAudioPts: number = this.nextAudioPts || -1;\n\n // window.audioSamples ? 
window.audioSamples.push(inputSamples.map(s => s.pts)) : (window.audioSamples = [inputSamples.map(s => s.pts)]);\n\n // for audio samples, also consider consecutive fragments as being contiguous (even if a level switch occurs),\n // for sake of clarity:\n // consecutive fragments are frags with\n // - less than 100ms gaps between new time offset (if accurate) and next expected PTS OR\n // - less than 20 audio frames distance\n // contiguous fragments are consecutive fragments from same quality level (same level, new SN = old SN + 1)\n // this helps ensuring audio continuity\n // and this also avoids audio glitches/cut when switching quality, or reporting wrong duration on first audio frame\n const timeOffsetMpegTS = timeOffset * inputTimeScale;\n const initTime = (initPTS.baseTime * inputTimeScale) / initPTS.timescale;\n this.isAudioContiguous = contiguous =\n contiguous ||\n ((inputSamples.length &&\n nextAudioPts > 0 &&\n ((accurateTimeOffset &&\n Math.abs(timeOffsetMpegTS - nextAudioPts) < 9000) ||\n Math.abs(\n normalizePts(inputSamples[0].pts - initTime, timeOffsetMpegTS) -\n nextAudioPts,\n ) <\n 20 * inputSampleDuration)) as boolean);\n\n // compute normalized PTS\n inputSamples.forEach(function (sample) {\n sample.pts = normalizePts(sample.pts - initTime, timeOffsetMpegTS);\n });\n\n if (!contiguous || nextAudioPts < 0) {\n // filter out sample with negative PTS that are not playable anyway\n // if we don't remove these negative samples, they will shift all audio samples forward.\n // leading to audio overlap between current / next fragment\n inputSamples = inputSamples.filter((sample) => sample.pts >= 0);\n\n // in case all samples have negative PTS, and have been filtered out, return now\n if (!inputSamples.length) {\n return;\n }\n\n if (videoTimeOffset === 0) {\n // Set the start to 0 to match video so that start gaps larger than inputSampleDuration are filled with silence\n nextAudioPts = 0;\n } else if (accurateTimeOffset && !alignedWithVideo) {\n // When not seeking, not live, and LevelDetails.PTSKnown, use fragment start as predicted next audio PTS\n nextAudioPts = Math.max(0, timeOffsetMpegTS);\n } else {\n // if frags are not contiguous and if we cant trust time offset, let's use first sample PTS as next audio PTS\n nextAudioPts = inputSamples[0].pts;\n }\n }\n\n // If the audio track is missing samples, the frames seem to get \"left-shifted\" within the\n // resulting mp4 segment, causing sync issues and leaving gaps at the end of the audio segment.\n // In an effort to prevent this from happening, we inject frames here where there are gaps.\n // When possible, we inject a silent frame; when that's not possible, we duplicate the last\n // frame.\n\n if (track.segmentCodec === 'aac') {\n const maxAudioFramesDrift = this.config.maxAudioFramesDrift;\n for (let i = 0, nextPts = nextAudioPts; i < inputSamples.length; i++) {\n // First, let's see how far off this frame is from where we expect it to be\n const sample = inputSamples[i];\n const pts = sample.pts;\n const delta = pts - nextPts;\n const duration = Math.abs((1000 * delta) / inputTimeScale);\n\n // When remuxing with video, if we're overlapping by more than a duration, drop this sample to stay in sync\n if (\n delta <= -maxAudioFramesDrift * inputSampleDuration &&\n alignedWithVideo\n ) {\n if (i === 0) {\n logger.warn(\n `Audio frame @ ${(pts / inputTimeScale).toFixed(\n 3,\n )}s overlaps nextAudioPts by ${Math.round(\n (1000 * delta) / inputTimeScale,\n )} ms.`,\n );\n this.nextAudioPts = nextAudioPts = nextPts = 
pts;\n }\n } // eslint-disable-line brace-style\n\n // Insert missing frames if:\n // 1: We're more than maxAudioFramesDrift frame away\n // 2: Not more than MAX_SILENT_FRAME_DURATION away\n // 3: currentTime (aka nextPtsNorm) is not 0\n // 4: remuxing with video (videoTimeOffset !== undefined)\n else if (\n delta >= maxAudioFramesDrift * inputSampleDuration &&\n duration < MAX_SILENT_FRAME_DURATION &&\n alignedWithVideo\n ) {\n let missing = Math.round(delta / inputSampleDuration);\n // Adjust nextPts so that silent samples are aligned with media pts. This will prevent media samples from\n // later being shifted if nextPts is based on timeOffset and delta is not a multiple of inputSampleDuration.\n nextPts = pts - missing * inputSampleDuration;\n if (nextPts < 0) {\n missing--;\n nextPts += inputSampleDuration;\n }\n if (i === 0) {\n this.nextAudioPts = nextAudioPts = nextPts;\n }\n logger.warn(\n `[mp4-remuxer]: Injecting ${missing} audio frame @ ${(\n nextPts / inputTimeScale\n ).toFixed(3)}s due to ${Math.round(\n (1000 * delta) / inputTimeScale,\n )} ms gap.`,\n );\n for (let j = 0; j < missing; j++) {\n const newStamp = Math.max(nextPts as number, 0);\n let fillFrame = AAC.getSilentFrame(\n track.manifestCodec || track.codec,\n track.channelCount,\n );\n if (!fillFrame) {\n logger.log(\n '[mp4-remuxer]: Unable to get silent frame for given audio codec; duplicating last frame instead.',\n );\n fillFrame = sample.unit.subarray();\n }\n inputSamples.splice(i, 0, {\n unit: fillFrame,\n pts: newStamp,\n });\n nextPts += inputSampleDuration;\n i++;\n }\n }\n sample.pts = nextPts;\n nextPts += inputSampleDuration;\n }\n }\n let firstPTS: number | null = null;\n let lastPTS: number | null = null;\n let mdat: any;\n let mdatSize: number = 0;\n let sampleLength: number = inputSamples.length;\n while (sampleLength--) {\n mdatSize += inputSamples[sampleLength].unit.byteLength;\n }\n for (let j = 0, nbSamples = inputSamples.length; j < nbSamples; j++) {\n const audioSample = inputSamples[j];\n const unit = audioSample.unit;\n let pts = audioSample.pts;\n if (lastPTS !== null) {\n // If we have more than one sample, set the duration of the sample to the \"real\" duration; the PTS diff with\n // the previous sample\n const prevSample = outputSamples[j - 1];\n prevSample.duration = Math.round((pts - lastPTS) / scaleFactor);\n } else {\n if (contiguous && track.segmentCodec === 'aac') {\n // set PTS/DTS to expected PTS/DTS\n pts = nextAudioPts;\n }\n // remember first PTS of our audioSamples\n firstPTS = pts;\n if (mdatSize > 0) {\n /* concatenate the audio data and construct the mdat in place\n (need 8 more bytes to fill length and mdat type) */\n mdatSize += offset;\n try {\n mdat = new Uint8Array(mdatSize);\n } catch (err) {\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MUX_ERROR,\n details: ErrorDetails.REMUX_ALLOC_ERROR,\n fatal: false,\n error: err,\n bytes: mdatSize,\n reason: `fail allocating audio mdat ${mdatSize}`,\n });\n return;\n }\n if (!rawMPEG) {\n const view = new DataView(mdat.buffer);\n view.setUint32(0, mdatSize);\n mdat.set(MP4.types.mdat, 4);\n }\n } else {\n // no audio samples\n return;\n }\n }\n mdat.set(unit, offset);\n const unitLen = unit.byteLength;\n offset += unitLen;\n // Default the sample's duration to the computed mp4SampleDuration, which will either be 1024 for AAC or 1152 for MPEG\n // In the case that we have 1 sample, this will be the duration. 
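// Standalone sketch (editor's addition, hypothetical helper, not hls.js API) of
// the silent-frame planning used in the AAC gap-filling loop above: derive how
// many frames to inject and the pts of the first one, aligned to the media pts
// rather than to timeOffset so later samples are not shifted.
function planSilentFrames(
  samplePts: number,
  expectedPts: number,
  frameDuration: number,
): { missing: number; firstPts: number } {
  let missing = Math.round((samplePts - expectedPts) / frameDuration);
  let firstPts = samplePts - missing * frameDuration;
  if (firstPts < 0) {
    // never schedule an injected frame before t=0
    missing--;
    firstPts += frameDuration;
  }
  return { missing, firstPts };
}
// e.g. planSilentFrames(4096, 1024, 1024) -> { missing: 3, firstPts: 1024 }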
If we have more than one sample, the duration\n // becomes the PTS diff with the previous sample\n outputSamples.push(new Mp4Sample(true, mp4SampleDuration, unitLen, 0));\n lastPTS = pts;\n }\n\n // We could end up with no audio samples if all input samples were overlapping with the previously remuxed ones\n const nbSamples = outputSamples.length;\n if (!nbSamples) {\n return;\n }\n\n // The next audio sample PTS should be equal to last sample PTS + duration\n const lastSample = outputSamples[outputSamples.length - 1];\n this.nextAudioPts = nextAudioPts =\n lastPTS! + scaleFactor * lastSample.duration;\n\n // Set the track samples from inputSamples to outputSamples before remuxing\n const moof = rawMPEG\n ? new Uint8Array(0)\n : MP4.moof(\n track.sequenceNumber++,\n firstPTS! / scaleFactor,\n Object.assign({}, track, { samples: outputSamples }),\n );\n\n // Clear the track samples. This also clears the samples array in the demuxer, since the reference is shared\n track.samples = [];\n const start = firstPTS! / inputTimeScale;\n const end = nextAudioPts / inputTimeScale;\n const type: SourceBufferName = 'audio';\n const audioData = {\n data1: moof,\n data2: mdat,\n startPTS: start,\n endPTS: end,\n startDTS: start,\n endDTS: end,\n type,\n hasAudio: true,\n hasVideo: false,\n nb: nbSamples,\n };\n\n this.isAudioContiguous = true;\n return audioData;\n }\n\n remuxEmptyAudio(\n track: DemuxedAudioTrack,\n timeOffset: number,\n contiguous: boolean,\n videoData: Fragment,\n ): RemuxedTrack | undefined {\n const inputTimeScale: number = track.inputTimeScale;\n const mp4timeScale: number = track.samplerate\n ? track.samplerate\n : inputTimeScale;\n const scaleFactor: number = inputTimeScale / mp4timeScale;\n const nextAudioPts: number | null = this.nextAudioPts;\n // sync with video's timestamp\n const initDTS = this._initDTS as RationalTimestamp;\n const init90kHz = (initDTS.baseTime * 90000) / initDTS.timescale;\n const startDTS: number =\n (nextAudioPts !== null\n ? nextAudioPts\n : videoData.startDTS * inputTimeScale) + init90kHz;\n const endDTS: number = videoData.endDTS * inputTimeScale + init90kHz;\n // one sample's duration value\n const frameDuration: number = scaleFactor * AAC_SAMPLES_PER_FRAME;\n // number of samples spanning this segment's duration\n const nbSamples: number = Math.ceil((endDTS - startDTS) / frameDuration);\n // silent frame\n const silentFrame: Uint8Array | undefined = AAC.getSilentFrame(\n track.manifestCodec || track.codec,\n track.channelCount,\n );\n\n logger.warn('[mp4-remuxer]: remux empty Audio');\n // Can't remux if we can't generate a silent frame...\n if (!silentFrame) {\n logger.trace(\n '[mp4-remuxer]: Unable to remuxEmptyAudio since we were unable to get a silent frame for given audio codec',\n );\n return;\n }\n\n const samples: Array<AudioSample> = [];\n for (let i = 0; i < nbSamples; i++) {\n const stamp = startDTS + i * frameDuration;\n samples.push({ unit: silentFrame, pts: stamp, dts: stamp });\n }\n track.samples = samples;\n\n return this.remuxAudio(track, timeOffset, contiguous, false);\n }\n}\n\nexport function normalizePts(value: number, reference: number | null): number {\n let offset;\n if (reference === null) {\n return value;\n }\n\n if (reference < value) {\n // - 2^33\n offset = -8589934592;\n } else {\n // + 2^33\n offset = 8589934592;\n }\n /* PTS is 33bit (from 0 to 2^33 -1)\n if diff between value and reference is bigger than half of the amplitude (2^32) then it means that\n PTS looping occurred. 
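(Worked examples, added for illustration; 2^33 = 8589934592, half-amplitude 2^32 = 4294967296:
  normalizePts(100, 8589934000)  -> 8589934692, the value had wrapped past 2^33;
  normalizePts(8589934000, 100)  -> -592, the reference sits near zero;
  normalizePts(5000, null)       -> 5000, no reference, value returned unchanged.)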
fill the gap */\n while (Math.abs(value - reference) > 4294967296) {\n value += offset;\n }\n\n return value;\n}\n\nfunction findKeyframeIndex(samples: Array<VideoSample>): number {\n for (let i = 0; i < samples.length; i++) {\n if (samples[i].key) {\n return i;\n }\n }\n return -1;\n}\n\nexport function flushTextTrackMetadataCueSamples(\n track: DemuxedMetadataTrack,\n timeOffset: number,\n initPTS: RationalTimestamp,\n initDTS: RationalTimestamp,\n): RemuxedMetadata | undefined {\n const length = track.samples.length;\n if (!length) {\n return;\n }\n const inputTimeScale = track.inputTimeScale;\n for (let index = 0; index < length; index++) {\n const sample = track.samples[index];\n // setting id3 pts, dts to relative time\n // using this._initPTS and this._initDTS to calculate relative time\n sample.pts =\n normalizePts(\n sample.pts - (initPTS.baseTime * inputTimeScale) / initPTS.timescale,\n timeOffset * inputTimeScale,\n ) / inputTimeScale;\n sample.dts =\n normalizePts(\n sample.dts - (initDTS.baseTime * inputTimeScale) / initDTS.timescale,\n timeOffset * inputTimeScale,\n ) / inputTimeScale;\n }\n const samples = track.samples;\n track.samples = [];\n return {\n samples,\n };\n}\n\nexport function flushTextTrackUserdataCueSamples(\n track: DemuxedUserdataTrack,\n timeOffset: number,\n initPTS: RationalTimestamp,\n): RemuxedUserdata | undefined {\n const length = track.samples.length;\n if (!length) {\n return;\n }\n\n const inputTimeScale = track.inputTimeScale;\n for (let index = 0; index < length; index++) {\n const sample = track.samples[index];\n // setting text pts, dts to relative time\n // using this._initPTS and this._initDTS to calculate relative time\n sample.pts =\n normalizePts(\n sample.pts - (initPTS.baseTime * inputTimeScale) / initPTS.timescale,\n timeOffset * inputTimeScale,\n ) / inputTimeScale;\n }\n track.samples.sort((a, b) => a.pts - b.pts);\n const samples = track.samples;\n track.samples = [];\n return {\n samples,\n };\n}\n\ntype Mp4SampleFlags = {\n isLeading: 0;\n isDependedOn: 0;\n hasRedundancy: 0;\n degradPrio: 0;\n dependsOn: 1 | 2;\n isNonSync: 0 | 1;\n};\n\nclass Mp4Sample {\n public size: number;\n public duration: number;\n public cts: number;\n public flags: Mp4SampleFlags;\n\n constructor(\n isKeyframe: boolean,\n duration: number,\n size: number,\n cts: number,\n ) {\n this.duration = duration;\n this.size = size;\n this.cts = cts;\n this.flags = {\n isLeading: 0,\n isDependedOn: 0,\n hasRedundancy: 0,\n degradPrio: 0,\n dependsOn: isKeyframe ? 2 : 1,\n isNonSync: isKeyframe ? 
0 : 1,\n };\n }\n}\n", "import {\n flushTextTrackMetadataCueSamples,\n flushTextTrackUserdataCueSamples,\n} from './mp4-remuxer';\nimport {\n InitData,\n InitDataTrack,\n patchEncyptionData,\n} from '../utils/mp4-tools';\nimport {\n getDuration,\n getStartDTS,\n offsetStartDTS,\n parseInitSegment,\n} from '../utils/mp4-tools';\nimport { ElementaryStreamTypes } from '../loader/fragment';\nimport { logger } from '../utils/logger';\nimport { getCodecCompatibleName } from '../utils/codecs';\nimport type { TrackSet } from '../types/track';\nimport type {\n InitSegmentData,\n RemuxedTrack,\n Remuxer,\n RemuxerResult,\n} from '../types/remuxer';\nimport type {\n DemuxedAudioTrack,\n DemuxedMetadataTrack,\n DemuxedUserdataTrack,\n PassthroughTrack,\n} from '../types/demuxer';\nimport type { DecryptData } from '../loader/level-key';\nimport type { RationalTimestamp } from '../utils/timescale-conversion';\n\nclass PassThroughRemuxer implements Remuxer {\n private emitInitSegment: boolean = false;\n private audioCodec?: string;\n private videoCodec?: string;\n private initData?: InitData;\n private initPTS: RationalTimestamp | null = null;\n private initTracks?: TrackSet;\n private lastEndTime: number | null = null;\n\n public destroy() {}\n\n public resetTimeStamp(defaultInitPTS: RationalTimestamp | null) {\n this.initPTS = defaultInitPTS;\n this.lastEndTime = null;\n }\n\n public resetNextTimestamp() {\n this.lastEndTime = null;\n }\n\n public resetInitSegment(\n initSegment: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n decryptdata: DecryptData | null,\n ) {\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this.generateInitSegment(patchEncyptionData(initSegment, decryptdata));\n this.emitInitSegment = true;\n }\n\n private generateInitSegment(initSegment: Uint8Array | undefined): void {\n let { audioCodec, videoCodec } = this;\n if (!initSegment?.byteLength) {\n this.initTracks = undefined;\n this.initData = undefined;\n return;\n }\n const initData = (this.initData = parseInitSegment(initSegment));\n\n // Get codec from initSegment or fallback to default\n if (initData.audio) {\n audioCodec = getParsedTrackCodec(\n initData.audio,\n ElementaryStreamTypes.AUDIO,\n );\n }\n\n if (initData.video) {\n videoCodec = getParsedTrackCodec(\n initData.video,\n ElementaryStreamTypes.VIDEO,\n );\n }\n\n const tracks: TrackSet = {};\n if (initData.audio && initData.video) {\n tracks.audiovideo = {\n container: 'video/mp4',\n codec: audioCodec + ',' + videoCodec,\n initSegment,\n id: 'main',\n };\n } else if (initData.audio) {\n tracks.audio = {\n container: 'audio/mp4',\n codec: audioCodec,\n initSegment,\n id: 'audio',\n };\n } else if (initData.video) {\n tracks.video = {\n container: 'video/mp4',\n codec: videoCodec,\n initSegment,\n id: 'main',\n };\n } else {\n logger.warn(\n '[passthrough-remuxer.ts]: initSegment does not contain moov or trak boxes.',\n );\n }\n this.initTracks = tracks;\n }\n\n public remux(\n audioTrack: DemuxedAudioTrack,\n videoTrack: PassthroughTrack,\n id3Track: DemuxedMetadataTrack,\n textTrack: DemuxedUserdataTrack,\n timeOffset: number,\n accurateTimeOffset: boolean,\n ): RemuxerResult {\n let { initPTS, lastEndTime } = this;\n const result: RemuxerResult = {\n audio: undefined,\n video: undefined,\n text: textTrack,\n id3: id3Track,\n initSegment: undefined,\n };\n\n // If we haven't yet set a lastEndDTS, or it was reset, set it to the provided timeOffset. 
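// (Editor's aside, illustrative numbers: the passthrough path derives initPTS so
// that fMP4 media time maps onto playlist time. With timeOffset = 1 and a first
// decode time of 10, the code below sets initPTS = { baseTime: 9, timescale: 1 },
// so startTime = 10 - 9 / 1 = 1, matching the playlist position.)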
We want to use the\n // lastEndDTS over timeOffset whenever possible; during progressive playback, the media source will not update\n // the media duration (which is what timeOffset is provided as) before we need to process the next chunk.\n if (!Number.isFinite(lastEndTime!)) {\n lastEndTime = this.lastEndTime = timeOffset || 0;\n }\n\n // The binary segment data is added to the videoTrack in the mp4demuxer. We don't check to see if the data is only\n // audio or video (or both); adding it to video was an arbitrary choice.\n const data = videoTrack.samples;\n if (!data?.length) {\n return result;\n }\n\n const initSegment: InitSegmentData = {\n initPTS: undefined,\n timescale: 1,\n };\n let initData = this.initData;\n if (!initData?.length) {\n this.generateInitSegment(data);\n initData = this.initData;\n }\n if (!initData?.length) {\n // We can't remux if the initSegment could not be generated\n logger.warn('[passthrough-remuxer.ts]: Failed to generate initSegment.');\n return result;\n }\n if (this.emitInitSegment) {\n initSegment.tracks = this.initTracks as TrackSet;\n this.emitInitSegment = false;\n }\n\n const duration = getDuration(data, initData);\n const startDTS = getStartDTS(initData, data);\n const decodeTime = startDTS === null ? timeOffset : startDTS;\n if (\n isInvalidInitPts(initPTS, decodeTime, timeOffset, duration) ||\n (initSegment.timescale !== initPTS.timescale && accurateTimeOffset)\n ) {\n initSegment.initPTS = decodeTime - timeOffset;\n if (initPTS && initPTS.timescale === 1) {\n logger.warn(\n `Adjusting initPTS by ${initSegment.initPTS - initPTS.baseTime}`,\n );\n }\n this.initPTS = initPTS = {\n baseTime: initSegment.initPTS,\n timescale: 1,\n };\n }\n\n const startTime = audioTrack\n ? decodeTime - initPTS.baseTime / initPTS.timescale\n : (lastEndTime as number);\n const endTime = startTime + duration;\n offsetStartDTS(initData, data, initPTS.baseTime / initPTS.timescale);\n\n if (duration > 0) {\n this.lastEndTime = endTime;\n } else {\n logger.warn('Duration parsed from mp4 should be greater than zero');\n this.resetNextTimestamp();\n }\n\n const hasAudio = !!initData.audio;\n const hasVideo = !!initData.video;\n\n let type: any = '';\n if (hasAudio) {\n type += 'audio';\n }\n\n if (hasVideo) {\n type += 'video';\n }\n\n const track: RemuxedTrack = {\n data1: data,\n startPTS: startTime,\n startDTS: startTime,\n endPTS: endTime,\n endDTS: endTime,\n type,\n hasAudio,\n hasVideo,\n nb: 1,\n dropped: 0,\n };\n\n result.audio = track.type === 'audio' ? track : undefined;\n result.video = track.type !== 'audio' ? 
track : undefined;\n result.initSegment = initSegment;\n result.id3 = flushTextTrackMetadataCueSamples(\n id3Track,\n timeOffset,\n initPTS,\n initPTS,\n );\n\n if (textTrack.samples.length) {\n result.text = flushTextTrackUserdataCueSamples(\n textTrack,\n timeOffset,\n initPTS,\n );\n }\n\n return result;\n }\n}\n\nfunction isInvalidInitPts(\n initPTS: RationalTimestamp | null,\n startDTS: number,\n timeOffset: number,\n duration: number,\n): initPTS is null {\n if (initPTS === null) {\n return true;\n }\n // InitPTS is invalid when distance from program would be more than segment duration or a minimum of one second\n const minDuration = Math.max(duration, 1);\n const startTime = startDTS - initPTS.baseTime / initPTS.timescale;\n return Math.abs(startTime - timeOffset) > minDuration;\n}\n\nfunction getParsedTrackCodec(\n track: InitDataTrack,\n type: ElementaryStreamTypes.AUDIO | ElementaryStreamTypes.VIDEO,\n): string {\n const parsedCodec = track?.codec;\n if (parsedCodec && parsedCodec.length > 4) {\n return parsedCodec;\n }\n if (type === ElementaryStreamTypes.AUDIO) {\n if (\n parsedCodec === 'ec-3' ||\n parsedCodec === 'ac-3' ||\n parsedCodec === 'alac'\n ) {\n return parsedCodec;\n }\n if (parsedCodec === 'fLaC' || parsedCodec === 'Opus') {\n // Opting not to get `preferManagedMediaSource` from player config for isSupported() check for simplicity\n const preferManagedMediaSource = false;\n return getCodecCompatibleName(parsedCodec, preferManagedMediaSource);\n }\n const result = 'mp4a.40.5';\n logger.info(\n `Parsed audio codec \"${parsedCodec}\" or audio object type not handled. Using \"${result}\"`,\n );\n return result;\n }\n // Provide defaults based on codec type\n // This allows for some playback of some fmp4 playlists without CODECS defined in manifest\n logger.warn(`Unhandled video codec \"${parsedCodec}\"`);\n if (parsedCodec === 'hvc1' || parsedCodec === 'hev1') {\n return 'hvc1.1.6.L120.90';\n }\n if (parsedCodec === 'av01') {\n return 'av01.0.04M.08';\n }\n return 'avc1.42e01e';\n}\nexport default PassThroughRemuxer;\n", "import type { HlsEventEmitter } from '../events';\nimport { Events } from '../events';\nimport { ErrorTypes, ErrorDetails } from '../errors';\nimport Decrypter from '../crypt/decrypter';\nimport AACDemuxer from './audio/aacdemuxer';\nimport MP4Demuxer from '../demux/mp4demuxer';\nimport TSDemuxer, { TypeSupported } from '../demux/tsdemuxer';\nimport MP3Demuxer from './audio/mp3demuxer';\nimport { AC3Demuxer } from './audio/ac3-demuxer';\nimport MP4Remuxer from '../remux/mp4-remuxer';\nimport PassThroughRemuxer from '../remux/passthrough-remuxer';\nimport { logger } from '../utils/logger';\nimport type { Demuxer, DemuxerResult, KeyData } from '../types/demuxer';\nimport type { Remuxer } from '../types/remuxer';\nimport type { TransmuxerResult, ChunkMetadata } from '../types/transmuxer';\nimport type { HlsConfig } from '../config';\nimport type { DecryptData } from '../loader/level-key';\nimport type { PlaylistLevelType } from '../types/loader';\nimport type { RationalTimestamp } from '../utils/timescale-conversion';\nimport { optionalSelf } from '../utils/global';\n\nlet now;\n// performance.now() not available on WebWorker, at least on Safari Desktop\ntry {\n now = self.performance.now.bind(self.performance);\n} catch (err) {\n logger.debug('Unable to use Performance API on this environment');\n now = optionalSelf?.Date.now;\n}\n\ntype MuxConfig =\n | { demux: typeof MP4Demuxer; remux: typeof PassThroughRemuxer }\n | { demux: typeof TSDemuxer; remux: 
typeof MP4Remuxer }\n | { demux: typeof AC3Demuxer; remux: typeof MP4Remuxer }\n | { demux: typeof AACDemuxer; remux: typeof MP4Remuxer }\n | { demux: typeof MP3Demuxer; remux: typeof MP4Remuxer };\n\nconst muxConfig: MuxConfig[] = [\n { demux: MP4Demuxer, remux: PassThroughRemuxer },\n { demux: TSDemuxer, remux: MP4Remuxer },\n { demux: AACDemuxer, remux: MP4Remuxer },\n { demux: MP3Demuxer, remux: MP4Remuxer },\n];\n\nif (__USE_M2TS_ADVANCED_CODECS__) {\n muxConfig.splice(2, 0, { demux: AC3Demuxer, remux: MP4Remuxer });\n}\n\nexport default class Transmuxer {\n public async: boolean = false;\n private observer: HlsEventEmitter;\n private typeSupported: TypeSupported;\n private config: HlsConfig;\n private vendor: string;\n private id: PlaylistLevelType;\n private demuxer?: Demuxer;\n private remuxer?: Remuxer;\n private decrypter?: Decrypter;\n private probe!: Function;\n private decryptionPromise: Promise | null = null;\n private transmuxConfig!: TransmuxConfig;\n private currentTransmuxState!: TransmuxState;\n\n constructor(\n observer: HlsEventEmitter,\n typeSupported: TypeSupported,\n config: HlsConfig,\n vendor: string,\n id: PlaylistLevelType,\n ) {\n this.observer = observer;\n this.typeSupported = typeSupported;\n this.config = config;\n this.vendor = vendor;\n this.id = id;\n }\n\n configure(transmuxConfig: TransmuxConfig) {\n this.transmuxConfig = transmuxConfig;\n if (this.decrypter) {\n this.decrypter.reset();\n }\n }\n\n push(\n data: ArrayBuffer,\n decryptdata: DecryptData | null,\n chunkMeta: ChunkMetadata,\n state?: TransmuxState,\n ): TransmuxerResult | Promise {\n const stats = chunkMeta.transmuxing;\n stats.executeStart = now();\n\n let uintData: Uint8Array = new Uint8Array(data);\n const { currentTransmuxState, transmuxConfig } = this;\n if (state) {\n this.currentTransmuxState = state;\n }\n\n const {\n contiguous,\n discontinuity,\n trackSwitch,\n accurateTimeOffset,\n timeOffset,\n initSegmentChange,\n } = state || currentTransmuxState;\n const {\n audioCodec,\n videoCodec,\n defaultInitPts,\n duration,\n initSegmentData,\n } = transmuxConfig;\n\n const keyData = getEncryptionType(uintData, decryptdata);\n if (keyData && keyData.method === 'AES-128') {\n const decrypter = this.getDecrypter();\n // Software decryption is synchronous; webCrypto is not\n if (decrypter.isSync()) {\n // Software decryption is progressive. Progressive decryption may not return a result on each call. 
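// (Editor's sketch of the branch below, hypothetical wrapper names only: a sync
// decrypter may buffer input until it holds whole AES-128 blocks, so "no output
// yet" is a normal outcome rather than an error:
//   const out = decrypter.softwareDecrypt(chunk, key, iv); // may return null
//   if (!out) { return emptyResult(chunkMeta); }           // remainder drained by flush()
//   demux(new Uint8Array(out));)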
Any cached\n // data is handled in the flush() call\n let decryptedData = decrypter.softwareDecrypt(\n uintData,\n keyData.key.buffer,\n keyData.iv.buffer,\n );\n // For Low-Latency HLS Parts, decrypt in place, since part parsing is expected on push progress\n const loadingParts = chunkMeta.part > -1;\n if (loadingParts) {\n decryptedData = decrypter.flush();\n }\n if (!decryptedData) {\n stats.executeEnd = now();\n return emptyResult(chunkMeta);\n }\n uintData = new Uint8Array(decryptedData);\n } else {\n this.decryptionPromise = decrypter\n .webCryptoDecrypt(uintData, keyData.key.buffer, keyData.iv.buffer)\n .then((decryptedData): TransmuxerResult => {\n // Calling push here is important; if flush() is called while this is still resolving, this ensures that\n // the decrypted data has been transmuxed\n const result = this.push(\n decryptedData,\n null,\n chunkMeta,\n ) as TransmuxerResult;\n this.decryptionPromise = null;\n return result;\n });\n return this.decryptionPromise!;\n }\n }\n\n const resetMuxers = this.needsProbing(discontinuity, trackSwitch);\n if (resetMuxers) {\n const error = this.configureTransmuxer(uintData);\n if (error) {\n logger.warn(`[transmuxer] ${error.message}`);\n this.observer.emit(Events.ERROR, Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n fatal: false,\n error,\n reason: error.message,\n });\n stats.executeEnd = now();\n return emptyResult(chunkMeta);\n }\n }\n\n if (discontinuity || trackSwitch || initSegmentChange || resetMuxers) {\n this.resetInitSegment(\n initSegmentData,\n audioCodec,\n videoCodec,\n duration,\n decryptdata,\n );\n }\n\n if (discontinuity || initSegmentChange || resetMuxers) {\n this.resetInitialTimestamp(defaultInitPts);\n }\n\n if (!contiguous) {\n this.resetContiguity();\n }\n\n const result = this.transmux(\n uintData,\n keyData,\n timeOffset,\n accurateTimeOffset,\n chunkMeta,\n );\n const currentState = this.currentTransmuxState;\n\n currentState.contiguous = true;\n currentState.discontinuity = false;\n currentState.trackSwitch = false;\n\n stats.executeEnd = now();\n return result;\n }\n\n // Due to data caching, flush calls can produce more than one TransmuxerResult (hence the Array type)\n flush(\n chunkMeta: ChunkMetadata,\n ): TransmuxerResult[] | Promise {\n const stats = chunkMeta.transmuxing;\n stats.executeStart = now();\n\n const { decrypter, currentTransmuxState, decryptionPromise } = this;\n\n if (decryptionPromise) {\n // Upon resolution, the decryption promise calls push() and returns its TransmuxerResult up the stack. Therefore\n // only flushing is required for async decryption\n return decryptionPromise.then(() => {\n return this.flush(chunkMeta);\n });\n }\n\n const transmuxResults: TransmuxerResult[] = [];\n const { timeOffset } = currentTransmuxState;\n if (decrypter) {\n // The decrypter may have data cached, which needs to be demuxed. 
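// (Editor's aside: this is why flush() returns an array; callers can treat the
// sync and async cases uniformly, e.g.:
//   Promise.resolve(transmuxer.flush(chunkMeta)).then((results) =>
//     results.forEach((result) => handleTransmuxComplete(result)),
//   ); // handleTransmuxComplete being a hypothetical consumer)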
In this case we'll have two TransmuxResults\n // This happens in the case that we receive only 1 push call for a segment (either for non-progressive downloads,\n // or for progressive downloads with small segments)\n const decryptedData = decrypter.flush();\n if (decryptedData) {\n // Push always returns a TransmuxerResult if decryptdata is null\n transmuxResults.push(\n this.push(decryptedData, null, chunkMeta) as TransmuxerResult,\n );\n }\n }\n\n const { demuxer, remuxer } = this;\n if (!demuxer || !remuxer) {\n // If probing failed, then Hls.js has been given content its not able to handle\n stats.executeEnd = now();\n return [emptyResult(chunkMeta)];\n }\n\n const demuxResultOrPromise = demuxer.flush(timeOffset);\n if (isPromise(demuxResultOrPromise)) {\n // Decrypt final SAMPLE-AES samples\n return demuxResultOrPromise.then((demuxResult) => {\n this.flushRemux(transmuxResults, demuxResult, chunkMeta);\n return transmuxResults;\n });\n }\n\n this.flushRemux(transmuxResults, demuxResultOrPromise, chunkMeta);\n return transmuxResults;\n }\n\n private flushRemux(\n transmuxResults: TransmuxerResult[],\n demuxResult: DemuxerResult,\n chunkMeta: ChunkMetadata,\n ) {\n const { audioTrack, videoTrack, id3Track, textTrack } = demuxResult;\n const { accurateTimeOffset, timeOffset } = this.currentTransmuxState;\n logger.log(\n `[transmuxer.ts]: Flushed fragment ${chunkMeta.sn}${\n chunkMeta.part > -1 ? ' p: ' + chunkMeta.part : ''\n } of level ${chunkMeta.level}`,\n );\n const remuxResult = this.remuxer!.remux(\n audioTrack,\n videoTrack,\n id3Track,\n textTrack,\n timeOffset,\n accurateTimeOffset,\n true,\n this.id,\n );\n transmuxResults.push({\n remuxResult,\n chunkMeta,\n });\n\n chunkMeta.transmuxing.executeEnd = now();\n }\n\n resetInitialTimestamp(defaultInitPts: RationalTimestamp | null) {\n const { demuxer, remuxer } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetTimeStamp(defaultInitPts);\n remuxer.resetTimeStamp(defaultInitPts);\n }\n\n resetContiguity() {\n const { demuxer, remuxer } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetContiguity();\n remuxer.resetNextTimestamp();\n }\n\n resetInitSegment(\n initSegmentData: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n trackDuration: number,\n decryptdata: DecryptData | null,\n ) {\n const { demuxer, remuxer } = this;\n if (!demuxer || !remuxer) {\n return;\n }\n demuxer.resetInitSegment(\n initSegmentData,\n audioCodec,\n videoCodec,\n trackDuration,\n );\n remuxer.resetInitSegment(\n initSegmentData,\n audioCodec,\n videoCodec,\n decryptdata,\n );\n }\n\n destroy(): void {\n if (this.demuxer) {\n this.demuxer.destroy();\n this.demuxer = undefined;\n }\n if (this.remuxer) {\n this.remuxer.destroy();\n this.remuxer = undefined;\n }\n }\n\n private transmux(\n data: Uint8Array,\n keyData: KeyData | null,\n timeOffset: number,\n accurateTimeOffset: boolean,\n chunkMeta: ChunkMetadata,\n ): TransmuxerResult | Promise {\n let result: TransmuxerResult | Promise;\n if (keyData && keyData.method === 'SAMPLE-AES') {\n result = this.transmuxSampleAes(\n data,\n keyData,\n timeOffset,\n accurateTimeOffset,\n chunkMeta,\n );\n } else {\n result = this.transmuxUnencrypted(\n data,\n timeOffset,\n accurateTimeOffset,\n chunkMeta,\n );\n }\n return result;\n }\n\n private transmuxUnencrypted(\n data: Uint8Array,\n timeOffset: number,\n accurateTimeOffset: boolean,\n chunkMeta: ChunkMetadata,\n ): TransmuxerResult {\n const { audioTrack, videoTrack, id3Track, 
textTrack } = (\n this.demuxer as Demuxer\n ).demux(data, timeOffset, false, !this.config.progressive);\n const remuxResult = this.remuxer!.remux(\n audioTrack,\n videoTrack,\n id3Track,\n textTrack,\n timeOffset,\n accurateTimeOffset,\n false,\n this.id,\n );\n return {\n remuxResult,\n chunkMeta,\n };\n }\n\n private transmuxSampleAes(\n data: Uint8Array,\n decryptData: KeyData,\n timeOffset: number,\n accurateTimeOffset: boolean,\n chunkMeta: ChunkMetadata,\n ): Promise {\n return (this.demuxer as Demuxer)\n .demuxSampleAes(data, decryptData, timeOffset)\n .then((demuxResult) => {\n const remuxResult = this.remuxer!.remux(\n demuxResult.audioTrack,\n demuxResult.videoTrack,\n demuxResult.id3Track,\n demuxResult.textTrack,\n timeOffset,\n accurateTimeOffset,\n false,\n this.id,\n );\n return {\n remuxResult,\n chunkMeta,\n };\n });\n }\n\n private configureTransmuxer(data: Uint8Array): void | Error {\n const { config, observer, typeSupported, vendor } = this;\n // probe for content type\n let mux;\n for (let i = 0, len = muxConfig.length; i < len; i++) {\n if (muxConfig[i].demux?.probe(data)) {\n mux = muxConfig[i];\n break;\n }\n }\n if (!mux) {\n return new Error('Failed to find demuxer by probing fragment data');\n }\n // so let's check that current remuxer and demuxer are still valid\n const demuxer = this.demuxer;\n const remuxer = this.remuxer;\n const Remuxer: MuxConfig['remux'] = mux.remux;\n const Demuxer: MuxConfig['demux'] = mux.demux;\n if (!remuxer || !(remuxer instanceof Remuxer)) {\n this.remuxer = new Remuxer(observer, config, typeSupported, vendor);\n }\n if (!demuxer || !(demuxer instanceof Demuxer)) {\n this.demuxer = new Demuxer(observer, config, typeSupported);\n this.probe = Demuxer.probe;\n }\n }\n\n private needsProbing(discontinuity: boolean, trackSwitch: boolean): boolean {\n // in case of continuity change, or track switch\n // we might switch from content type (AAC container to TS container, or TS to fmp4 for example)\n return !this.demuxer || !this.remuxer || discontinuity || trackSwitch;\n }\n\n private getDecrypter(): Decrypter {\n let decrypter = this.decrypter;\n if (!decrypter) {\n decrypter = this.decrypter = new Decrypter(this.config);\n }\n return decrypter;\n }\n}\n\nfunction getEncryptionType(\n data: Uint8Array,\n decryptData: DecryptData | null,\n): KeyData | null {\n let encryptionType: KeyData | null = null;\n if (\n data.byteLength > 0 &&\n decryptData?.key != null &&\n decryptData.iv !== null &&\n decryptData.method != null\n ) {\n encryptionType = decryptData as KeyData;\n }\n return encryptionType;\n}\n\nconst emptyResult = (chunkMeta): TransmuxerResult => ({\n remuxResult: {},\n chunkMeta,\n});\n\nexport function isPromise(p: Promise | any): p is Promise {\n return 'then' in p && p.then instanceof Function;\n}\n\nexport class TransmuxConfig {\n public audioCodec?: string;\n public videoCodec?: string;\n public initSegmentData?: Uint8Array;\n public duration: number;\n public defaultInitPts: RationalTimestamp | null;\n\n constructor(\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n initSegmentData: Uint8Array | undefined,\n duration: number,\n defaultInitPts?: RationalTimestamp,\n ) {\n this.audioCodec = audioCodec;\n this.videoCodec = videoCodec;\n this.initSegmentData = initSegmentData;\n this.duration = duration;\n this.defaultInitPts = defaultInitPts || null;\n }\n}\n\nexport class TransmuxState {\n public discontinuity: boolean;\n public contiguous: boolean;\n public accurateTimeOffset: boolean;\n public 
trackSwitch: boolean;\n public timeOffset: number;\n public initSegmentChange: boolean;\n\n constructor(\n discontinuity: boolean,\n contiguous: boolean,\n accurateTimeOffset: boolean,\n trackSwitch: boolean,\n timeOffset: number,\n initSegmentChange: boolean,\n ) {\n this.discontinuity = discontinuity;\n this.contiguous = contiguous;\n this.accurateTimeOffset = accurateTimeOffset;\n this.trackSwitch = trackSwitch;\n this.timeOffset = timeOffset;\n this.initSegmentChange = initSegmentChange;\n }\n}\n", "'use strict';\n\nvar has = Object.prototype.hasOwnProperty\n , prefix = '~';\n\n/**\n * Constructor to create a storage for our `EE` objects.\n * An `Events` instance is a plain object whose properties are event names.\n *\n * @constructor\n * @private\n */\nfunction Events() {}\n\n//\n// We try to not inherit from `Object.prototype`. In some engines creating an\n// instance in this way is faster than calling `Object.create(null)` directly.\n// If `Object.create(null)` is not supported we prefix the event names with a\n// character to make sure that the built-in object properties are not\n// overridden or used as an attack vector.\n//\nif (Object.create) {\n Events.prototype = Object.create(null);\n\n //\n // This hack is needed because the `__proto__` property is still inherited in\n // some old browsers like Android 4, iPhone 5.1, Opera 11 and Safari 5.\n //\n if (!new Events().__proto__) prefix = false;\n}\n\n/**\n * Representation of a single event listener.\n *\n * @param {Function} fn The listener function.\n * @param {*} context The context to invoke the listener with.\n * @param {Boolean} [once=false] Specify if the listener is a one-time listener.\n * @constructor\n * @private\n */\nfunction EE(fn, context, once) {\n this.fn = fn;\n this.context = context;\n this.once = once || false;\n}\n\n/**\n * Add a listener for a given event.\n *\n * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} context The context to invoke the listener with.\n * @param {Boolean} once Specify if the listener is a one-time listener.\n * @returns {EventEmitter}\n * @private\n */\nfunction addListener(emitter, event, fn, context, once) {\n if (typeof fn !== 'function') {\n throw new TypeError('The listener must be a function');\n }\n\n var listener = new EE(fn, context || emitter, once)\n , evt = prefix ? 
prefix + event : event;\n\n if (!emitter._events[evt]) emitter._events[evt] = listener, emitter._eventsCount++;\n else if (!emitter._events[evt].fn) emitter._events[evt].push(listener);\n else emitter._events[evt] = [emitter._events[evt], listener];\n\n return emitter;\n}\n\n/**\n * Clear event by name.\n *\n * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n * @param {(String|Symbol)} evt The Event name.\n * @private\n */\nfunction clearEvent(emitter, evt) {\n if (--emitter._eventsCount === 0) emitter._events = new Events();\n else delete emitter._events[evt];\n}\n\n/**\n * Minimal `EventEmitter` interface that is molded against the Node.js\n * `EventEmitter` interface.\n *\n * @constructor\n * @public\n */\nfunction EventEmitter() {\n this._events = new Events();\n this._eventsCount = 0;\n}\n\n/**\n * Return an array listing the events for which the emitter has registered\n * listeners.\n *\n * @returns {Array}\n * @public\n */\nEventEmitter.prototype.eventNames = function eventNames() {\n var names = []\n , events\n , name;\n\n if (this._eventsCount === 0) return names;\n\n for (name in (events = this._events)) {\n if (has.call(events, name)) names.push(prefix ? name.slice(1) : name);\n }\n\n if (Object.getOwnPropertySymbols) {\n return names.concat(Object.getOwnPropertySymbols(events));\n }\n\n return names;\n};\n\n/**\n * Return the listeners registered for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Array} The registered listeners.\n * @public\n */\nEventEmitter.prototype.listeners = function listeners(event) {\n var evt = prefix ? prefix + event : event\n , handlers = this._events[evt];\n\n if (!handlers) return [];\n if (handlers.fn) return [handlers.fn];\n\n for (var i = 0, l = handlers.length, ee = new Array(l); i < l; i++) {\n ee[i] = handlers[i].fn;\n }\n\n return ee;\n};\n\n/**\n * Return the number of listeners listening to a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Number} The number of listeners.\n * @public\n */\nEventEmitter.prototype.listenerCount = function listenerCount(event) {\n var evt = prefix ? prefix + event : event\n , listeners = this._events[evt];\n\n if (!listeners) return 0;\n if (listeners.fn) return 1;\n return listeners.length;\n};\n\n/**\n * Calls each of the listeners registered for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Boolean} `true` if the event had listeners, else `false`.\n * @public\n */\nEventEmitter.prototype.emit = function emit(event, a1, a2, a3, a4, a5) {\n var evt = prefix ? 
prefix + event : event;\n\n if (!this._events[evt]) return false;\n\n var listeners = this._events[evt]\n , len = arguments.length\n , args\n , i;\n\n if (listeners.fn) {\n if (listeners.once) this.removeListener(event, listeners.fn, undefined, true);\n\n switch (len) {\n case 1: return listeners.fn.call(listeners.context), true;\n case 2: return listeners.fn.call(listeners.context, a1), true;\n case 3: return listeners.fn.call(listeners.context, a1, a2), true;\n case 4: return listeners.fn.call(listeners.context, a1, a2, a3), true;\n case 5: return listeners.fn.call(listeners.context, a1, a2, a3, a4), true;\n case 6: return listeners.fn.call(listeners.context, a1, a2, a3, a4, a5), true;\n }\n\n for (i = 1, args = new Array(len -1); i < len; i++) {\n args[i - 1] = arguments[i];\n }\n\n listeners.fn.apply(listeners.context, args);\n } else {\n var length = listeners.length\n , j;\n\n for (i = 0; i < length; i++) {\n if (listeners[i].once) this.removeListener(event, listeners[i].fn, undefined, true);\n\n switch (len) {\n case 1: listeners[i].fn.call(listeners[i].context); break;\n case 2: listeners[i].fn.call(listeners[i].context, a1); break;\n case 3: listeners[i].fn.call(listeners[i].context, a1, a2); break;\n case 4: listeners[i].fn.call(listeners[i].context, a1, a2, a3); break;\n default:\n if (!args) for (j = 1, args = new Array(len -1); j < len; j++) {\n args[j - 1] = arguments[j];\n }\n\n listeners[i].fn.apply(listeners[i].context, args);\n }\n }\n }\n\n return true;\n};\n\n/**\n * Add a listener for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} [context=this] The context to invoke the listener with.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.on = function on(event, fn, context) {\n return addListener(this, event, fn, context, false);\n};\n\n/**\n * Add a one-time listener for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} [context=this] The context to invoke the listener with.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.once = function once(event, fn, context) {\n return addListener(this, event, fn, context, true);\n};\n\n/**\n * Remove the listeners of a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn Only remove the listeners that match this function.\n * @param {*} context Only remove the listeners that have this context.\n * @param {Boolean} once Only remove one-time listeners.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.removeListener = function removeListener(event, fn, context, once) {\n var evt = prefix ? prefix + event : event;\n\n if (!this._events[evt]) return this;\n if (!fn) {\n clearEvent(this, evt);\n return this;\n }\n\n var listeners = this._events[evt];\n\n if (listeners.fn) {\n if (\n listeners.fn === fn &&\n (!once || listeners.once) &&\n (!context || listeners.context === context)\n ) {\n clearEvent(this, evt);\n }\n } else {\n for (var i = 0, events = [], length = listeners.length; i < length; i++) {\n if (\n listeners[i].fn !== fn ||\n (once && !listeners[i].once) ||\n (context && listeners[i].context !== context)\n ) {\n events.push(listeners[i]);\n }\n }\n\n //\n // Reset the array, or remove it completely if we have no more listeners.\n //\n if (events.length) this._events[evt] = events.length === 1 ? 
events[0] : events;\n else clearEvent(this, evt);\n }\n\n return this;\n};\n\n/**\n * Remove all listeners, or those of the specified event.\n *\n * @param {(String|Symbol)} [event] The event name.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.removeAllListeners = function removeAllListeners(event) {\n var evt;\n\n if (event) {\n evt = prefix ? prefix + event : event;\n if (this._events[evt]) clearEvent(this, evt);\n } else {\n this._events = new Events();\n this._eventsCount = 0;\n }\n\n return this;\n};\n\n//\n// Alias methods names because people roll like that.\n//\nEventEmitter.prototype.off = EventEmitter.prototype.removeListener;\nEventEmitter.prototype.addListener = EventEmitter.prototype.on;\n\n//\n// Expose the prefix.\n//\nEventEmitter.prefixed = prefix;\n\n//\n// Allow `EventEmitter` to be imported as module namespace.\n//\nEventEmitter.EventEmitter = EventEmitter;\n\n//\n// Expose the module.\n//\nif ('undefined' !== typeof module) {\n module.exports = EventEmitter;\n}\n", "import {\n WorkerContext,\n hasUMDWorker,\n injectWorker,\n loadWorker,\n} from './inject-worker';\nimport { Events } from '../events';\nimport Transmuxer, {\n TransmuxConfig,\n TransmuxState,\n isPromise,\n} from '../demux/transmuxer';\nimport { logger } from '../utils/logger';\nimport { ErrorTypes, ErrorDetails } from '../errors';\nimport { getMediaSource } from '../utils/mediasource-helper';\nimport { EventEmitter } from 'eventemitter3';\nimport { Fragment, Part } from '../loader/fragment';\nimport type { ChunkMetadata, TransmuxerResult } from '../types/transmuxer';\nimport type Hls from '../hls';\nimport type { HlsEventEmitter, HlsListeners } from '../events';\nimport type { PlaylistLevelType } from '../types/loader';\nimport type { TypeSupported } from './tsdemuxer';\nimport type { RationalTimestamp } from '../utils/timescale-conversion';\n\nexport default class TransmuxerInterface {\n public error: Error | null = null;\n private hls: Hls;\n private id: PlaylistLevelType;\n private observer: HlsEventEmitter;\n private frag: Fragment | null = null;\n private part: Part | null = null;\n private useWorker: boolean;\n private workerContext: WorkerContext | null = null;\n private onwmsg?: (\n event: MessageEvent<{ event: string; data?: any } | null>,\n ) => void;\n private transmuxer: Transmuxer | null = null;\n private onTransmuxComplete: (transmuxResult: TransmuxerResult) => void;\n private onFlush: (chunkMeta: ChunkMetadata) => void;\n\n constructor(\n hls: Hls,\n id: PlaylistLevelType,\n onTransmuxComplete: (transmuxResult: TransmuxerResult) => void,\n onFlush: (chunkMeta: ChunkMetadata) => void,\n ) {\n const config = hls.config;\n this.hls = hls;\n this.id = id;\n this.useWorker = !!config.enableWorker;\n this.onTransmuxComplete = onTransmuxComplete;\n this.onFlush = onFlush;\n\n const forwardMessage = (ev, data) => {\n data = data || {};\n data.frag = this.frag;\n data.id = this.id;\n if (ev === Events.ERROR) {\n this.error = data.error;\n }\n this.hls.trigger(ev, data);\n };\n\n // forward events to main thread\n this.observer = new EventEmitter() as HlsEventEmitter;\n this.observer.on(Events.FRAG_DECRYPTED, forwardMessage);\n this.observer.on(Events.ERROR, forwardMessage);\n\n const MediaSource = getMediaSource(config.preferManagedMediaSource) || {\n isTypeSupported: () => false,\n };\n const m2tsTypeSupported: TypeSupported = {\n mpeg: MediaSource.isTypeSupported('audio/mpeg'),\n mp3: MediaSource.isTypeSupported('audio/mp4; codecs=\"mp3\"'),\n ac3: 
__USE_M2TS_ADVANCED_CODECS__\n ? MediaSource.isTypeSupported('audio/mp4; codecs=\"ac-3\"')\n : false,\n };\n\n if (this.useWorker && typeof Worker !== 'undefined') {\n const canCreateWorker = config.workerPath || hasUMDWorker();\n if (canCreateWorker) {\n try {\n if (config.workerPath) {\n logger.log(`loading Web Worker ${config.workerPath} for \"${id}\"`);\n this.workerContext = loadWorker(config.workerPath);\n } else {\n logger.log(`injecting Web Worker for \"${id}\"`);\n this.workerContext = injectWorker();\n }\n this.onwmsg = (event) => this.onWorkerMessage(event);\n const { worker } = this.workerContext;\n worker.addEventListener('message', this.onwmsg);\n worker.onerror = (event) => {\n const error = new Error(\n `${event.message} (${event.filename}:${event.lineno})`,\n );\n config.enableWorker = false;\n logger.warn(`Error in \"${id}\" Web Worker, fallback to inline`);\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.OTHER_ERROR,\n details: ErrorDetails.INTERNAL_EXCEPTION,\n fatal: false,\n event: 'demuxerWorker',\n error,\n });\n };\n worker.postMessage({\n cmd: 'init',\n typeSupported: m2tsTypeSupported,\n vendor: '',\n id: id,\n config: JSON.stringify(config),\n });\n } catch (err) {\n logger.warn(\n `Error setting up \"${id}\" Web Worker, fallback to inline`,\n err,\n );\n this.resetWorker();\n this.error = null;\n this.transmuxer = new Transmuxer(\n this.observer,\n m2tsTypeSupported,\n config,\n '',\n id,\n );\n }\n return;\n }\n }\n\n this.transmuxer = new Transmuxer(\n this.observer,\n m2tsTypeSupported,\n config,\n '',\n id,\n );\n }\n\n resetWorker() {\n if (this.workerContext) {\n const { worker, objectURL } = this.workerContext;\n if (objectURL) {\n // revoke the Object URL that was used to create transmuxer worker, so as not to leak it\n self.URL.revokeObjectURL(objectURL);\n }\n worker.removeEventListener('message', this.onwmsg as any);\n worker.onerror = null;\n worker.terminate();\n this.workerContext = null;\n }\n }\n\n destroy() {\n if (this.workerContext) {\n this.resetWorker();\n this.onwmsg = undefined;\n } else {\n const transmuxer = this.transmuxer;\n if (transmuxer) {\n transmuxer.destroy();\n this.transmuxer = null;\n }\n }\n const observer = this.observer;\n if (observer) {\n observer.removeAllListeners();\n }\n this.frag = null;\n // @ts-ignore\n this.observer = null;\n // @ts-ignore\n this.hls = null;\n }\n\n push(\n data: ArrayBuffer,\n initSegmentData: Uint8Array | undefined,\n audioCodec: string | undefined,\n videoCodec: string | undefined,\n frag: Fragment,\n part: Part | null,\n duration: number,\n accurateTimeOffset: boolean,\n chunkMeta: ChunkMetadata,\n defaultInitPTS?: RationalTimestamp,\n ) {\n chunkMeta.transmuxing.start = self.performance.now();\n const { transmuxer } = this;\n const timeOffset = part ? part.start : frag.start;\n // TODO: push \"clear-lead\" decrypt data for unencrypted fragments in streams with encrypted ones\n const decryptdata = frag.decryptdata;\n const lastFrag = this.frag;\n\n const discontinuity = !(lastFrag && frag.cc === lastFrag.cc);\n const trackSwitch = !(lastFrag && chunkMeta.level === lastFrag.level);\n const snDiff = lastFrag ? chunkMeta.sn - (lastFrag.sn as number) : -1;\n const partDiff = this.part ? 
chunkMeta.part - this.part.index : -1;\n const progressive =\n snDiff === 0 &&\n chunkMeta.id > 1 &&\n chunkMeta.id === lastFrag?.stats.chunkCount;\n const contiguous =\n !trackSwitch &&\n (snDiff === 1 ||\n (snDiff === 0 && (partDiff === 1 || (progressive && partDiff <= 0))));\n const now = self.performance.now();\n\n if (trackSwitch || snDiff || frag.stats.parsing.start === 0) {\n frag.stats.parsing.start = now;\n }\n if (part && (partDiff || !contiguous)) {\n part.stats.parsing.start = now;\n }\n const initSegmentChange = !(\n lastFrag && frag.initSegment?.url === lastFrag.initSegment?.url\n );\n const state = new TransmuxState(\n discontinuity,\n contiguous,\n accurateTimeOffset,\n trackSwitch,\n timeOffset,\n initSegmentChange,\n );\n if (!contiguous || discontinuity || initSegmentChange) {\n logger.log(`[transmuxer-interface, ${frag.type}]: Starting new transmux session for sn: ${chunkMeta.sn} p: ${chunkMeta.part} level: ${chunkMeta.level} id: ${chunkMeta.id}\n discontinuity: ${discontinuity}\n trackSwitch: ${trackSwitch}\n contiguous: ${contiguous}\n accurateTimeOffset: ${accurateTimeOffset}\n timeOffset: ${timeOffset}\n initSegmentChange: ${initSegmentChange}`);\n const config = new TransmuxConfig(\n audioCodec,\n videoCodec,\n initSegmentData,\n duration,\n defaultInitPTS,\n );\n this.configureTransmuxer(config);\n }\n\n this.frag = frag;\n this.part = part;\n\n // Frags with sn of 'initSegment' are not transmuxed\n if (this.workerContext) {\n // post fragment payload as transferable objects for ArrayBuffer (no copy)\n this.workerContext.worker.postMessage(\n {\n cmd: 'demux',\n data,\n decryptdata,\n chunkMeta,\n state,\n },\n data instanceof ArrayBuffer ? [data] : [],\n );\n } else if (transmuxer) {\n const transmuxResult = transmuxer.push(\n data,\n decryptdata,\n chunkMeta,\n state,\n );\n if (isPromise(transmuxResult)) {\n transmuxer.async = true;\n transmuxResult\n .then((data) => {\n this.handleTransmuxComplete(data);\n })\n .catch((error) => {\n this.transmuxerError(\n error,\n chunkMeta,\n 'transmuxer-interface push error',\n );\n });\n } else {\n transmuxer.async = false;\n this.handleTransmuxComplete(transmuxResult as TransmuxerResult);\n }\n }\n }\n\n flush(chunkMeta: ChunkMetadata) {\n chunkMeta.transmuxing.start = self.performance.now();\n const { transmuxer } = this;\n if (this.workerContext) {\n this.workerContext.worker.postMessage({\n cmd: 'flush',\n chunkMeta,\n });\n } else if (transmuxer) {\n let transmuxResult = transmuxer.flush(chunkMeta);\n const asyncFlush = isPromise(transmuxResult);\n if (asyncFlush || transmuxer.async) {\n if (!isPromise(transmuxResult)) {\n transmuxResult = Promise.resolve(transmuxResult);\n }\n transmuxResult\n .then((data) => {\n this.handleFlushResult(data, chunkMeta);\n })\n .catch((error) => {\n this.transmuxerError(\n error,\n chunkMeta,\n 'transmuxer-interface flush error',\n );\n });\n } else {\n this.handleFlushResult(\n transmuxResult as Array<TransmuxerResult>,\n chunkMeta,\n );\n }\n }\n }\n\n private transmuxerError(\n error: Error,\n chunkMeta: ChunkMetadata,\n reason: string,\n ) {\n if (!this.hls) {\n return;\n }\n this.error = error;\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_PARSING_ERROR,\n chunkMeta,\n frag: this.frag || undefined,\n fatal: false,\n error,\n err: error,\n reason,\n });\n }\n\n private handleFlushResult(\n results: Array<TransmuxerResult>,\n chunkMeta: ChunkMetadata,\n ) {\n results.forEach((result) => {\n this.handleTransmuxComplete(result);\n });\n this.onFlush(chunkMeta);\n 
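// Each flushed result is forwarded through handleTransmuxComplete (which stamps chunkMeta.transmuxing.end) before onFlush notifies the owner that parsing of this chunk is complete.\n 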
}\n\n private onWorkerMessage(\n event: MessageEvent<{ event: string; data?: any } | null>,\n ) {\n const data = event.data;\n if (!data?.event) {\n logger.warn(\n `worker message received with no ${data ? 'event name' : 'data'}`,\n );\n return;\n }\n const hls = this.hls;\n if (!this.hls) {\n return;\n }\n switch (data.event) {\n case 'init': {\n const objectURL = this.workerContext?.objectURL;\n if (objectURL) {\n // revoke the Object URL that was used to create transmuxer worker, so as not to leak it\n self.URL.revokeObjectURL(objectURL);\n }\n break;\n }\n\n case 'transmuxComplete': {\n this.handleTransmuxComplete(data.data);\n break;\n }\n\n case 'flush': {\n this.onFlush(data.data);\n break;\n }\n\n // pass logs from the worker thread to the main logger\n case 'workerLog':\n if (logger[data.data.logType]) {\n logger[data.data.logType](data.data.message);\n }\n break;\n\n default: {\n data.data = data.data || {};\n data.data.frag = this.frag;\n data.data.id = this.id;\n hls.trigger(data.event as keyof HlsListeners, data.data);\n break;\n }\n }\n }\n\n private configureTransmuxer(config: TransmuxConfig) {\n const { transmuxer } = this;\n if (this.workerContext) {\n this.workerContext.worker.postMessage({\n cmd: 'configure',\n config,\n });\n } else if (transmuxer) {\n transmuxer.configure(config);\n }\n }\n\n private handleTransmuxComplete(result: TransmuxerResult) {\n result.chunkMeta.transmuxing.end = self.performance.now();\n this.onTransmuxComplete(result);\n }\n}\n", "import BaseStreamController, { State } from './base-stream-controller';\nimport { Events } from '../events';\nimport { Bufferable, BufferHelper } from '../utils/buffer-helper';\nimport { FragmentState } from './fragment-tracker';\nimport { Level } from '../types/level';\nimport { PlaylistContextType, PlaylistLevelType } from '../types/loader';\nimport { Fragment, ElementaryStreamTypes, Part } from '../loader/fragment';\nimport ChunkCache from '../demux/chunk-cache';\nimport TransmuxerInterface from '../demux/transmuxer-interface';\nimport { ChunkMetadata } from '../types/transmuxer';\nimport { fragmentWithinToleranceTest } from './fragment-finders';\nimport { alignMediaPlaylistByPDT } from '../utils/discontinuities';\nimport { ErrorDetails } from '../errors';\nimport { audioMatchPredicate, matchesOption } from '../utils/rendition-helper';\nimport type { NetworkComponentAPI } from '../types/component-api';\nimport type Hls from '../hls';\nimport type { FragmentTracker } from './fragment-tracker';\nimport type KeyLoader from '../loader/key-loader';\nimport type { TransmuxerResult } from '../types/transmuxer';\nimport type { LevelDetails } from '../loader/level-details';\nimport type { TrackSet } from '../types/track';\nimport type {\n BufferCreatedData,\n AudioTracksUpdatedData,\n AudioTrackSwitchingData,\n LevelLoadedData,\n TrackLoadedData,\n BufferAppendingData,\n BufferFlushedData,\n InitPTSFoundData,\n FragLoadedData,\n FragParsingMetadataData,\n FragParsingUserdataData,\n FragBufferedData,\n ErrorData,\n BufferFlushingData,\n} from '../types/events';\nimport type { MediaPlaylist } from '../types/media-playlist';\n\nconst TICK_INTERVAL = 100; // how often to tick in ms\n\ntype WaitingForPTSData = {\n frag: Fragment;\n part: Part | null;\n cache: ChunkCache;\n complete: boolean;\n};\n\nclass AudioStreamController\n extends BaseStreamController\n implements NetworkComponentAPI\n{\n private videoBuffer: Bufferable | null = null;\n private videoTrackCC: number = -1;\n private waitingVideoCC: number = -1;\n private 
bufferedTrack: MediaPlaylist | null = null;\n private switchingTrack: MediaPlaylist | null = null;\n private trackId: number = -1;\n private waitingData: WaitingForPTSData | null = null;\n private mainDetails: LevelDetails | null = null;\n private flushing: boolean = false;\n private bufferFlushed: boolean = false;\n private cachedTrackLoadedData: TrackLoadedData | null = null;\n\n constructor(\n hls: Hls,\n fragmentTracker: FragmentTracker,\n keyLoader: KeyLoader,\n ) {\n super(\n hls,\n fragmentTracker,\n keyLoader,\n '[audio-stream-controller]',\n PlaylistLevelType.AUDIO,\n );\n this._registerListeners();\n }\n\n protected onHandlerDestroying() {\n this._unregisterListeners();\n super.onHandlerDestroying();\n this.mainDetails = null;\n this.bufferedTrack = null;\n this.switchingTrack = null;\n }\n\n private _registerListeners() {\n const { hls } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.on(Events.AUDIO_TRACKS_UPDATED, this.onAudioTracksUpdated, this);\n hls.on(Events.AUDIO_TRACK_SWITCHING, this.onAudioTrackSwitching, this);\n hls.on(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n hls.on(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.on(Events.BUFFER_CREATED, this.onBufferCreated, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.BUFFER_FLUSHED, this.onBufferFlushed, this);\n hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);\n hls.on(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n\n private _unregisterListeners() {\n const { hls } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.off(Events.AUDIO_TRACKS_UPDATED, this.onAudioTracksUpdated, this);\n hls.off(Events.AUDIO_TRACK_SWITCHING, this.onAudioTrackSwitching, this);\n hls.off(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n hls.off(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.off(Events.BUFFER_CREATED, this.onBufferCreated, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.BUFFER_FLUSHED, this.onBufferFlushed, this);\n hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);\n hls.off(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n\n // INIT_PTS_FOUND is triggered when the video track parsed in the stream-controller has a new PTS value\n onInitPtsFound(\n event: Events.INIT_PTS_FOUND,\n { frag, id, initPTS, timescale }: InitPTSFoundData,\n ) {\n // Always update the new INIT PTS\n // Can change due to a level switch\n if (id === 'main') {\n const cc = frag.cc;\n this.initPTS[frag.cc] = { baseTime: initPTS, timescale };\n this.log(`InitPTS for cc: ${cc} found from main: ${initPTS}`);\n this.videoTrackCC = cc;\n // If we are waiting, tick immediately to unblock audio fragment transmuxing\n if (this.state === State.WAITING_INIT_PTS) {\n this.tick();\n }\n }\n }\n\n startLoad(startPosition: number) {\n if (!this.levels) {\n this.startPosition = startPosition;\n this.state = State.STOPPED;\n return;\n }\n const lastCurrentTime = this.lastCurrentTime;\n this.stopLoad();\n 
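// (Re)arm the ticker: doTick() will run every TICK_INTERVAL (100 ms) until stopLoad() is called again.\n 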
this.setInterval(TICK_INTERVAL);\n if (lastCurrentTime > 0 && startPosition === -1) {\n this.log(\n `Override startPosition with lastCurrentTime @${lastCurrentTime.toFixed(\n 3,\n )}`,\n );\n startPosition = lastCurrentTime;\n this.state = State.IDLE;\n } else {\n this.loadedmetadata = false;\n this.state = State.WAITING_TRACK;\n }\n this.nextLoadPosition =\n this.startPosition =\n this.lastCurrentTime =\n startPosition;\n\n this.tick();\n }\n\n doTick() {\n switch (this.state) {\n case State.IDLE:\n this.doTickIdle();\n break;\n case State.WAITING_TRACK: {\n const { levels, trackId } = this;\n const details = levels?.[trackId]?.details;\n if (details) {\n if (this.waitForCdnTuneIn(details)) {\n break;\n }\n this.state = State.WAITING_INIT_PTS;\n }\n break;\n }\n case State.FRAG_LOADING_WAITING_RETRY: {\n const now = performance.now();\n const retryDate = this.retryDate;\n // if current time is greater than retryDate, or if media is seeking, switch back to IDLE state to retry loading\n if (!retryDate || now >= retryDate || this.media?.seeking) {\n const { levels, trackId } = this;\n this.log('RetryDate reached, switch back to IDLE state');\n this.resetStartWhenNotLoaded(levels?.[trackId] || null);\n this.state = State.IDLE;\n }\n break;\n }\n case State.WAITING_INIT_PTS: {\n // Ensure we don't get stuck in the WAITING_INIT_PTS state if the waiting frag CC doesn't match any initPTS\n const waitingData = this.waitingData;\n if (waitingData) {\n const { frag, part, cache, complete } = waitingData;\n if (this.initPTS[frag.cc] !== undefined) {\n this.waitingData = null;\n this.waitingVideoCC = -1;\n this.state = State.FRAG_LOADING;\n const payload = cache.flush();\n const data: FragLoadedData = {\n frag,\n part,\n payload,\n networkDetails: null,\n };\n this._handleFragmentLoadProgress(data);\n if (complete) {\n super._handleFragmentLoadComplete(data);\n }\n } else if (this.videoTrackCC !== this.waitingVideoCC) {\n // Drop waiting fragment if videoTrackCC has changed since waitingFragment was set and initPTS was not found\n this.log(\n `Waiting fragment cc (${frag.cc}) cancelled because video is at cc ${this.videoTrackCC}`,\n );\n this.clearWaitingFragment();\n } else {\n // Drop waiting fragment if an earlier fragment is needed\n const pos = this.getLoadPosition();\n const bufferInfo = BufferHelper.bufferInfo(\n this.mediaBuffer,\n pos,\n this.config.maxBufferHole,\n );\n const waitingFragmentAtPosition = fragmentWithinToleranceTest(\n bufferInfo.end,\n this.config.maxFragLookUpTolerance,\n frag,\n );\n if (waitingFragmentAtPosition < 0) {\n this.log(\n `Waiting fragment cc (${frag.cc}) @ ${frag.start} cancelled because another fragment at ${bufferInfo.end} is needed`,\n );\n this.clearWaitingFragment();\n }\n }\n } else {\n this.state = State.IDLE;\n }\n }\n }\n\n this.onTickEnd();\n }\n\n clearWaitingFragment() {\n const waitingData = this.waitingData;\n if (waitingData) {\n this.fragmentTracker.removeFragment(waitingData.frag);\n this.waitingData = null;\n this.waitingVideoCC = -1;\n this.state = State.IDLE;\n }\n }\n\n protected resetLoadingState() {\n this.clearWaitingFragment();\n super.resetLoadingState();\n }\n\n protected onTickEnd() {\n const { media } = this;\n if (!media?.readyState) {\n // Exit early if we don't have media or if the media hasn't buffered anything yet (readyState 0)\n return;\n }\n\n this.lastCurrentTime = media.currentTime;\n }\n\n private doTickIdle() {\n const { hls, levels, media, trackId } = this;\n const config = hls.config;\n\n // 1. 
if buffering is suspended\n // 2. if video not attached AND\n // start fragment already requested OR start frag prefetch not enabled\n // 3. if tracks or track not loaded and selected\n // then exit loop\n // => if media not attached but start frag prefetch is enabled and start frag not requested yet, we will not exit loop\n if (\n !this.buffering ||\n (!media && (this.startFragRequested || !config.startFragPrefetch)) ||\n !levels?.[trackId]\n ) {\n return;\n }\n\n const levelInfo = levels[trackId];\n\n const trackDetails = levelInfo.details;\n if (\n !trackDetails ||\n (trackDetails.live && this.levelLastLoaded !== levelInfo) ||\n this.waitForCdnTuneIn(trackDetails)\n ) {\n this.state = State.WAITING_TRACK;\n return;\n }\n\n const bufferable = this.mediaBuffer ? this.mediaBuffer : this.media;\n if (this.bufferFlushed && bufferable) {\n this.bufferFlushed = false;\n this.afterBufferFlushed(\n bufferable,\n ElementaryStreamTypes.AUDIO,\n PlaylistLevelType.AUDIO,\n );\n }\n\n const bufferInfo = this.getFwdBufferInfo(\n bufferable,\n PlaylistLevelType.AUDIO,\n );\n if (bufferInfo === null) {\n return;\n }\n\n if (!this.switchingTrack && this._streamEnded(bufferInfo, trackDetails)) {\n hls.trigger(Events.BUFFER_EOS, { type: 'audio' });\n this.state = State.ENDED;\n return;\n }\n\n const mainBufferInfo = this.getFwdBufferInfo(\n this.videoBuffer ? this.videoBuffer : this.media,\n PlaylistLevelType.MAIN,\n );\n const bufferLen = bufferInfo.len;\n const maxBufLen = this.getMaxBufferLength(mainBufferInfo?.len);\n\n const fragments = trackDetails.fragments;\n const start = fragments[0].start;\n const loadPosition = this.getLoadPosition();\n const targetBufferTime = this.flushing ? loadPosition : bufferInfo.end;\n\n if (this.switchingTrack && media) {\n const pos = loadPosition;\n // if currentTime (pos) is less than alt audio playlist start time, it means that alt audio is ahead of currentTime\n if (trackDetails.PTSKnown && pos < start) {\n // if everything is buffered from pos to start, or if the audio buffer is ahead, seek to the start of the alt playlist\n if (bufferInfo.end > start || bufferInfo.nextStart) {\n this.log(\n 'Alt audio track ahead of main track, seek to start of alt audio track',\n );\n media.currentTime = start + 0.05;\n }\n }\n }\n\n // if buffer length is less than maxBufLen, or near the end, find a fragment to load\n if (\n bufferLen >= maxBufLen &&\n !this.switchingTrack &&\n targetBufferTime < fragments[fragments.length - 1].start\n ) {\n return;\n }\n\n let frag = this.getNextFragment(targetBufferTime, trackDetails);\n let atGap = false;\n // Avoid loop loading by using nextLoadPosition set for backtracking and skipping consecutive GAP tags\n if (frag && this.isLoopLoading(frag, targetBufferTime)) {\n atGap = !!frag.gap;\n frag = this.getNextFragmentLoopLoading(\n frag,\n trackDetails,\n bufferInfo,\n PlaylistLevelType.MAIN,\n maxBufLen,\n );\n }\n if (!frag) {\n this.bufferFlushed = true;\n return;\n }\n\n // Buffer audio up to one target duration ahead of main buffer\n const atBufferSyncLimit =\n mainBufferInfo &&\n frag.start > mainBufferInfo.end + trackDetails.targetduration;\n if (\n atBufferSyncLimit ||\n // Or wait for main buffer after buffering some audio\n (!mainBufferInfo?.len && bufferInfo.len)\n ) {\n // Check fragment-tracker for main fragments since GAP segments do not show up in bufferInfo\n const mainFrag = this.getAppendedFrag(frag.start, PlaylistLevelType.MAIN);\n if (mainFrag === null) {\n return;\n }\n // Bridge gaps in main buffer\n atGap ||=\n !!mainFrag.gap || 
(!!atBufferSyncLimit && mainBufferInfo.len === 0);\n if (\n (atBufferSyncLimit && !atGap) ||\n (atGap && bufferInfo.nextStart && bufferInfo.nextStart < mainFrag.end)\n ) {\n return;\n }\n }\n\n this.loadFragment(frag, levelInfo, targetBufferTime);\n }\n\n protected getMaxBufferLength(mainBufferLength?: number): number {\n const maxConfigBuffer = super.getMaxBufferLength();\n if (!mainBufferLength) {\n return maxConfigBuffer;\n }\n return Math.min(\n Math.max(maxConfigBuffer, mainBufferLength),\n this.config.maxMaxBufferLength,\n );\n }\n\n onMediaDetaching() {\n this.videoBuffer = null;\n this.bufferFlushed = this.flushing = false;\n super.onMediaDetaching();\n }\n\n onAudioTracksUpdated(\n event: Events.AUDIO_TRACKS_UPDATED,\n { audioTracks }: AudioTracksUpdatedData,\n ) {\n // Resetting the transmuxer is essential for large context switches (Content Steering)\n this.resetTransmuxer();\n this.levels = audioTracks.map((mediaPlaylist) => new Level(mediaPlaylist));\n }\n\n onAudioTrackSwitching(\n event: Events.AUDIO_TRACK_SWITCHING,\n data: AudioTrackSwitchingData,\n ) {\n // if any URL found on new audio track, it is an alternate audio track\n const altAudio = !!data.url;\n this.trackId = data.id;\n const { fragCurrent } = this;\n\n if (fragCurrent) {\n fragCurrent.abortRequests();\n this.removeUnbufferedFrags(fragCurrent.start);\n }\n this.resetLoadingState();\n // destroy useless transmuxer when switching audio to main\n if (!altAudio) {\n this.resetTransmuxer();\n } else {\n // switching to audio track, start timer if not already started\n this.setInterval(TICK_INTERVAL);\n }\n\n // should we switch tracks?\n if (altAudio) {\n this.switchingTrack = data;\n // main audio tracks are handled by the stream-controller; only act here when switching to an alt audio track\n this.state = State.IDLE;\n this.flushAudioIfNeeded(data);\n } else {\n this.switchingTrack = null;\n this.bufferedTrack = data;\n this.state = State.STOPPED;\n }\n this.tick();\n }\n\n onManifestLoading() {\n this.fragmentTracker.removeAllFragments();\n this.startPosition = this.lastCurrentTime = 0;\n this.bufferFlushed = this.flushing = false;\n this.levels =\n this.mainDetails =\n this.waitingData =\n this.bufferedTrack =\n this.cachedTrackLoadedData =\n this.switchingTrack =\n null;\n this.startFragRequested = false;\n this.trackId = this.videoTrackCC = this.waitingVideoCC = -1;\n }\n\n onLevelLoaded(event: Events.LEVEL_LOADED, data: LevelLoadedData) {\n this.mainDetails = data.details;\n if (this.cachedTrackLoadedData !== null) {\n this.hls.trigger(Events.AUDIO_TRACK_LOADED, this.cachedTrackLoadedData);\n this.cachedTrackLoadedData = null;\n }\n }\n\n onAudioTrackLoaded(event: Events.AUDIO_TRACK_LOADED, data: TrackLoadedData) {\n if (this.mainDetails == null) {\n this.cachedTrackLoadedData = data;\n return;\n }\n const { levels } = this;\n const { details: newDetails, id: trackId } = data;\n if (!levels) {\n this.warn(`Audio tracks were reset while loading level ${trackId}`);\n return;\n }\n this.log(\n `Audio track ${trackId} loaded [${newDetails.startSN},${\n newDetails.endSN\n }]${\n newDetails.lastPartSn\n ? 
`[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]`\n : ''\n },duration:${newDetails.totalduration}`,\n );\n\n const track = levels[trackId];\n let sliding = 0;\n if (newDetails.live || track.details?.live) {\n this.checkLiveUpdate(newDetails);\n const mainDetails = this.mainDetails;\n if (newDetails.deltaUpdateFailed || !mainDetails) {\n return;\n }\n if (\n !track.details &&\n newDetails.hasProgramDateTime &&\n mainDetails.hasProgramDateTime\n ) {\n // Make sure our audio rendition is aligned with the \"main\" rendition, using\n // pdt as our reference times.\n alignMediaPlaylistByPDT(newDetails, mainDetails);\n sliding = newDetails.fragments[0].start;\n } else {\n sliding = this.alignPlaylists(\n newDetails,\n track.details,\n this.levelLastLoaded?.details,\n );\n }\n }\n track.details = newDetails;\n this.levelLastLoaded = track;\n\n // compute start position if we are aligned with the main playlist\n if (!this.startFragRequested && (this.mainDetails || !newDetails.live)) {\n this.setStartPosition(this.mainDetails || newDetails, sliding);\n }\n // only switch back to IDLE state if we were waiting for track to start downloading a new fragment\n if (\n this.state === State.WAITING_TRACK &&\n !this.waitForCdnTuneIn(newDetails)\n ) {\n this.state = State.IDLE;\n }\n\n // trigger handler right now\n this.tick();\n }\n\n _handleFragmentLoadProgress(data: FragLoadedData) {\n const { frag, part, payload } = data;\n const { config, trackId, levels } = this;\n if (!levels) {\n this.warn(\n `Audio tracks were reset while fragment load was in progress. Fragment ${frag.sn} of level ${frag.level} will not be buffered`,\n );\n return;\n }\n\n const track = levels[trackId] as Level;\n if (!track) {\n this.warn('Audio track is undefined on fragment load progress');\n return;\n }\n const details = track.details as LevelDetails;\n if (!details) {\n this.warn('Audio track details undefined on fragment load progress');\n this.removeUnbufferedFrags(frag.start);\n return;\n }\n const audioCodec =\n config.defaultAudioCodec || track.audioCodec || 'mp4a.40.2';\n\n let transmuxer = this.transmuxer;\n if (!transmuxer) {\n transmuxer = this.transmuxer = new TransmuxerInterface(\n this.hls,\n PlaylistLevelType.AUDIO,\n this._handleTransmuxComplete.bind(this),\n this._handleTransmuxerFlush.bind(this),\n );\n }\n\n // Check if we have video initPTS\n // If not we need to wait for it\n const initPTS = this.initPTS[frag.cc];\n const initSegmentData = frag.initSegment?.data;\n if (initPTS !== undefined) {\n // this.log(`Transmuxing ${sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`);\n // time Offset is accurate if level PTS is known, or if playlist is not sliding (not live)\n const accurateTimeOffset = false; // details.PTSKnown || !details.live;\n const partIndex = part ? 
part.index : -1;\n const partial = partIndex !== -1;\n const chunkMeta = new ChunkMetadata(\n frag.level,\n frag.sn as number,\n frag.stats.chunkCount,\n payload.byteLength,\n partIndex,\n partial,\n );\n transmuxer.push(\n payload,\n initSegmentData,\n audioCodec,\n '',\n frag,\n part,\n details.totalduration,\n accurateTimeOffset,\n chunkMeta,\n initPTS,\n );\n } else {\n this.log(\n `Unknown video PTS for cc ${frag.cc}, waiting for video PTS before demuxing audio frag ${frag.sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`,\n );\n const { cache } = (this.waitingData = this.waitingData || {\n frag,\n part,\n cache: new ChunkCache(),\n complete: false,\n });\n cache.push(new Uint8Array(payload));\n this.waitingVideoCC = this.videoTrackCC;\n this.state = State.WAITING_INIT_PTS;\n }\n }\n\n protected _handleFragmentLoadComplete(fragLoadedData: FragLoadedData) {\n if (this.waitingData) {\n this.waitingData.complete = true;\n return;\n }\n super._handleFragmentLoadComplete(fragLoadedData);\n }\n\n onBufferReset(/* event: Events.BUFFER_RESET */) {\n // reset reference to sourcebuffers\n this.mediaBuffer = this.videoBuffer = null;\n this.loadedmetadata = false;\n }\n\n onBufferCreated(event: Events.BUFFER_CREATED, data: BufferCreatedData) {\n const audioTrack = data.tracks.audio;\n if (audioTrack) {\n this.mediaBuffer = audioTrack.buffer || null;\n }\n if (data.tracks.video) {\n this.videoBuffer = data.tracks.video.buffer || null;\n }\n }\n\n onFragBuffered(event: Events.FRAG_BUFFERED, data: FragBufferedData) {\n const { frag, part } = data;\n if (frag.type !== PlaylistLevelType.AUDIO) {\n if (!this.loadedmetadata && frag.type === PlaylistLevelType.MAIN) {\n const bufferable = this.videoBuffer || this.media;\n if (bufferable) {\n const bufferedTimeRanges = BufferHelper.getBuffered(bufferable);\n if (bufferedTimeRanges.length) {\n this.loadedmetadata = true;\n }\n }\n }\n return;\n }\n if (this.fragContextChanged(frag)) {\n // If a level switch was requested while a fragment was buffering, it will emit the FRAG_BUFFERED event upon completion\n // Avoid setting state back to IDLE or concluding the audio switch; otherwise, the switched-to track will not buffer\n this.warn(\n `Fragment ${frag.sn}${part ? ' p: ' + part.index : ''} of level ${\n frag.level\n } finished buffering, but was aborted. state: ${\n this.state\n }, audioSwitch: ${\n this.switchingTrack ? 
this.switchingTrack.name : 'false'\n }`,\n );\n return;\n }\n if (frag.sn !== 'initSegment') {\n this.fragPrevious = frag;\n const track = this.switchingTrack;\n if (track) {\n this.bufferedTrack = track;\n this.switchingTrack = null;\n this.hls.trigger(Events.AUDIO_TRACK_SWITCHED, { ...track });\n }\n }\n this.fragBufferedComplete(frag, part);\n }\n\n private onError(event: Events.ERROR, data: ErrorData) {\n if (data.fatal) {\n this.state = State.ERROR;\n return;\n }\n switch (data.details) {\n case ErrorDetails.FRAG_GAP:\n case ErrorDetails.FRAG_PARSING_ERROR:\n case ErrorDetails.FRAG_DECRYPT_ERROR:\n case ErrorDetails.FRAG_LOAD_ERROR:\n case ErrorDetails.FRAG_LOAD_TIMEOUT:\n case ErrorDetails.KEY_LOAD_ERROR:\n case ErrorDetails.KEY_LOAD_TIMEOUT:\n this.onFragmentOrKeyLoadError(PlaylistLevelType.AUDIO, data);\n break;\n case ErrorDetails.AUDIO_TRACK_LOAD_ERROR:\n case ErrorDetails.AUDIO_TRACK_LOAD_TIMEOUT:\n case ErrorDetails.LEVEL_PARSING_ERROR:\n // in case of non fatal error while loading track, if not retrying to load track, switch back to IDLE\n if (\n !data.levelRetry &&\n this.state === State.WAITING_TRACK &&\n data.context?.type === PlaylistContextType.AUDIO_TRACK\n ) {\n this.state = State.IDLE;\n }\n break;\n case ErrorDetails.BUFFER_APPEND_ERROR:\n case ErrorDetails.BUFFER_FULL_ERROR:\n if (!data.parent || data.parent !== 'audio') {\n return;\n }\n if (data.details === ErrorDetails.BUFFER_APPEND_ERROR) {\n this.resetLoadingState();\n return;\n }\n if (this.reduceLengthAndFlushBuffer(data)) {\n this.bufferedTrack = null;\n super.flushMainBuffer(0, Number.POSITIVE_INFINITY, 'audio');\n }\n break;\n case ErrorDetails.INTERNAL_EXCEPTION:\n this.recoverWorkerError(data);\n break;\n default:\n break;\n }\n }\n\n private onBufferFlushing(\n event: Events.BUFFER_FLUSHING,\n { type }: BufferFlushingData,\n ) {\n if (type !== ElementaryStreamTypes.VIDEO) {\n this.flushing = true;\n }\n }\n\n private onBufferFlushed(\n event: Events.BUFFER_FLUSHED,\n { type }: BufferFlushedData,\n ) {\n if (type !== ElementaryStreamTypes.VIDEO) {\n this.flushing = false;\n this.bufferFlushed = true;\n if (this.state === State.ENDED) {\n this.state = State.IDLE;\n }\n const mediaBuffer = this.mediaBuffer || this.media;\n if (mediaBuffer) {\n this.afterBufferFlushed(mediaBuffer, type, PlaylistLevelType.AUDIO);\n this.tick();\n }\n }\n }\n\n private _handleTransmuxComplete(transmuxResult: TransmuxerResult) {\n const id = 'audio';\n const { hls } = this;\n const { remuxResult, chunkMeta } = transmuxResult;\n\n const context = this.getCurrentContext(chunkMeta);\n if (!context) {\n this.resetWhenMissingContext(chunkMeta);\n return;\n }\n const { frag, part, level } = context;\n const { details } = level;\n const { audio, text, id3, initSegment } = remuxResult;\n\n // Check if the current fragment has been aborted. 
We check this by first seeing if we're still playing the current level.\n // If we are, subsequently check if the currently loading fragment (fragCurrent) has changed.\n if (this.fragContextChanged(frag) || !details) {\n this.fragmentTracker.removeFragment(frag);\n return;\n }\n\n this.state = State.PARSING;\n if (this.switchingTrack && audio) {\n this.completeAudioSwitch(this.switchingTrack);\n }\n\n if (initSegment?.tracks) {\n const mapFragment = frag.initSegment || frag;\n this._bufferInitSegment(\n level,\n initSegment.tracks,\n mapFragment,\n chunkMeta,\n );\n hls.trigger(Events.FRAG_PARSING_INIT_SEGMENT, {\n frag: mapFragment,\n id,\n tracks: initSegment.tracks,\n });\n // Only flush audio from old audio tracks when PTS is known on new audio track\n }\n if (audio) {\n const { startPTS, endPTS, startDTS, endDTS } = audio;\n if (part) {\n part.elementaryStreams[ElementaryStreamTypes.AUDIO] = {\n startPTS,\n endPTS,\n startDTS,\n endDTS,\n };\n }\n frag.setElementaryStreamInfo(\n ElementaryStreamTypes.AUDIO,\n startPTS,\n endPTS,\n startDTS,\n endDTS,\n );\n this.bufferFragmentData(audio, frag, part, chunkMeta);\n }\n\n if (id3?.samples?.length) {\n const emittedID3: FragParsingMetadataData = Object.assign(\n {\n id,\n frag,\n details,\n },\n id3,\n );\n hls.trigger(Events.FRAG_PARSING_METADATA, emittedID3);\n }\n if (text) {\n const emittedText: FragParsingUserdataData = Object.assign(\n {\n id,\n frag,\n details,\n },\n text,\n );\n hls.trigger(Events.FRAG_PARSING_USERDATA, emittedText);\n }\n }\n\n private _bufferInitSegment(\n currentLevel: Level,\n tracks: TrackSet,\n frag: Fragment,\n chunkMeta: ChunkMetadata,\n ) {\n if (this.state !== State.PARSING) {\n return;\n }\n // delete any video track found on audio transmuxer\n if (tracks.video) {\n delete tracks.video;\n }\n\n // include levelCodec in audio and video tracks\n const track = tracks.audio;\n if (!track) {\n return;\n }\n\n track.id = 'audio';\n\n const variantAudioCodecs = currentLevel.audioCodec;\n this.log(\n `Init audio buffer, container:${track.container}, codecs[level/parsed]=[${variantAudioCodecs}/${track.codec}]`,\n );\n // SourceBuffer will use track.levelCodec if defined\n if (variantAudioCodecs && variantAudioCodecs.split(',').length === 1) {\n track.levelCodec = variantAudioCodecs;\n }\n this.hls.trigger(Events.BUFFER_CODECS, tracks);\n const initSegment = track.initSegment;\n if (initSegment?.byteLength) {\n const segment: BufferAppendingData = {\n type: 'audio',\n frag,\n part: null,\n chunkMeta,\n parent: frag.type,\n data: initSegment,\n };\n this.hls.trigger(Events.BUFFER_APPENDING, segment);\n }\n // trigger handler right now\n this.tickImmediate();\n }\n\n protected loadFragment(\n frag: Fragment,\n track: Level,\n targetBufferTime: number,\n ) {\n // only load if fragment is not loaded or if in audio switch\n const fragState = this.fragmentTracker.getState(frag);\n this.fragCurrent = frag;\n\n // we force a frag loading in audio switch as fragment tracker might not have evicted previous frags in case of quick audio switch\n if (\n this.switchingTrack ||\n fragState === FragmentState.NOT_LOADED ||\n fragState === FragmentState.PARTIAL\n ) {\n if (frag.sn === 'initSegment') {\n this._loadInitSegment(frag, track);\n } else if (track.details?.live && !this.initPTS[frag.cc]) {\n this.log(\n `Waiting for video PTS in continuity counter ${frag.cc} of live stream before loading audio fragment ${frag.sn} of level ${this.trackId}`,\n );\n this.state = State.WAITING_INIT_PTS;\n const mainDetails = 
this.mainDetails;\n if (\n mainDetails &&\n mainDetails.fragments[0].start !== track.details.fragments[0].start\n ) {\n alignMediaPlaylistByPDT(track.details, mainDetails);\n }\n } else {\n this.startFragRequested = true;\n super.loadFragment(frag, track, targetBufferTime);\n }\n } else {\n this.clearTrackerIfNeeded(frag);\n }\n }\n\n private flushAudioIfNeeded(switchingTrack: MediaPlaylist) {\n if (this.media && this.bufferedTrack) {\n const { name, lang, assocLang, characteristics, audioCodec, channels } =\n this.bufferedTrack;\n if (\n !matchesOption(\n { name, lang, assocLang, characteristics, audioCodec, channels },\n switchingTrack,\n audioMatchPredicate,\n )\n ) {\n this.log('Switching audio track: flushing all audio');\n super.flushMainBuffer(0, Number.POSITIVE_INFINITY, 'audio');\n this.bufferedTrack = null;\n }\n }\n }\n\n private completeAudioSwitch(switchingTrack: MediaPlaylist) {\n const { hls } = this;\n this.flushAudioIfNeeded(switchingTrack);\n this.bufferedTrack = switchingTrack;\n this.switchingTrack = null;\n hls.trigger(Events.AUDIO_TRACK_SWITCHED, { ...switchingTrack });\n }\n}\nexport default AudioStreamController;\n", "import type { Level } from '../types/level';\nimport type { MediaAttributes, MediaPlaylist } from '../types/media-playlist';\n\nexport function subtitleOptionsIdentical(\n trackList1: MediaPlaylist[] | Level[],\n trackList2: MediaPlaylist[],\n): boolean {\n if (trackList1.length !== trackList2.length) {\n return false;\n }\n for (let i = 0; i < trackList1.length; i++) {\n if (\n !mediaAttributesIdentical(\n trackList1[i].attrs as MediaAttributes,\n trackList2[i].attrs,\n )\n ) {\n return false;\n }\n }\n return true;\n}\n\nexport function mediaAttributesIdentical(\n attrs1: MediaAttributes,\n attrs2: MediaAttributes,\n customAttributes?: string[],\n): boolean {\n // Media options with the same rendition ID must be bit identical\n const stableRenditionId = attrs1['STABLE-RENDITION-ID'];\n if (stableRenditionId && !customAttributes) {\n return stableRenditionId === attrs2['STABLE-RENDITION-ID'];\n }\n // When rendition ID is not present, compare attributes\n return !(\n customAttributes || [\n 'LANGUAGE',\n 'NAME',\n 'CHARACTERISTICS',\n 'AUTOSELECT',\n 'DEFAULT',\n 'FORCED',\n 'ASSOC-LANGUAGE',\n ]\n ).some(\n (subtitleAttribute) =>\n attrs1[subtitleAttribute] !== attrs2[subtitleAttribute],\n );\n}\n\nexport function subtitleTrackMatchesTextTrack(\n subtitleTrack: Pick<MediaPlaylist, 'name' | 'lang'>,\n textTrack: TextTrack,\n) {\n return (\n textTrack.label.toLowerCase() === subtitleTrack.name.toLowerCase() &&\n (!textTrack.language ||\n textTrack.language.toLowerCase() ===\n (subtitleTrack.lang || '').toLowerCase())\n );\n}\n", "import BasePlaylistController from './base-playlist-controller';\nimport { Events } from '../events';\nimport { ErrorTypes, ErrorDetails } from '../errors';\nimport { PlaylistContextType } from '../types/loader';\nimport { mediaAttributesIdentical } from '../utils/media-option-attributes';\nimport {\n audioMatchPredicate,\n findClosestLevelWithAudioGroup,\n findMatchingOption,\n matchesOption,\n} from '../utils/rendition-helper';\nimport type Hls from '../hls';\nimport type {\n AudioSelectionOption,\n MediaPlaylist,\n} from '../types/media-playlist';\nimport type { HlsUrlParameters } from '../types/level';\nimport type {\n ManifestParsedData,\n AudioTracksUpdatedData,\n ErrorData,\n LevelLoadingData,\n AudioTrackLoadedData,\n LevelSwitchingData,\n} from '../types/events';\n\nclass AudioTrackController extends BasePlaylistController {\n private tracks: 
MediaPlaylist[] = [];\n private groupIds: (string | undefined)[] | null = null;\n private tracksInGroup: MediaPlaylist[] = [];\n private trackId: number = -1;\n private currentTrack: MediaPlaylist | null = null;\n private selectDefaultTrack: boolean = true;\n\n constructor(hls: Hls) {\n super(hls, '[audio-track-controller]');\n this.registerListeners();\n }\n\n private registerListeners() {\n const { hls } = this;\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.on(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.on(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n }\n\n private unregisterListeners() {\n const { hls } = this;\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.off(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.off(Events.AUDIO_TRACK_LOADED, this.onAudioTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n }\n\n public destroy() {\n this.unregisterListeners();\n this.tracks.length = 0;\n this.tracksInGroup.length = 0;\n this.currentTrack = null;\n super.destroy();\n }\n\n protected onManifestLoading(): void {\n this.tracks = [];\n this.tracksInGroup = [];\n this.groupIds = null;\n this.currentTrack = null;\n this.trackId = -1;\n this.selectDefaultTrack = true;\n }\n\n protected onManifestParsed(\n event: Events.MANIFEST_PARSED,\n data: ManifestParsedData,\n ): void {\n this.tracks = data.audioTracks || [];\n }\n\n protected onAudioTrackLoaded(\n event: Events.AUDIO_TRACK_LOADED,\n data: AudioTrackLoadedData,\n ): void {\n const { id, groupId, details } = data;\n const trackInActiveGroup = this.tracksInGroup[id];\n\n if (!trackInActiveGroup || trackInActiveGroup.groupId !== groupId) {\n this.warn(\n `Audio track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup?.groupId}`,\n );\n return;\n }\n\n const curDetails = trackInActiveGroup.details;\n trackInActiveGroup.details = data.details;\n this.log(\n `Audio track ${id} \"${trackInActiveGroup.name}\" lang:${trackInActiveGroup.lang} group:${groupId} loaded [${details.startSN}-${details.endSN}]`,\n );\n\n if (id === this.trackId) {\n this.playlistLoaded(id, data, curDetails);\n }\n }\n\n protected onLevelLoading(\n event: Events.LEVEL_LOADING,\n data: LevelLoadingData,\n ): void {\n this.switchLevel(data.level);\n }\n\n protected onLevelSwitching(\n event: Events.LEVEL_SWITCHING,\n data: LevelSwitchingData,\n ): void {\n this.switchLevel(data.level);\n }\n\n private switchLevel(levelIndex: number) {\n const levelInfo = this.hls.levels[levelIndex];\n if (!levelInfo) {\n return;\n }\n const audioGroups = levelInfo.audioGroups || null;\n const currentGroups = this.groupIds;\n let currentTrack = this.currentTrack;\n if (\n !audioGroups ||\n currentGroups?.length !== audioGroups?.length ||\n audioGroups?.some((groupId) => currentGroups?.indexOf(groupId) === -1)\n ) {\n this.groupIds = audioGroups;\n this.trackId = -1;\n this.currentTrack = null;\n\n const audioTracks = this.tracks.filter(\n (track): boolean =>\n !audioGroups || audioGroups.indexOf(track.groupId) !== -1,\n );\n if (audioTracks.length) {\n // Disable selectDefaultTrack if there are no default tracks\n if (\n this.selectDefaultTrack &&\n !audioTracks.some((track) => 
track.default)\n ) {\n this.selectDefaultTrack = false;\n }\n // track.id should match hls.audioTracks index\n audioTracks.forEach((track, i) => {\n track.id = i;\n });\n } else if (!currentTrack && !this.tracksInGroup.length) {\n // Do not dispatch AUDIO_TRACKS_UPDATED when there were and are no tracks\n return;\n }\n this.tracksInGroup = audioTracks;\n\n // Find preferred track\n const audioPreference = this.hls.config.audioPreference;\n if (!currentTrack && audioPreference) {\n const groupIndex = findMatchingOption(\n audioPreference,\n audioTracks,\n audioMatchPredicate,\n );\n if (groupIndex > -1) {\n currentTrack = audioTracks[groupIndex];\n } else {\n const allIndex = findMatchingOption(audioPreference, this.tracks);\n currentTrack = this.tracks[allIndex];\n }\n }\n\n // Select initial track\n let trackId = this.findTrackId(currentTrack);\n if (trackId === -1 && currentTrack) {\n trackId = this.findTrackId(null);\n }\n\n // Dispatch events and load track if needed\n const audioTracksUpdated: AudioTracksUpdatedData = { audioTracks };\n this.log(\n `Updating audio tracks, ${\n audioTracks.length\n } track(s) found in group(s): ${audioGroups?.join(',')}`,\n );\n this.hls.trigger(Events.AUDIO_TRACKS_UPDATED, audioTracksUpdated);\n\n const selectedTrackId = this.trackId;\n if (trackId !== -1 && selectedTrackId === -1) {\n this.setAudioTrack(trackId);\n } else if (audioTracks.length && selectedTrackId === -1) {\n const error = new Error(\n `No audio track selected for current audio group-ID(s): ${this.groupIds?.join(\n ',',\n )} track count: ${audioTracks.length}`,\n );\n this.warn(error.message);\n\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.AUDIO_TRACK_LOAD_ERROR,\n fatal: true,\n error,\n });\n }\n } else if (this.shouldReloadPlaylist(currentTrack)) {\n // Retry playlist loading if no playlist is or has been loaded yet\n this.setAudioTrack(this.trackId);\n }\n }\n\n protected onError(event: Events.ERROR, data: ErrorData): void {\n if (data.fatal || !data.context) {\n return;\n }\n\n if (\n data.context.type === PlaylistContextType.AUDIO_TRACK &&\n data.context.id === this.trackId &&\n (!this.groupIds || this.groupIds.indexOf(data.context.groupId) !== -1)\n ) {\n this.requestScheduled = -1;\n this.checkRetry(data);\n }\n }\n\n get allAudioTracks(): MediaPlaylist[] {\n return this.tracks;\n }\n\n get audioTracks(): MediaPlaylist[] {\n return this.tracksInGroup;\n }\n\n get audioTrack(): number {\n return this.trackId;\n }\n\n set audioTrack(newId: number) {\n // If audio track is selected from API then don't choose from the manifest default track\n this.selectDefaultTrack = false;\n this.setAudioTrack(newId);\n }\n\n public setAudioOption(\n audioOption: MediaPlaylist | AudioSelectionOption | undefined,\n ): MediaPlaylist | null {\n const hls = this.hls;\n hls.config.audioPreference = audioOption;\n if (audioOption) {\n const allAudioTracks = this.allAudioTracks;\n this.selectDefaultTrack = false;\n if (allAudioTracks.length) {\n // First see if current option matches (no switch op)\n const currentTrack = this.currentTrack;\n if (\n currentTrack &&\n matchesOption(audioOption, currentTrack, audioMatchPredicate)\n ) {\n return currentTrack;\n }\n // Find option in available tracks (tracksInGroup)\n const groupIndex = findMatchingOption(\n audioOption,\n this.tracksInGroup,\n audioMatchPredicate,\n );\n if (groupIndex > -1) {\n const track = this.tracksInGroup[groupIndex];\n this.setAudioTrack(groupIndex);\n return track;\n } else if 
(currentTrack) {\n // Find option in nearest level audio group\n let searchIndex = hls.loadLevel;\n if (searchIndex === -1) {\n searchIndex = hls.firstAutoLevel;\n }\n const switchIndex = findClosestLevelWithAudioGroup(\n audioOption,\n hls.levels,\n allAudioTracks,\n searchIndex,\n audioMatchPredicate,\n );\n if (switchIndex === -1) {\n // could not find matching variant\n return null;\n }\n // and switch level to achieve the audio group switch\n hls.nextLoadLevel = switchIndex;\n }\n if (audioOption.channels || audioOption.audioCodec) {\n // Could not find a match with codec / channels predicate\n // Find a match without channels or codec\n const withoutCodecAndChannelsMatch = findMatchingOption(\n audioOption,\n allAudioTracks,\n );\n if (withoutCodecAndChannelsMatch > -1) {\n return allAudioTracks[withoutCodecAndChannelsMatch];\n }\n }\n }\n }\n return null;\n }\n\n private setAudioTrack(newId: number): void {\n const tracks = this.tracksInGroup;\n\n // check if level idx is valid\n if (newId < 0 || newId >= tracks.length) {\n this.warn(`Invalid audio track id: ${newId}`);\n return;\n }\n\n // stopping live reloading timer if any\n this.clearTimer();\n\n this.selectDefaultTrack = false;\n const lastTrack = this.currentTrack;\n const track = tracks[newId];\n const trackLoaded = track.details && !track.details.live;\n if (newId === this.trackId && track === lastTrack && trackLoaded) {\n return;\n }\n this.log(\n `Switching to audio-track ${newId} \"${track.name}\" lang:${track.lang} group:${track.groupId} channels:${track.channels}`,\n );\n this.trackId = newId;\n this.currentTrack = track;\n this.hls.trigger(Events.AUDIO_TRACK_SWITCHING, { ...track });\n // Do not reload track unless live\n if (trackLoaded) {\n return;\n }\n const hlsUrlParameters = this.switchParams(\n track.url,\n lastTrack?.details,\n track.details,\n );\n this.loadPlaylist(hlsUrlParameters);\n }\n\n private findTrackId(currentTrack: MediaPlaylist | null): number {\n const audioTracks = this.tracksInGroup;\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (this.selectDefaultTrack && !track.default) {\n continue;\n }\n if (\n !currentTrack ||\n matchesOption(currentTrack, track, audioMatchPredicate)\n ) {\n return i;\n }\n }\n if (currentTrack) {\n const { name, lang, assocLang, characteristics, audioCodec, channels } =\n currentTrack;\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (\n matchesOption(\n { name, lang, assocLang, characteristics, audioCodec, channels },\n track,\n audioMatchPredicate,\n )\n ) {\n return i;\n }\n }\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (\n mediaAttributesIdentical(currentTrack.attrs, track.attrs, [\n 'LANGUAGE',\n 'ASSOC-LANGUAGE',\n 'CHARACTERISTICS',\n ])\n ) {\n return i;\n }\n }\n for (let i = 0; i < audioTracks.length; i++) {\n const track = audioTracks[i];\n if (\n mediaAttributesIdentical(currentTrack.attrs, track.attrs, [\n 'LANGUAGE',\n ])\n ) {\n return i;\n }\n }\n }\n return -1;\n }\n\n protected loadPlaylist(hlsUrlParameters?: HlsUrlParameters): void {\n const audioTrack = this.currentTrack;\n if (this.shouldLoadPlaylist(audioTrack) && audioTrack) {\n super.loadPlaylist();\n const id = audioTrack.id;\n const groupId = audioTrack.groupId as string;\n let url = audioTrack.url;\n if (hlsUrlParameters) {\n try {\n url = hlsUrlParameters.addDirectives(url);\n } catch (error) {\n this.warn(\n `Could not construct new URL with HLS Delivery Directives: 
${error}`,\n );\n }\n }\n // track not retrieved yet, or live playlist that needs to be (re)loaded\n this.log(\n `loading audio-track playlist ${id} \"${audioTrack.name}\" lang:${audioTrack.lang} group:${groupId}`,\n );\n this.clearTimer();\n this.hls.trigger(Events.AUDIO_TRACK_LOADING, {\n url,\n id,\n groupId,\n deliveryDirectives: hlsUrlParameters || null,\n });\n }\n }\n}\n\nexport default AudioTrackController;\n", "import { Events } from '../events';\nimport { Bufferable, BufferHelper } from '../utils/buffer-helper';\nimport { findFragmentByPTS } from './fragment-finders';\nimport { alignMediaPlaylistByPDT } from '../utils/discontinuities';\nimport { addSliding } from '../utils/level-helper';\nimport { FragmentState } from './fragment-tracker';\nimport BaseStreamController, { State } from './base-stream-controller';\nimport { PlaylistLevelType } from '../types/loader';\nimport { Level } from '../types/level';\nimport { subtitleOptionsIdentical } from '../utils/media-option-attributes';\nimport { ErrorDetails, ErrorTypes } from '../errors';\nimport type { NetworkComponentAPI } from '../types/component-api';\nimport type Hls from '../hls';\nimport type { FragmentTracker } from './fragment-tracker';\nimport type KeyLoader from '../loader/key-loader';\nimport type { LevelDetails } from '../loader/level-details';\nimport type { Fragment } from '../loader/fragment';\nimport type {\n ErrorData,\n FragLoadedData,\n SubtitleFragProcessed,\n SubtitleTracksUpdatedData,\n TrackLoadedData,\n TrackSwitchedData,\n BufferFlushingData,\n LevelLoadedData,\n FragBufferedData,\n} from '../types/events';\n\nconst TICK_INTERVAL = 500; // how often to tick in ms\n\ninterface TimeRange {\n start: number;\n end: number;\n}\n\nexport class SubtitleStreamController\n extends BaseStreamController\n implements NetworkComponentAPI\n{\n private currentTrackId: number = -1;\n private tracksBuffered: Array<TimeRange[]> = [];\n private mainDetails: LevelDetails | null = null;\n\n constructor(\n hls: Hls,\n fragmentTracker: FragmentTracker,\n keyLoader: KeyLoader,\n ) {\n super(\n hls,\n fragmentTracker,\n keyLoader,\n '[subtitle-stream-controller]',\n PlaylistLevelType.SUBTITLE,\n );\n this._registerListeners();\n }\n\n protected onHandlerDestroying() {\n this._unregisterListeners();\n super.onHandlerDestroying();\n this.mainDetails = null;\n }\n\n private _registerListeners() {\n const { hls } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);\n hls.on(Events.SUBTITLE_TRACK_SWITCH, this.onSubtitleTrackSwitch, this);\n hls.on(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.on(Events.SUBTITLE_FRAG_PROCESSED, this.onSubtitleFragProcessed, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n\n private _unregisterListeners() {\n const { hls } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.LEVEL_LOADED, this.onLevelLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, 
this);\n hls.off(Events.SUBTITLE_TRACK_SWITCH, this.onSubtitleTrackSwitch, this);\n hls.off(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.off(Events.SUBTITLE_FRAG_PROCESSED, this.onSubtitleFragProcessed, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.FRAG_BUFFERED, this.onFragBuffered, this);\n }\n\n startLoad(startPosition: number) {\n this.stopLoad();\n this.state = State.IDLE;\n\n this.setInterval(TICK_INTERVAL);\n\n this.nextLoadPosition =\n this.startPosition =\n this.lastCurrentTime =\n startPosition;\n\n this.tick();\n }\n\n onManifestLoading() {\n this.mainDetails = null;\n this.fragmentTracker.removeAllFragments();\n }\n\n onMediaDetaching(): void {\n this.tracksBuffered = [];\n super.onMediaDetaching();\n }\n\n onLevelLoaded(event: Events.LEVEL_LOADED, data: LevelLoadedData) {\n this.mainDetails = data.details;\n }\n\n onSubtitleFragProcessed(\n event: Events.SUBTITLE_FRAG_PROCESSED,\n data: SubtitleFragProcessed,\n ) {\n const { frag, success } = data;\n this.fragPrevious = frag;\n this.state = State.IDLE;\n if (!success) {\n return;\n }\n\n const buffered = this.tracksBuffered[this.currentTrackId];\n if (!buffered) {\n return;\n }\n\n // Create/update a buffered array matching the interface used by BufferHelper.bufferedInfo\n // so we can re-use the logic used to detect how much has been buffered\n let timeRange: TimeRange | undefined;\n const fragStart = frag.start;\n for (let i = 0; i < buffered.length; i++) {\n if (fragStart >= buffered[i].start && fragStart <= buffered[i].end) {\n timeRange = buffered[i];\n break;\n }\n }\n\n const fragEnd = frag.start + frag.duration;\n if (timeRange) {\n timeRange.end = fragEnd;\n } else {\n timeRange = {\n start: fragStart,\n end: fragEnd,\n };\n buffered.push(timeRange);\n }\n this.fragmentTracker.fragBuffered(frag);\n this.fragBufferedComplete(frag, null);\n }\n\n onBufferFlushing(event: Events.BUFFER_FLUSHING, data: BufferFlushingData) {\n const { startOffset, endOffset } = data;\n if (startOffset === 0 && endOffset !== Number.POSITIVE_INFINITY) {\n const endOffsetSubtitles = endOffset - 1;\n if (endOffsetSubtitles <= 0) {\n return;\n }\n data.endOffsetSubtitles = Math.max(0, endOffsetSubtitles);\n this.tracksBuffered.forEach((buffered) => {\n for (let i = 0; i < buffered.length; ) {\n if (buffered[i].end <= endOffsetSubtitles) {\n buffered.shift();\n continue;\n } else if (buffered[i].start < endOffsetSubtitles) {\n buffered[i].start = endOffsetSubtitles;\n } else {\n break;\n }\n i++;\n }\n });\n this.fragmentTracker.removeFragmentsInRange(\n startOffset,\n endOffsetSubtitles,\n PlaylistLevelType.SUBTITLE,\n );\n }\n }\n\n onFragBuffered(event: Events.FRAG_BUFFERED, data: FragBufferedData) {\n if (!this.loadedmetadata && data.frag.type === PlaylistLevelType.MAIN) {\n if (this.media?.buffered.length) {\n this.loadedmetadata = true;\n }\n }\n }\n\n // If something goes wrong, proceed to next frag, if we were processing one.\n onError(event: Events.ERROR, data: ErrorData) {\n const frag = data.frag;\n\n if (frag?.type === PlaylistLevelType.SUBTITLE) {\n if (data.details === ErrorDetails.FRAG_GAP) {\n this.fragmentTracker.fragBuffered(frag, true);\n }\n if (this.fragCurrent) {\n this.fragCurrent.abortRequests();\n }\n if (this.state !== State.STOPPED) {\n this.state = State.IDLE;\n }\n }\n }\n\n // Got all new subtitle levels.\n onSubtitleTracksUpdated(\n event: Events.SUBTITLE_TRACKS_UPDATED,\n { subtitleTracks }: SubtitleTracksUpdatedData,\n ) {\n if (this.levels && 
subtitleOptionsIdentical(this.levels, subtitleTracks)) {\n this.levels = subtitleTracks.map(\n (mediaPlaylist) => new Level(mediaPlaylist),\n );\n return;\n }\n this.tracksBuffered = [];\n this.levels = subtitleTracks.map((mediaPlaylist) => {\n const level = new Level(mediaPlaylist);\n this.tracksBuffered[level.id] = [];\n return level;\n });\n this.fragmentTracker.removeFragmentsInRange(\n 0,\n Number.POSITIVE_INFINITY,\n PlaylistLevelType.SUBTITLE,\n );\n this.fragPrevious = null;\n this.mediaBuffer = null;\n }\n\n onSubtitleTrackSwitch(\n event: Events.SUBTITLE_TRACK_SWITCH,\n data: TrackSwitchedData,\n ) {\n this.currentTrackId = data.id;\n\n if (!this.levels?.length || this.currentTrackId === -1) {\n this.clearInterval();\n return;\n }\n\n // Check if track has the necessary details to load fragments\n const currentTrack = this.levels[this.currentTrackId];\n if (currentTrack?.details) {\n this.mediaBuffer = this.mediaBufferTimeRanges;\n } else {\n this.mediaBuffer = null;\n }\n if (currentTrack) {\n this.setInterval(TICK_INTERVAL);\n }\n }\n\n // Got a new set of subtitle fragments.\n onSubtitleTrackLoaded(\n event: Events.SUBTITLE_TRACK_LOADED,\n data: TrackLoadedData,\n ) {\n const { currentTrackId, levels } = this;\n const { details: newDetails, id: trackId } = data;\n if (!levels) {\n this.warn(`Subtitle tracks were reset while loading level ${trackId}`);\n return;\n }\n const track: Level = levels[trackId];\n if (trackId >= levels.length || !track) {\n return;\n }\n this.log(\n `Subtitle track ${trackId} loaded [${newDetails.startSN},${\n newDetails.endSN\n }]${\n newDetails.lastPartSn\n ? `[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]`\n : ''\n },duration:${newDetails.totalduration}`,\n );\n this.mediaBuffer = this.mediaBufferTimeRanges;\n let sliding = 0;\n if (newDetails.live || track.details?.live) {\n const mainDetails = this.mainDetails;\n if (newDetails.deltaUpdateFailed || !mainDetails) {\n return;\n }\n const mainSlidingStartFragment = mainDetails.fragments[0];\n if (!track.details) {\n if (newDetails.hasProgramDateTime && mainDetails.hasProgramDateTime) {\n alignMediaPlaylistByPDT(newDetails, mainDetails);\n sliding = newDetails.fragments[0].start;\n } else if (mainSlidingStartFragment) {\n // line up live playlist with main so that fragments in range are loaded\n sliding = mainSlidingStartFragment.start;\n addSliding(newDetails, sliding);\n }\n } else {\n sliding = this.alignPlaylists(\n newDetails,\n track.details,\n this.levelLastLoaded?.details,\n );\n if (sliding === 0 && mainSlidingStartFragment) {\n // realign with main when there is no overlap with last refresh\n sliding = mainSlidingStartFragment.start;\n addSliding(newDetails, sliding);\n }\n }\n }\n track.details = newDetails;\n this.levelLastLoaded = track;\n\n if (trackId !== currentTrackId) {\n return;\n }\n\n if (!this.startFragRequested && (this.mainDetails || !newDetails.live)) {\n this.setStartPosition(this.mainDetails || newDetails, sliding);\n }\n\n // trigger handler right now\n this.tick();\n\n // If playlist is misaligned because of bad PDT or drift, delete details to resync with main on reload\n if (\n newDetails.live &&\n !this.fragCurrent &&\n this.media &&\n this.state === State.IDLE\n ) {\n const foundFrag = findFragmentByPTS(\n null,\n newDetails.fragments,\n this.media.currentTime,\n 0,\n );\n if (!foundFrag) {\n this.warn('Subtitle playlist not aligned with playback');\n track.details = undefined;\n }\n }\n }\n\n _handleFragmentLoadComplete(fragLoadedData: FragLoadedData) {\n 
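/*\n * Sketch of the decryption path below: only whole-segment AES-128 is handled\n * here (method === 'AES-128' with key and iv present); SAMPLE-AES subtitle\n * content is not decrypted by this controller. Standalone usage of the same\n * decrypter, assuming it was constructed from hls.config by the base class:\n *\n * this.decrypter\n * .decrypt(new Uint8Array(payload), decryptData.key.buffer, decryptData.iv.buffer)\n * .then((cleartext) => handleCleartext(cleartext)); // handleCleartext is hypothetical\n */ 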
const { frag, payload } = fragLoadedData;\n const decryptData = frag.decryptdata;\n const hls = this.hls;\n\n if (this.fragContextChanged(frag)) {\n return;\n }\n // check to see if the payload needs to be decrypted\n if (\n payload &&\n payload.byteLength > 0 &&\n decryptData?.key &&\n decryptData.iv &&\n decryptData.method === 'AES-128'\n ) {\n const startTime = performance.now();\n // decrypt the subtitles\n this.decrypter\n .decrypt(\n new Uint8Array(payload),\n decryptData.key.buffer,\n decryptData.iv.buffer,\n )\n .catch((err) => {\n hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.FRAG_DECRYPT_ERROR,\n fatal: false,\n error: err,\n reason: err.message,\n frag,\n });\n throw err;\n })\n .then((decryptedData) => {\n const endTime = performance.now();\n hls.trigger(Events.FRAG_DECRYPTED, {\n frag,\n payload: decryptedData,\n stats: {\n tstart: startTime,\n tdecrypt: endTime,\n },\n });\n })\n .catch((err) => {\n this.warn(`${err.name}: ${err.message}`);\n this.state = State.IDLE;\n });\n }\n }\n\n doTick() {\n if (!this.media) {\n this.state = State.IDLE;\n return;\n }\n\n if (this.state === State.IDLE) {\n const { currentTrackId, levels } = this;\n const track = levels?.[currentTrackId];\n if (!track || !levels.length || !track.details) {\n return;\n }\n const { config } = this;\n const currentTime = this.getLoadPosition();\n const bufferedInfo = BufferHelper.bufferedInfo(\n this.tracksBuffered[this.currentTrackId] || [],\n currentTime,\n config.maxBufferHole,\n );\n const { end: targetBufferTime, len: bufferLen } = bufferedInfo;\n\n const mainBufferInfo = this.getFwdBufferInfo(\n this.media,\n PlaylistLevelType.MAIN,\n );\n const trackDetails = track.details as LevelDetails;\n const maxBufLen =\n this.getMaxBufferLength(mainBufferInfo?.len) +\n trackDetails.levelTargetDuration;\n\n if (bufferLen > maxBufLen) {\n return;\n }\n const fragments = trackDetails.fragments;\n const fragLen = fragments.length;\n const end = trackDetails.edge;\n\n let foundFrag: Fragment | null = null;\n const fragPrevious = this.fragPrevious;\n if (targetBufferTime < end) {\n const tolerance = config.maxFragLookUpTolerance;\n const lookupTolerance =\n targetBufferTime > end - tolerance ? 
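/*\n * Worked example for the lookup tolerance chosen here: with edge end = 60 and\n * maxFragLookUpTolerance = 0.25, any targetBufferTime > 59.75 searches with\n * tolerance 0 so findFragmentByPTS cannot step past the last published\n * fragment; earlier positions use the configured tolerance.\n */ 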
0 : tolerance;\n foundFrag = findFragmentByPTS(\n fragPrevious,\n fragments,\n Math.max(fragments[0].start, targetBufferTime),\n lookupTolerance,\n );\n if (\n !foundFrag &&\n fragPrevious &&\n fragPrevious.start < fragments[0].start\n ) {\n foundFrag = fragments[0];\n }\n } else {\n foundFrag = fragments[fragLen - 1];\n }\n if (!foundFrag) {\n return;\n }\n foundFrag = this.mapToInitFragWhenRequired(foundFrag) as Fragment;\n if (foundFrag.sn !== 'initSegment') {\n // Load earlier fragment in same discontinuity to make up for misaligned playlists and cues that extend beyond end of segment\n const curSNIdx = foundFrag.sn - trackDetails.startSN;\n const prevFrag = fragments[curSNIdx - 1];\n if (\n prevFrag &&\n prevFrag.cc === foundFrag.cc &&\n this.fragmentTracker.getState(prevFrag) === FragmentState.NOT_LOADED\n ) {\n foundFrag = prevFrag;\n }\n }\n if (\n this.fragmentTracker.getState(foundFrag) === FragmentState.NOT_LOADED\n ) {\n // only load if fragment is not loaded\n this.loadFragment(foundFrag, track, targetBufferTime);\n }\n }\n }\n\n protected getMaxBufferLength(mainBufferLength?: number): number {\n const maxConfigBuffer = super.getMaxBufferLength();\n if (!mainBufferLength) {\n return maxConfigBuffer;\n }\n return Math.max(maxConfigBuffer, mainBufferLength);\n }\n\n protected loadFragment(\n frag: Fragment,\n level: Level,\n targetBufferTime: number,\n ) {\n this.fragCurrent = frag;\n if (frag.sn === 'initSegment') {\n this._loadInitSegment(frag, level);\n } else {\n this.startFragRequested = true;\n super.loadFragment(frag, level, targetBufferTime);\n }\n }\n\n get mediaBufferTimeRanges(): Bufferable {\n return new BufferableInstance(\n this.tracksBuffered[this.currentTrackId] || [],\n );\n }\n}\n\nclass BufferableInstance implements Bufferable {\n public readonly buffered: TimeRanges;\n\n constructor(timeranges: TimeRange[]) {\n const getRange = (\n name: 'start' | 'end',\n index: number,\n length: number,\n ): number => {\n index = index >>> 0;\n if (index > length - 1) {\n throw new DOMException(\n `Failed to execute '${name}' on 'TimeRanges': The index provided (${index}) is greater than the maximum bound (${length})`,\n );\n }\n return timeranges[index][name];\n };\n this.buffered = {\n get length() {\n return timeranges.length;\n },\n end(index: number): number {\n return getRange('end', index, timeranges.length);\n },\n start(index: number): number {\n return getRange('start', index, timeranges.length);\n },\n };\n }\n}\n", "import BasePlaylistController from './base-playlist-controller';\nimport { Events } from '../events';\nimport {\n clearCurrentCues,\n filterSubtitleTracks,\n} from '../utils/texttrack-utils';\nimport { PlaylistContextType } from '../types/loader';\nimport {\n mediaAttributesIdentical,\n subtitleTrackMatchesTextTrack,\n} from '../utils/media-option-attributes';\nimport { findMatchingOption, matchesOption } from '../utils/rendition-helper';\nimport type Hls from '../hls';\nimport type {\n MediaPlaylist,\n SubtitleSelectionOption,\n} from '../types/media-playlist';\nimport type { HlsUrlParameters } from '../types/level';\nimport type {\n ErrorData,\n LevelLoadingData,\n MediaAttachedData,\n SubtitleTracksUpdatedData,\n ManifestParsedData,\n TrackLoadedData,\n LevelSwitchingData,\n} from '../types/events';\n\nclass SubtitleTrackController extends BasePlaylistController {\n private media: HTMLMediaElement | null = null;\n private tracks: MediaPlaylist[] = [];\n private groupIds: (string | undefined)[] | null = null;\n private tracksInGroup: 
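/*\n * Note on the polling members below: some browsers never fire 'change' on\n * media.textTracks, so when 'onchange' is unsupported the controller polls\n * via self.setInterval(this.onTextTracksChanged, 500) (see onMediaAttached\n * and pollTrackChange). asyncPollTrackChange is the zero-delay variant\n * installed as the 'change' listener so handling stays asynchronous either way.\n */ 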
MediaPlaylist[] = [];\n private trackId: number = -1;\n private currentTrack: MediaPlaylist | null = null;\n private selectDefaultTrack: boolean = true;\n private queuedDefaultTrack: number = -1;\n private asyncPollTrackChange: () => void = () => this.pollTrackChange(0);\n private useTextTrackPolling: boolean = false;\n private subtitlePollingInterval: number = -1;\n private _subtitleDisplay: boolean = true;\n\n constructor(hls: Hls) {\n super(hls, '[subtitle-track-controller]');\n this.registerListeners();\n }\n\n public destroy() {\n this.unregisterListeners();\n this.tracks.length = 0;\n this.tracksInGroup.length = 0;\n this.currentTrack = null;\n this.onTextTracksChanged = this.asyncPollTrackChange = null as any;\n super.destroy();\n }\n\n public get subtitleDisplay(): boolean {\n return this._subtitleDisplay;\n }\n\n public set subtitleDisplay(value: boolean) {\n this._subtitleDisplay = value;\n if (this.trackId > -1) {\n this.toggleTrackModes();\n }\n }\n\n private registerListeners() {\n const { hls } = this;\n hls.on(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.on(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.on(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.on(Events.ERROR, this.onError, this);\n }\n\n private unregisterListeners() {\n const { hls } = this;\n hls.off(Events.MEDIA_ATTACHED, this.onMediaAttached, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.LEVEL_LOADING, this.onLevelLoading, this);\n hls.off(Events.LEVEL_SWITCHING, this.onLevelSwitching, this);\n hls.off(Events.SUBTITLE_TRACK_LOADED, this.onSubtitleTrackLoaded, this);\n hls.off(Events.ERROR, this.onError, this);\n }\n\n // Listen for subtitle track change, then extract the current track ID.\n protected onMediaAttached(\n event: Events.MEDIA_ATTACHED,\n data: MediaAttachedData,\n ): void {\n this.media = data.media;\n if (!this.media) {\n return;\n }\n\n if (this.queuedDefaultTrack > -1) {\n this.subtitleTrack = this.queuedDefaultTrack;\n this.queuedDefaultTrack = -1;\n }\n\n this.useTextTrackPolling = !(\n this.media.textTracks && 'onchange' in this.media.textTracks\n );\n if (this.useTextTrackPolling) {\n this.pollTrackChange(500);\n } else {\n this.media.textTracks.addEventListener(\n 'change',\n this.asyncPollTrackChange,\n );\n }\n }\n\n private pollTrackChange(timeout: number) {\n self.clearInterval(this.subtitlePollingInterval);\n this.subtitlePollingInterval = self.setInterval(\n this.onTextTracksChanged,\n timeout,\n );\n }\n\n protected onMediaDetaching(): void {\n if (!this.media) {\n return;\n }\n\n self.clearInterval(this.subtitlePollingInterval);\n if (!this.useTextTrackPolling) {\n this.media.textTracks.removeEventListener(\n 'change',\n this.asyncPollTrackChange,\n );\n }\n\n if (this.trackId > -1) {\n this.queuedDefaultTrack = this.trackId;\n }\n\n const textTracks = filterSubtitleTracks(this.media.textTracks);\n // Clear loaded cues on media detachment from tracks\n textTracks.forEach((track) => {\n clearCurrentCues(track);\n });\n // Disable all subtitle tracks before detachment so when reattached only tracks in that content are 
enabled.\n this.subtitleTrack = -1;\n this.media = null;\n }\n\n protected onManifestLoading(): void {\n this.tracks = [];\n this.groupIds = null;\n this.tracksInGroup = [];\n this.trackId = -1;\n this.currentTrack = null;\n this.selectDefaultTrack = true;\n }\n\n // Fired whenever a new manifest is loaded.\n protected onManifestParsed(\n event: Events.MANIFEST_PARSED,\n data: ManifestParsedData,\n ): void {\n this.tracks = data.subtitleTracks;\n }\n\n protected onSubtitleTrackLoaded(\n event: Events.SUBTITLE_TRACK_LOADED,\n data: TrackLoadedData,\n ): void {\n const { id, groupId, details } = data;\n const trackInActiveGroup = this.tracksInGroup[id];\n\n if (!trackInActiveGroup || trackInActiveGroup.groupId !== groupId) {\n this.warn(\n `Subtitle track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup?.groupId}`,\n );\n return;\n }\n\n const curDetails = trackInActiveGroup.details;\n trackInActiveGroup.details = data.details;\n this.log(\n `Subtitle track ${id} \"${trackInActiveGroup.name}\" lang:${trackInActiveGroup.lang} group:${groupId} loaded [${details.startSN}-${details.endSN}]`,\n );\n\n if (id === this.trackId) {\n this.playlistLoaded(id, data, curDetails);\n }\n }\n\n protected onLevelLoading(\n event: Events.LEVEL_LOADING,\n data: LevelLoadingData,\n ): void {\n this.switchLevel(data.level);\n }\n\n protected onLevelSwitching(\n event: Events.LEVEL_SWITCHING,\n data: LevelSwitchingData,\n ): void {\n this.switchLevel(data.level);\n }\n\n private switchLevel(levelIndex: number) {\n const levelInfo = this.hls.levels[levelIndex];\n if (!levelInfo) {\n return;\n }\n const subtitleGroups = levelInfo.subtitleGroups || null;\n const currentGroups = this.groupIds;\n let currentTrack = this.currentTrack;\n if (\n !subtitleGroups ||\n currentGroups?.length !== subtitleGroups?.length ||\n subtitleGroups?.some((groupId) => currentGroups?.indexOf(groupId) === -1)\n ) {\n this.groupIds = subtitleGroups;\n this.trackId = -1;\n this.currentTrack = null;\n\n const subtitleTracks = this.tracks.filter(\n (track): boolean =>\n !subtitleGroups || subtitleGroups.indexOf(track.groupId) !== -1,\n );\n if (subtitleTracks.length) {\n // Disable selectDefaultTrack if there are no default tracks\n if (\n this.selectDefaultTrack &&\n !subtitleTracks.some((track) => track.default)\n ) {\n this.selectDefaultTrack = false;\n }\n // track.id should match hls.subtitleTracks index\n subtitleTracks.forEach((track, i) => {\n track.id = i;\n });\n } else if (!currentTrack && !this.tracksInGroup.length) {\n // Do not dispatch SUBTITLE_TRACKS_UPDATED when there were and are no tracks\n return;\n }\n this.tracksInGroup = subtitleTracks;\n\n // Find preferred track\n const subtitlePreference = this.hls.config.subtitlePreference;\n if (!currentTrack && subtitlePreference) {\n this.selectDefaultTrack = false;\n const groupIndex = findMatchingOption(\n subtitlePreference,\n subtitleTracks,\n );\n if (groupIndex > -1) {\n currentTrack = subtitleTracks[groupIndex];\n } else {\n const allIndex = findMatchingOption(subtitlePreference, this.tracks);\n currentTrack = this.tracks[allIndex];\n }\n }\n\n // Select initial track\n let trackId = this.findTrackId(currentTrack);\n if (trackId === -1 && currentTrack) {\n trackId = this.findTrackId(null);\n }\n\n // Dispatch events and load track if needed\n const subtitleTracksUpdated: SubtitleTracksUpdatedData = {\n subtitleTracks,\n };\n this.log(\n `Updating subtitle tracks, ${\n subtitleTracks.length\n } track(s) found in \"${subtitleGroups?.join(',')}\" 
group-id`,\n );\n this.hls.trigger(Events.SUBTITLE_TRACKS_UPDATED, subtitleTracksUpdated);\n\n if (trackId !== -1 && this.trackId === -1) {\n this.setSubtitleTrack(trackId);\n }\n } else if (this.shouldReloadPlaylist(currentTrack)) {\n // Retry playlist loading if no playlist is or has been loaded yet\n this.setSubtitleTrack(this.trackId);\n }\n }\n\n private findTrackId(currentTrack: MediaPlaylist | null): number {\n const tracks = this.tracksInGroup;\n const selectDefault = this.selectDefaultTrack;\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (\n (selectDefault && !track.default) ||\n (!selectDefault && !currentTrack)\n ) {\n continue;\n }\n if (!currentTrack || matchesOption(track, currentTrack)) {\n return i;\n }\n }\n if (currentTrack) {\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (\n mediaAttributesIdentical(currentTrack.attrs, track.attrs, [\n 'LANGUAGE',\n 'ASSOC-LANGUAGE',\n 'CHARACTERISTICS',\n ])\n ) {\n return i;\n }\n }\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (\n mediaAttributesIdentical(currentTrack.attrs, track.attrs, [\n 'LANGUAGE',\n ])\n ) {\n return i;\n }\n }\n }\n return -1;\n }\n\n private findTrackForTextTrack(textTrack: TextTrack | null): number {\n if (textTrack) {\n const tracks = this.tracksInGroup;\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n if (subtitleTrackMatchesTextTrack(track, textTrack)) {\n return i;\n }\n }\n }\n return -1;\n }\n\n protected onError(event: Events.ERROR, data: ErrorData): void {\n if (data.fatal || !data.context) {\n return;\n }\n\n if (\n data.context.type === PlaylistContextType.SUBTITLE_TRACK &&\n data.context.id === this.trackId &&\n (!this.groupIds || this.groupIds.indexOf(data.context.groupId) !== -1)\n ) {\n this.checkRetry(data);\n }\n }\n\n get allSubtitleTracks(): MediaPlaylist[] {\n return this.tracks;\n }\n\n /** get alternate subtitle tracks list from playlist **/\n get subtitleTracks(): MediaPlaylist[] {\n return this.tracksInGroup;\n }\n\n /** get/set index of the selected subtitle track (based on index in subtitle track lists) **/\n get subtitleTrack(): number {\n return this.trackId;\n }\n\n set subtitleTrack(newId: number) {\n this.selectDefaultTrack = false;\n this.setSubtitleTrack(newId);\n }\n\n public setSubtitleOption(\n subtitleOption: MediaPlaylist | SubtitleSelectionOption | undefined,\n ): MediaPlaylist | null {\n this.hls.config.subtitlePreference = subtitleOption;\n if (subtitleOption) {\n const allSubtitleTracks = this.allSubtitleTracks;\n this.selectDefaultTrack = false;\n if (allSubtitleTracks.length) {\n // First see if current option matches (no switch op)\n const currentTrack = this.currentTrack;\n if (currentTrack && matchesOption(subtitleOption, currentTrack)) {\n return currentTrack;\n }\n // Find option in current group\n const groupIndex = findMatchingOption(\n subtitleOption,\n this.tracksInGroup,\n );\n if (groupIndex > -1) {\n const track = this.tracksInGroup[groupIndex];\n this.setSubtitleTrack(groupIndex);\n return track;\n } else if (currentTrack) {\n // If this is not the initial selection return null\n // option should have matched one in active group\n return null;\n } else {\n // Find the option in all tracks for initial selection\n const allIndex = findMatchingOption(\n subtitleOption,\n allSubtitleTracks,\n );\n if (allIndex > -1) {\n return allSubtitleTracks[allIndex];\n }\n }\n }\n }\n return null;\n }\n\n protected loadPlaylist(hlsUrlParameters?: 
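/*\n * Usage sketch for setSubtitleOption above, assuming the Hls facade forwards\n * to this controller (the option fields shown are typical\n * SubtitleSelectionOption members; treat the exact shape as an assumption):\n *\n * const match = hls.setSubtitleOption({ lang: 'en', name: 'English' });\n * if (match === null) {\n * // nothing in the active group matched the requested option\n * }\n */ 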
HlsUrlParameters): void {\n super.loadPlaylist();\n const currentTrack = this.currentTrack;\n if (this.shouldLoadPlaylist(currentTrack) && currentTrack) {\n const id = currentTrack.id;\n const groupId = currentTrack.groupId as string;\n let url = currentTrack.url;\n if (hlsUrlParameters) {\n try {\n url = hlsUrlParameters.addDirectives(url);\n } catch (error) {\n this.warn(\n `Could not construct new URL with HLS Delivery Directives: ${error}`,\n );\n }\n }\n this.log(`Loading subtitle playlist for id ${id}`);\n this.hls.trigger(Events.SUBTITLE_TRACK_LOADING, {\n url,\n id,\n groupId,\n deliveryDirectives: hlsUrlParameters || null,\n });\n }\n }\n\n /**\n * Disables the old subtitleTrack and sets current mode on the next subtitleTrack.\n * This operates on the DOM textTracks.\n * A value of -1 will disable all subtitle tracks.\n */\n private toggleTrackModes(): void {\n const { media } = this;\n if (!media) {\n return;\n }\n\n const textTracks = filterSubtitleTracks(media.textTracks);\n const currentTrack = this.currentTrack;\n let nextTrack;\n if (currentTrack) {\n nextTrack = textTracks.filter((textTrack) =>\n subtitleTrackMatchesTextTrack(currentTrack, textTrack),\n )[0];\n if (!nextTrack) {\n this.warn(\n `Unable to find subtitle TextTrack with name \"${currentTrack.name}\" and language \"${currentTrack.lang}\"`,\n );\n }\n }\n [].slice.call(textTracks).forEach((track) => {\n if (track.mode !== 'disabled' && track !== nextTrack) {\n track.mode = 'disabled';\n }\n });\n if (nextTrack) {\n const mode = this.subtitleDisplay ? 'showing' : 'hidden';\n if (nextTrack.mode !== mode) {\n nextTrack.mode = mode;\n }\n }\n }\n\n /**\n * This method is responsible for validating the subtitle index and periodically reloading if live.\n * Dispatches the SUBTITLE_TRACK_SWITCH event, which instructs the subtitle-stream-controller to load the selected track.\n */\n private setSubtitleTrack(newId: number): void {\n const tracks = this.tracksInGroup;\n\n // setting this.subtitleTrack will trigger internal logic\n // if media has not been attached yet, it will fail\n // we keep a reference to the default track id\n // and we'll set subtitleTrack when onMediaAttached is triggered\n if (!this.media) {\n this.queuedDefaultTrack = newId;\n return;\n }\n\n // exit if track id is already set or invalid\n if (newId < -1 || newId >= tracks.length || !Number.isFinite(newId)) {\n this.warn(`Invalid subtitle track id: ${newId}`);\n return;\n }\n\n // stopping live reloading timer if any\n this.clearTimer();\n\n this.selectDefaultTrack = false;\n const lastTrack = this.currentTrack;\n const track: MediaPlaylist | null = tracks[newId] || null;\n this.trackId = newId;\n this.currentTrack = track;\n this.toggleTrackModes();\n if (!track) {\n // switch to -1\n this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH, { id: newId });\n return;\n }\n const trackLoaded = !!track.details && !track.details.live;\n if (newId === this.trackId && track === lastTrack && trackLoaded) {\n return;\n }\n this.log(\n `Switching to subtitle-track ${newId}` +\n (track\n ? 
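/*\n * Public API behaviour implemented by setSubtitleTrack above, e.g.:\n * hls.subtitleTrack = 2; // switch to track 2 of the active group\n * hls.subtitleTrack = -1; // disable subtitles (TextTracks -> 'disabled')\n * // ids < -1, >= tracks.length, or non-finite are rejected with a warning\n */ 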
` \"${track.name}\" lang:${track.lang} group:${track.groupId}`\n : ''),\n );\n const { id, groupId = '', name, type, url } = track;\n this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH, {\n id,\n groupId,\n name,\n type,\n url,\n });\n const hlsUrlParameters = this.switchParams(\n track.url,\n lastTrack?.details,\n track.details,\n );\n this.loadPlaylist(hlsUrlParameters);\n }\n\n private onTextTracksChanged = () => {\n if (!this.useTextTrackPolling) {\n self.clearInterval(this.subtitlePollingInterval);\n }\n // Media is undefined when switching streams via loadSource()\n if (!this.media || !this.hls.config.renderTextTracksNatively) {\n return;\n }\n\n let textTrack: TextTrack | null = null;\n const tracks = filterSubtitleTracks(this.media.textTracks);\n for (let i = 0; i < tracks.length; i++) {\n if (tracks[i].mode === 'hidden') {\n // Do not break in case there is a following track with showing.\n textTrack = tracks[i];\n } else if (tracks[i].mode === 'showing') {\n textTrack = tracks[i];\n break;\n }\n }\n\n // Find internal track index for TextTrack\n const trackId = this.findTrackForTextTrack(textTrack);\n if (this.subtitleTrack !== trackId) {\n this.setSubtitleTrack(trackId);\n }\n };\n}\n\nexport default SubtitleTrackController;\n", "import { logger } from '../utils/logger';\nimport type {\n BufferOperation,\n BufferOperationQueues,\n SourceBuffers,\n SourceBufferName,\n} from '../types/buffer';\n\nexport default class BufferOperationQueue {\n private buffers: SourceBuffers;\n private queues: BufferOperationQueues = {\n video: [],\n audio: [],\n audiovideo: [],\n };\n\n constructor(sourceBufferReference: SourceBuffers) {\n this.buffers = sourceBufferReference;\n }\n\n public append(\n operation: BufferOperation,\n type: SourceBufferName,\n pending?: boolean,\n ) {\n const queue = this.queues[type];\n queue.push(operation);\n if (queue.length === 1 && !pending) {\n this.executeNext(type);\n }\n }\n\n public insertAbort(operation: BufferOperation, type: SourceBufferName) {\n const queue = this.queues[type];\n queue.unshift(operation);\n this.executeNext(type);\n }\n\n public appendBlocker(type: SourceBufferName): Promise<{}> {\n let execute;\n const promise: Promise<{}> = new Promise((resolve) => {\n execute = resolve;\n });\n const operation: BufferOperation = {\n execute,\n onStart: () => {},\n onComplete: () => {},\n onError: () => {},\n };\n\n this.append(operation, type);\n return promise;\n }\n\n public executeNext(type: SourceBufferName) {\n const queue = this.queues[type];\n if (queue.length) {\n const operation: BufferOperation = queue[0];\n try {\n // Operations are expected to result in an 'updateend' event being fired. If not, the queue will lock. 
Operations\n // which do not end with this event must call _onSBUpdateEnd manually\n operation.execute();\n } catch (error) {\n logger.warn(\n `[buffer-operation-queue]: Exception executing \"${type}\" SourceBuffer operation: ${error}`,\n );\n operation.onError(error);\n\n // Only shift the current operation off, otherwise the updateend handler will do this for us\n const sb = this.buffers[type];\n if (!sb?.updating) {\n this.shiftAndExecuteNext(type);\n }\n }\n }\n }\n\n public shiftAndExecuteNext(type: SourceBufferName) {\n this.queues[type].shift();\n this.executeNext(type);\n }\n\n public current(type: SourceBufferName) {\n return this.queues[type][0];\n }\n}\n", "import { Events } from '../events';\nimport { logger } from '../utils/logger';\nimport { ErrorDetails, ErrorTypes } from '../errors';\nimport { BufferHelper } from '../utils/buffer-helper';\nimport {\n getCodecCompatibleName,\n pickMostCompleteCodecName,\n} from '../utils/codecs';\nimport {\n getMediaSource,\n isManagedMediaSource,\n} from '../utils/mediasource-helper';\nimport { ElementaryStreamTypes } from '../loader/fragment';\nimport type { TrackSet } from '../types/track';\nimport BufferOperationQueue from './buffer-operation-queue';\nimport {\n BufferOperation,\n SourceBuffers,\n SourceBufferName,\n SourceBufferListeners,\n} from '../types/buffer';\nimport type {\n LevelUpdatedData,\n BufferAppendingData,\n MediaAttachingData,\n ManifestParsedData,\n BufferCodecsData,\n BufferEOSData,\n BufferFlushingData,\n FragParsedData,\n FragChangedData,\n ErrorData,\n} from '../types/events';\nimport type { ComponentAPI } from '../types/component-api';\nimport type { ChunkMetadata } from '../types/transmuxer';\nimport type Hls from '../hls';\nimport type { LevelDetails } from '../loader/level-details';\nimport type { HlsConfig } from '../config';\n\nconst VIDEO_CODEC_PROFILE_REPLACE =\n /(avc[1234]|hvc1|hev1|dvh[1e]|vp09|av01)(?:\\.[^.,]+)+/;\n\ninterface BufferedChangeEvent extends Event {\n readonly addedRanges?: TimeRanges;\n readonly removedRanges?: TimeRanges;\n}\n\nexport default class BufferController implements ComponentAPI {\n // The level details used to determine duration, target-duration and live\n private details: LevelDetails | null = null;\n // cache the self generated object url to detect hijack of video tag\n private _objectUrl: string | null = null;\n // A queue of buffer operations which require the SourceBuffer to not be updating upon execution\n private operationQueue!: BufferOperationQueue;\n // References to event listeners for each SourceBuffer, so that they can be referenced for event removal\n private listeners!: SourceBufferListeners;\n\n private hls: Hls;\n\n // The number of BUFFER_CODEC events received before any sourceBuffers are created\n public bufferCodecEventsExpected: number = 0;\n\n // The total number of BUFFER_CODEC events received\n private _bufferCodecEventsTotal: number = 0;\n\n // A reference to the attached media element\n public media: HTMLMediaElement | null = null;\n\n // A reference to the active media source\n public mediaSource: MediaSource | null = null;\n\n // Last MP3 audio chunk appended\n private lastMpegAudioChunk: ChunkMetadata | null = null;\n\n private appendSource: boolean;\n\n // counters\n public appendErrors = {\n audio: 0,\n video: 0,\n audiovideo: 0,\n };\n\n public tracks: TrackSet = {};\n public pendingTracks: TrackSet = {};\n public sourceBuffer!: SourceBuffers;\n\n protected log: (msg: any) => void;\n protected warn: (msg: any, obj?: any) => void;\n protected 
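/*\n * Sketch of the BufferOperationQueue contract relied on by this controller:\n * every queued operation must end in an 'updateend' event (or invoke\n * _onSBUpdateEnd manually) so the queue can advance. Values are assumed:\n *\n * queue.append(\n * {\n * execute: () => sb.appendBuffer(chunk), // fires 'updateend' when done\n * onStart: () => {},\n * onComplete: () => {},\n * onError: (error: Error) => console.warn(error),\n * },\n * 'audio',\n * );\n */ 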
error: (msg: any, obj?: any) => void;\n\n constructor(hls: Hls) {\n this.hls = hls;\n const logPrefix = '[buffer-controller]';\n this.appendSource = isManagedMediaSource(\n getMediaSource(hls.config.preferManagedMediaSource),\n );\n this.log = logger.log.bind(logger, logPrefix);\n this.warn = logger.warn.bind(logger, logPrefix);\n this.error = logger.error.bind(logger, logPrefix);\n this._initSourceBuffer();\n this.registerListeners();\n }\n\n public hasSourceTypes(): boolean {\n return (\n this.getSourceBufferTypes().length > 0 ||\n Object.keys(this.pendingTracks).length > 0\n );\n }\n\n public destroy() {\n this.unregisterListeners();\n this.details = null;\n this.lastMpegAudioChunk = null;\n // @ts-ignore\n this.hls = null;\n }\n\n protected registerListeners() {\n const { hls } = this;\n hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);\n hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.on(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.on(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.on(Events.BUFFER_APPENDING, this.onBufferAppending, this);\n hls.on(Events.BUFFER_CODECS, this.onBufferCodecs, this);\n hls.on(Events.BUFFER_EOS, this.onBufferEos, this);\n hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.on(Events.LEVEL_UPDATED, this.onLevelUpdated, this);\n hls.on(Events.FRAG_PARSED, this.onFragParsed, this);\n hls.on(Events.FRAG_CHANGED, this.onFragChanged, this);\n }\n\n protected unregisterListeners() {\n const { hls } = this;\n hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);\n hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);\n hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);\n hls.off(Events.MANIFEST_PARSED, this.onManifestParsed, this);\n hls.off(Events.BUFFER_RESET, this.onBufferReset, this);\n hls.off(Events.BUFFER_APPENDING, this.onBufferAppending, this);\n hls.off(Events.BUFFER_CODECS, this.onBufferCodecs, this);\n hls.off(Events.BUFFER_EOS, this.onBufferEos, this);\n hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);\n hls.off(Events.LEVEL_UPDATED, this.onLevelUpdated, this);\n hls.off(Events.FRAG_PARSED, this.onFragParsed, this);\n hls.off(Events.FRAG_CHANGED, this.onFragChanged, this);\n }\n\n private _initSourceBuffer() {\n this.sourceBuffer = {};\n this.operationQueue = new BufferOperationQueue(this.sourceBuffer);\n this.listeners = {\n audio: [],\n video: [],\n audiovideo: [],\n };\n this.appendErrors = {\n audio: 0,\n video: 0,\n audiovideo: 0,\n };\n this.lastMpegAudioChunk = null;\n }\n\n private onManifestLoading() {\n this.bufferCodecEventsExpected = this._bufferCodecEventsTotal = 0;\n this.details = null;\n }\n\n protected onManifestParsed(\n event: Events.MANIFEST_PARSED,\n data: ManifestParsedData,\n ) {\n // in case of alt audio 2 BUFFER_CODECS events will be triggered, one per stream controller\n // sourcebuffers will be created all at once when the expected nb of tracks will be reached\n // in case alt audio is not used, only one BUFFER_CODEC event will be fired from main stream controller\n // it will contain the expected nb of source buffers, no need to compute it\n let codecEvents: number = 2;\n if ((data.audio && !data.video) || !data.altAudio || !__USE_ALT_AUDIO__) {\n codecEvents = 1;\n }\n this.bufferCodecEventsExpected = this._bufferCodecEventsTotal = codecEvents;\n this.log(`${this.bufferCodecEventsExpected} bufferCodec event(s) expected`);\n }\n\n protected 
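/*\n * Worked examples for bufferCodecEventsExpected above:\n * - audio-only stream (data.audio && !data.video) -> 1 event expected\n * - muxed A/V without alt audio (data.altAudio falsy) -> 1 event expected\n * - A/V plus alt audio (with the __USE_ALT_AUDIO__ build flag) -> 2 events,\n * one from the main and one from the audio stream controller\n */ 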
onMediaAttaching(\n event: Events.MEDIA_ATTACHING,\n data: MediaAttachingData,\n ) {\n const media = (this.media = data.media);\n const MediaSource = getMediaSource(this.appendSource);\n\n if (media && MediaSource) {\n const ms = (this.mediaSource = new MediaSource());\n this.log(`created media source: ${ms.constructor?.name}`);\n // MediaSource listeners are arrow functions with a lexical scope, and do not need to be bound\n ms.addEventListener('sourceopen', this._onMediaSourceOpen);\n ms.addEventListener('sourceended', this._onMediaSourceEnded);\n ms.addEventListener('sourceclose', this._onMediaSourceClose);\n if (this.appendSource) {\n ms.addEventListener('startstreaming', this._onStartStreaming);\n ms.addEventListener('endstreaming', this._onEndStreaming);\n }\n\n // cache the locally generated object url\n const objectUrl = (this._objectUrl = self.URL.createObjectURL(ms));\n // link video and media Source\n if (this.appendSource) {\n try {\n media.removeAttribute('src');\n // ManagedMediaSource will not open without disableRemotePlayback set to false or source alternatives\n const MMS = (self as any).ManagedMediaSource;\n media.disableRemotePlayback =\n media.disableRemotePlayback || (MMS && ms instanceof MMS);\n removeSourceChildren(media);\n addSource(media, objectUrl);\n media.load();\n } catch (error) {\n media.src = objectUrl;\n }\n } else {\n media.src = objectUrl;\n }\n media.addEventListener('emptied', this._onMediaEmptied);\n }\n }\n private _onEndStreaming = (event) => {\n if (!this.hls) {\n return;\n }\n this.hls.pauseBuffering();\n };\n private _onStartStreaming = (event) => {\n if (!this.hls) {\n return;\n }\n this.hls.resumeBuffering();\n };\n\n protected onMediaDetaching() {\n const { media, mediaSource, _objectUrl } = this;\n if (mediaSource) {\n this.log('media source detaching');\n if (mediaSource.readyState === 'open') {\n try {\n // endOfStream could trigger exception if any sourcebuffer is in updating state\n // we don't really care about checking sourcebuffer state here,\n // as we are anyway detaching the MediaSource\n // let's just avoid this exception to propagate\n mediaSource.endOfStream();\n } catch (err) {\n this.warn(\n `onMediaDetaching: ${err.message} while calling endOfStream`,\n );\n }\n }\n // Clean up the SourceBuffers by invoking onBufferReset\n this.onBufferReset();\n mediaSource.removeEventListener('sourceopen', this._onMediaSourceOpen);\n mediaSource.removeEventListener('sourceended', this._onMediaSourceEnded);\n mediaSource.removeEventListener('sourceclose', this._onMediaSourceClose);\n if (this.appendSource) {\n mediaSource.removeEventListener(\n 'startstreaming',\n this._onStartStreaming,\n );\n mediaSource.removeEventListener('endstreaming', this._onEndStreaming);\n }\n\n // Detach properly the MediaSource from the HTMLMediaElement as\n // suggested in https://github.com/w3c/media-source/issues/53.\n if (media) {\n media.removeEventListener('emptied', this._onMediaEmptied);\n if (_objectUrl) {\n self.URL.revokeObjectURL(_objectUrl);\n }\n\n // clean up video tag src only if it's our own url. 
some external libraries might\n // hijack the video tag and change its 'src' without destroying the Hls instance first\n if (this.mediaSrc === _objectUrl) {\n media.removeAttribute('src');\n if (this.appendSource) {\n removeSourceChildren(media);\n }\n media.load();\n } else {\n this.warn(\n 'media|source.src was changed by a third party - skip cleanup',\n );\n }\n }\n\n this.mediaSource = null;\n this.media = null;\n this._objectUrl = null;\n this.bufferCodecEventsExpected = this._bufferCodecEventsTotal;\n this.pendingTracks = {};\n this.tracks = {};\n }\n\n this.hls.trigger(Events.MEDIA_DETACHED, undefined);\n }\n\n protected onBufferReset() {\n this.getSourceBufferTypes().forEach((type) => {\n this.resetBuffer(type);\n });\n this._initSourceBuffer();\n this.hls.resumeBuffering();\n }\n\n private resetBuffer(type: SourceBufferName) {\n const sb = this.sourceBuffer[type];\n try {\n if (sb) {\n this.removeBufferListeners(type);\n // Synchronously remove the SB from the map before the next call in order to prevent an async function from\n // accessing it\n this.sourceBuffer[type] = undefined;\n if (this.mediaSource?.sourceBuffers.length) {\n this.mediaSource.removeSourceBuffer(sb);\n }\n }\n } catch (err) {\n this.warn(`onBufferReset ${type}`, err);\n }\n }\n\n protected onBufferCodecs(\n event: Events.BUFFER_CODECS,\n data: BufferCodecsData,\n ) {\n const sourceBufferCount = this.getSourceBufferTypes().length;\n const trackNames = Object.keys(data);\n trackNames.forEach((trackName) => {\n if (sourceBufferCount) {\n // check if SourceBuffer codec needs to change\n const track = this.tracks[trackName];\n if (track && typeof track.buffer.changeType === 'function') {\n const { id, codec, levelCodec, container, metadata } =\n data[trackName];\n const currentCodecFull = pickMostCompleteCodecName(\n track.codec,\n track.levelCodec,\n );\n const currentCodec = currentCodecFull?.replace(\n VIDEO_CODEC_PROFILE_REPLACE,\n '$1',\n );\n let trackCodec = pickMostCompleteCodecName(codec, levelCodec);\n const nextCodec = trackCodec?.replace(\n VIDEO_CODEC_PROFILE_REPLACE,\n '$1',\n );\n if (trackCodec && currentCodec !== nextCodec) {\n if (trackName.slice(0, 5) === 'audio') {\n trackCodec = getCodecCompatibleName(\n trackCodec,\n this.appendSource,\n );\n }\n const mimeType = `${container};codecs=${trackCodec}`;\n this.appendChangeType(trackName, mimeType);\n this.log(`switching codec ${currentCodecFull} to ${trackCodec}`);\n this.tracks[trackName] = {\n buffer: track.buffer,\n codec,\n container,\n levelCodec,\n metadata,\n id,\n };\n }\n }\n } else {\n // if source buffer(s) not created yet, store buffer tracks in this.pendingTracks\n this.pendingTracks[trackName] = data[trackName];\n }\n });\n\n // if sourcebuffers already created, do nothing ...\n if (sourceBufferCount) {\n return;\n }\n\n const bufferCodecEventsExpected = Math.max(\n this.bufferCodecEventsExpected - 1,\n 0,\n );\n if (this.bufferCodecEventsExpected !== bufferCodecEventsExpected) {\n this.log(\n `${bufferCodecEventsExpected} bufferCodec event(s) expected ${trackNames.join(\n ',',\n )}`,\n );\n this.bufferCodecEventsExpected = bufferCodecEventsExpected;\n }\n if (this.mediaSource && this.mediaSource.readyState === 'open') {\n this.checkPendingTracks();\n }\n }\n\n protected appendChangeType(type: SourceBufferName, mimeType: string) {\n const { operationQueue } = this;\n const operation: BufferOperation = {\n execute: () => {\n const sb = this.sourceBuffer[type];\n if (sb) {\n this.log(`changing ${type} sourceBuffer type to ${mimeType}`);\n 
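/*\n * SourceBuffer.changeType() swaps codec/container in place without tearing\n * down the buffer; support is feature-detected in onBufferCodecs above via\n * typeof track.buffer.changeType === 'function'. For example, a switch from\n * 'video/mp4;codecs=avc1.64001f' to 'video/mp4;codecs=hvc1.1.6.L93.B0'\n * (values illustrative) can reuse the same SourceBuffer.\n */ 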
sb.changeType(mimeType);\n }\n operationQueue.shiftAndExecuteNext(type);\n },\n onStart: () => {},\n onComplete: () => {},\n onError: (error: Error) => {\n this.warn(`Failed to change ${type} SourceBuffer type`, error);\n },\n };\n\n operationQueue.append(operation, type, !!this.pendingTracks[type]);\n }\n\n protected onBufferAppending(\n event: Events.BUFFER_APPENDING,\n eventData: BufferAppendingData,\n ) {\n const { hls, operationQueue, tracks } = this;\n const { data, type, frag, part, chunkMeta } = eventData;\n const chunkStats = chunkMeta.buffering[type];\n\n const bufferAppendingStart = self.performance.now();\n chunkStats.start = bufferAppendingStart;\n const fragBuffering = frag.stats.buffering;\n const partBuffering = part ? part.stats.buffering : null;\n if (fragBuffering.start === 0) {\n fragBuffering.start = bufferAppendingStart;\n }\n if (partBuffering && partBuffering.start === 0) {\n partBuffering.start = bufferAppendingStart;\n }\n\n // TODO: Only update timestampOffset when audio/mpeg fragment or part is not contiguous with previously appended\n // Adjusting `SourceBuffer.timestampOffset` (desired point in the timeline where the next frames should be appended)\n // in Chrome browser when we detect MPEG audio container and time delta between level PTS and `SourceBuffer.timestampOffset`\n // is greater than 100ms (this is enough to handle seek for VOD or level change for LIVE videos).\n // More info here: https://github.com/video-dev/hls.js/issues/332#issuecomment-257986486\n const audioTrack = tracks.audio;\n let checkTimestampOffset = false;\n if (type === 'audio' && audioTrack?.container === 'audio/mpeg') {\n checkTimestampOffset =\n !this.lastMpegAudioChunk ||\n chunkMeta.id === 1 ||\n this.lastMpegAudioChunk.sn !== chunkMeta.sn;\n this.lastMpegAudioChunk = chunkMeta;\n }\n\n const fragStart = frag.start;\n const operation: BufferOperation = {\n execute: () => {\n chunkStats.executeStart = self.performance.now();\n if (checkTimestampOffset) {\n const sb = this.sourceBuffer[type];\n if (sb) {\n const delta = fragStart - sb.timestampOffset;\n if (Math.abs(delta) >= 0.1) {\n this.log(\n `Updating audio SourceBuffer timestampOffset to ${fragStart} (delta: ${delta}) sn: ${frag.sn}`,\n );\n sb.timestampOffset = fragStart;\n }\n }\n }\n this.appendExecutor(data, type);\n },\n onStart: () => {\n // logger.debug(`[buffer-controller]: ${type} SourceBuffer updatestart`);\n },\n onComplete: () => {\n // logger.debug(`[buffer-controller]: ${type} SourceBuffer updateend`);\n const end = self.performance.now();\n chunkStats.executeEnd = chunkStats.end = end;\n if (fragBuffering.first === 0) {\n fragBuffering.first = end;\n }\n if (partBuffering && partBuffering.first === 0) {\n partBuffering.first = end;\n }\n\n const { sourceBuffer } = this;\n const timeRanges = {};\n for (const type in sourceBuffer) {\n timeRanges[type] = BufferHelper.getBuffered(sourceBuffer[type]);\n }\n this.appendErrors[type] = 0;\n if (type === 'audio' || type === 'video') {\n this.appendErrors.audiovideo = 0;\n } else {\n this.appendErrors.audio = 0;\n this.appendErrors.video = 0;\n }\n this.hls.trigger(Events.BUFFER_APPENDED, {\n type,\n frag,\n part,\n chunkMeta,\n parent: frag.type,\n timeRanges,\n });\n },\n onError: (error: Error) => {\n // in case any error occurred while appending, put back segment in segments table\n const event: ErrorData = {\n type: ErrorTypes.MEDIA_ERROR,\n parent: frag.type,\n details: ErrorDetails.BUFFER_APPEND_ERROR,\n sourceBufferName: type,\n frag,\n part,\n chunkMeta,\n 
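/*\n * Note: 'error' and 'err' below carry the same exception. The handler that\n * follows upgrades details to BUFFER_FULL_ERROR when the DOMException code is\n * QUOTA_EXCEEDED_ERR (legacy numeric code 22); any other append failure is\n * retried up to hls.config.appendErrorMaxRetry times before turning fatal.\n */ 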
error,\n err: error,\n fatal: false,\n };\n\n if ((error as DOMException).code === DOMException.QUOTA_EXCEEDED_ERR) {\n // QuotaExceededError: http://www.w3.org/TR/html5/infrastructure.html#quotaexceedederror\n // let's stop appending any segments, and report BUFFER_FULL_ERROR error\n event.details = ErrorDetails.BUFFER_FULL_ERROR;\n } else {\n const appendErrorCount = ++this.appendErrors[type];\n event.details = ErrorDetails.BUFFER_APPEND_ERROR;\n /* with UHD content, we could get a loop of quota-exceeded errors until the\n browser is able to evict some data from the SourceBuffer. Retrying can help recover.\n */\n this.warn(\n `Failed ${appendErrorCount}/${hls.config.appendErrorMaxRetry} times to append segment in \"${type}\" sourceBuffer`,\n );\n if (appendErrorCount >= hls.config.appendErrorMaxRetry) {\n event.fatal = true;\n }\n }\n hls.trigger(Events.ERROR, event);\n },\n };\n operationQueue.append(operation, type, !!this.pendingTracks[type]);\n }\n\n protected onBufferFlushing(\n event: Events.BUFFER_FLUSHING,\n data: BufferFlushingData,\n ) {\n const { operationQueue } = this;\n const flushOperation = (type: SourceBufferName): BufferOperation => ({\n execute: this.removeExecutor.bind(\n this,\n type,\n data.startOffset,\n data.endOffset,\n ),\n onStart: () => {\n // logger.debug(`[buffer-controller]: Started flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);\n },\n onComplete: () => {\n // logger.debug(`[buffer-controller]: Finished flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);\n this.hls.trigger(Events.BUFFER_FLUSHED, { type });\n },\n onError: (error: Error) => {\n this.warn(`Failed to remove from ${type} SourceBuffer`, error);\n },\n });\n\n if (data.type) {\n operationQueue.append(flushOperation(data.type), data.type);\n } else {\n this.getSourceBufferTypes().forEach((type: SourceBufferName) => {\n operationQueue.append(flushOperation(type), type);\n });\n }\n }\n\n protected onFragParsed(event: Events.FRAG_PARSED, data: FragParsedData) {\n const { frag, part } = data;\n const buffersAppendedTo: Array<SourceBufferName> = [];\n const elementaryStreams = part\n ? part.elementaryStreams\n : frag.elementaryStreams;\n if (elementaryStreams[ElementaryStreamTypes.AUDIOVIDEO]) {\n buffersAppendedTo.push('audiovideo');\n } else {\n if (elementaryStreams[ElementaryStreamTypes.AUDIO]) {\n buffersAppendedTo.push('audio');\n }\n if (elementaryStreams[ElementaryStreamTypes.VIDEO]) {\n buffersAppendedTo.push('video');\n }\n }\n\n const onUnblocked = () => {\n const now = self.performance.now();\n frag.stats.buffering.end = now;\n if (part) {\n part.stats.buffering.end = now;\n }\n const stats = part ? part.stats : frag.stats;\n this.hls.trigger(Events.FRAG_BUFFERED, {\n frag,\n part,\n stats,\n id: frag.type,\n });\n };\n\n if (buffersAppendedTo.length === 0) {\n this.warn(\n `Fragments must have at least one ElementaryStreamType set. 
type: ${frag.type} level: ${frag.level} sn: ${frag.sn}`,\n );\n }\n\n this.blockBuffers(onUnblocked, buffersAppendedTo);\n }\n\n private onFragChanged(event: Events.FRAG_CHANGED, data: FragChangedData) {\n this.trimBuffers();\n }\n\n // on BUFFER_EOS mark matching sourcebuffer(s) as ended and queue mediaSource.endOfStream() once all have ended;\n // an undefined data.type will mark all buffers as EOS.\n protected onBufferEos(event: Events.BUFFER_EOS, data: BufferEOSData) {\n const ended = this.getSourceBufferTypes().reduce((acc, type) => {\n const sb = this.sourceBuffer[type];\n if (sb && (!data.type || data.type === type)) {\n sb.ending = true;\n if (!sb.ended) {\n sb.ended = true;\n this.log(`${type} sourceBuffer now EOS`);\n }\n }\n return acc && !!(!sb || sb.ended);\n }, true);\n\n if (ended) {\n this.log(`Queueing mediaSource.endOfStream()`);\n this.blockBuffers(() => {\n this.getSourceBufferTypes().forEach((type) => {\n const sb = this.sourceBuffer[type];\n if (sb) {\n sb.ending = false;\n }\n });\n const { mediaSource } = this;\n if (!mediaSource || mediaSource.readyState !== 'open') {\n if (mediaSource) {\n this.log(\n `Could not call mediaSource.endOfStream(). mediaSource.readyState: ${mediaSource.readyState}`,\n );\n }\n return;\n }\n this.log(`Calling mediaSource.endOfStream()`);\n // Allow this to throw and be caught by the enqueueing function\n mediaSource.endOfStream();\n });\n }\n }\n\n protected onLevelUpdated(\n event: Events.LEVEL_UPDATED,\n { details }: LevelUpdatedData,\n ) {\n if (!details.fragments.length) {\n return;\n }\n this.details = details;\n\n if (this.getSourceBufferTypes().length) {\n this.blockBuffers(this.updateMediaElementDuration.bind(this));\n } else {\n this.updateMediaElementDuration();\n }\n }\n\n trimBuffers() {\n const { hls, details, media } = this;\n if (!media || details === null) {\n return;\n }\n\n const sourceBufferTypes = this.getSourceBufferTypes();\n if (!sourceBufferTypes.length) {\n return;\n }\n\n const config: Readonly<HlsConfig> = hls.config;\n const currentTime = media.currentTime;\n const targetDuration = details.levelTargetDuration;\n\n // Support for deprecated liveBackBufferLength\n const backBufferLength =\n details.live && config.liveBackBufferLength !== null\n ? 
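/*\n * Worked example for the back-buffer target computed below: with\n * currentTime = 125, targetDuration = 10 and backBufferLength = 30,\n * targetBackBufferPosition = floor(125 / 10) * 10 - max(30, 10) = 90,\n * so media buffered before t = 90s becomes eligible for flushing.\n */ 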
config.liveBackBufferLength\n : config.backBufferLength;\n\n if (Number.isFinite(backBufferLength) && backBufferLength > 0) {\n const maxBackBufferLength = Math.max(backBufferLength, targetDuration);\n const targetBackBufferPosition =\n Math.floor(currentTime / targetDuration) * targetDuration -\n maxBackBufferLength;\n\n this.flushBackBuffer(\n currentTime,\n targetDuration,\n targetBackBufferPosition,\n );\n }\n\n if (\n Number.isFinite(config.frontBufferFlushThreshold) &&\n config.frontBufferFlushThreshold > 0\n ) {\n const frontBufferLength = Math.max(\n config.maxBufferLength,\n config.frontBufferFlushThreshold,\n );\n\n const maxFrontBufferLength = Math.max(frontBufferLength, targetDuration);\n const targetFrontBufferPosition =\n Math.floor(currentTime / targetDuration) * targetDuration +\n maxFrontBufferLength;\n\n this.flushFrontBuffer(\n currentTime,\n targetDuration,\n targetFrontBufferPosition,\n );\n }\n }\n\n flushBackBuffer(\n currentTime: number,\n targetDuration: number,\n targetBackBufferPosition: number,\n ) {\n const { details, sourceBuffer } = this;\n const sourceBufferTypes = this.getSourceBufferTypes();\n\n sourceBufferTypes.forEach((type: SourceBufferName) => {\n const sb = sourceBuffer[type];\n if (sb) {\n const buffered = BufferHelper.getBuffered(sb);\n // when target buffer start exceeds actual buffer start\n if (\n buffered.length > 0 &&\n targetBackBufferPosition > buffered.start(0)\n ) {\n this.hls.trigger(Events.BACK_BUFFER_REACHED, {\n bufferEnd: targetBackBufferPosition,\n });\n\n // Support for deprecated event:\n if (details?.live) {\n this.hls.trigger(Events.LIVE_BACK_BUFFER_REACHED, {\n bufferEnd: targetBackBufferPosition,\n });\n } else if (\n sb.ended &&\n buffered.end(buffered.length - 1) - currentTime < targetDuration * 2\n ) {\n this.log(\n `Cannot flush ${type} back buffer while SourceBuffer is in ended state`,\n );\n return;\n }\n\n this.hls.trigger(Events.BUFFER_FLUSHING, {\n startOffset: 0,\n endOffset: targetBackBufferPosition,\n type,\n });\n }\n }\n });\n }\n\n flushFrontBuffer(\n currentTime: number,\n targetDuration: number,\n targetFrontBufferPosition: number,\n ) {\n const { sourceBuffer } = this;\n const sourceBufferTypes = this.getSourceBufferTypes();\n\n sourceBufferTypes.forEach((type: SourceBufferName) => {\n const sb = sourceBuffer[type];\n if (sb) {\n const buffered = BufferHelper.getBuffered(sb);\n const numBufferedRanges = buffered.length;\n // The buffer is either empty or contiguous\n if (numBufferedRanges < 2) {\n return;\n }\n const bufferStart = buffered.start(numBufferedRanges - 1);\n const bufferEnd = buffered.end(numBufferedRanges - 1);\n // No flush if we can tolerate the current buffer length or the current buffer range we would flush is contiguous with current position\n if (\n targetFrontBufferPosition > bufferStart ||\n (currentTime >= bufferStart && currentTime <= bufferEnd)\n ) {\n return;\n } else if (sb.ended && currentTime - bufferEnd < 2 * targetDuration) {\n this.log(\n `Cannot flush ${type} front buffer while SourceBuffer is in ended state`,\n );\n return;\n }\n\n this.hls.trigger(Events.BUFFER_FLUSHING, {\n startOffset: bufferStart,\n endOffset: Infinity,\n type,\n });\n }\n });\n }\n\n /**\n * Update Media Source duration to current level duration or override to Infinity if configuration parameter\n * 'liveDurationInfinity` is set to `true`\n * More details: https://github.com/video-dev/hls.js/issues/355\n */\n private updateMediaElementDuration() {\n if (\n !this.details ||\n !this.media ||\n 
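/*\n * Example of the live-duration override described above: with\n * hls.config.liveDurationInfinity = true, mediaSource.duration is set to\n * Infinity and the seekable window comes from setLiveSeekableRange() in\n * updateSeekableRange() below; e.g. fragments[0].start = 100 and\n * totalduration = 60 yield a seekable range of 100-160.\n */ 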
!this.mediaSource ||\n this.mediaSource.readyState !== 'open'\n ) {\n return;\n }\n const { details, hls, media, mediaSource } = this;\n const levelDuration = details.fragments[0].start + details.totalduration;\n const mediaDuration = media.duration;\n const msDuration = Number.isFinite(mediaSource.duration)\n ? mediaSource.duration\n : 0;\n\n if (details.live && hls.config.liveDurationInfinity) {\n // Override duration to Infinity\n mediaSource.duration = Infinity;\n this.updateSeekableRange(details);\n } else if (\n (levelDuration > msDuration && levelDuration > mediaDuration) ||\n !Number.isFinite(mediaDuration)\n ) {\n // levelDuration was the last value we set.\n // not using mediaSource.duration as the browser may tweak this value\n // only update Media Source duration if its value increases; this is to avoid\n // flushing already buffered portion when switching between quality level\n this.log(`Updating Media Source duration to ${levelDuration.toFixed(3)}`);\n mediaSource.duration = levelDuration;\n }\n }\n\n updateSeekableRange(levelDetails: LevelDetails) {\n const mediaSource = this.mediaSource;\n const fragments = levelDetails.fragments;\n const len = fragments.length;\n if (len && levelDetails.live && mediaSource?.setLiveSeekableRange) {\n const start = Math.max(0, fragments[0].start);\n const end = Math.max(start, start + levelDetails.totalduration);\n this.log(\n `Media Source duration is set to ${mediaSource.duration}. Setting seekable range to ${start}-${end}.`,\n );\n mediaSource.setLiveSeekableRange(start, end);\n }\n }\n\n protected checkPendingTracks() {\n const { bufferCodecEventsExpected, operationQueue, pendingTracks } = this;\n\n // Check if we've received all of the expected bufferCodec events. When none remain, create all the sourceBuffers at once.\n // This is important because the MSE spec allows implementations to throw QuotaExceededErrors if creating new sourceBuffers after\n // data has been appended to existing ones.\n // 2 tracks is the max (one for audio, one for video). If we've reached this max, go ahead and create the buffers.\n const pendingTracksCount = Object.keys(pendingTracks).length;\n if (\n pendingTracksCount &&\n (!bufferCodecEventsExpected ||\n pendingTracksCount === 2 ||\n 'audiovideo' in pendingTracks)\n ) {\n // ok, let's create them now!\n this.createSourceBuffers(pendingTracks);\n this.pendingTracks = {};\n // append any pending segments now!\n const buffers = this.getSourceBufferTypes();\n if (buffers.length) {\n this.hls.trigger(Events.BUFFER_CREATED, { tracks: this.tracks });\n buffers.forEach((type: SourceBufferName) => {\n operationQueue.executeNext(type);\n });\n } else {\n const error = new Error(\n 'could not create source buffer for media codec(s)',\n );\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_INCOMPATIBLE_CODECS_ERROR,\n fatal: true,\n error,\n reason: error.message,\n });\n }\n }\n }\n\n protected createSourceBuffers(tracks: TrackSet) {\n const { sourceBuffer, mediaSource } = this;\n if (!mediaSource) {\n throw Error('createSourceBuffers called when mediaSource was null');\n }\n for (const trackName in tracks) {\n if (!sourceBuffer[trackName]) {\n const track = tracks[trackName as keyof TrackSet];\n if (!track) {\n throw Error(\n `attempting to create SourceBuffer for ${trackName}, but track info does not exist`,\n );\n }\n // use levelCodec as first priority unless it contains multiple comma-separated codec values\n let codec =\n track.levelCodec?.indexOf(',') === -1\n ? 
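/*\n * Worked example for the codec choice below: if track.levelCodec is\n * 'avc1.64001f,mp4a.40.2' (a comma-separated pair from the multivariant\n * playlist) it cannot name a single SourceBuffer codec, so track.codec from\n * the parsed media is used instead; otherwise levelCodec wins. The resulting\n * MIME type then looks like 'video/mp4;codecs=avc1.64001f'.\n */ 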
track.levelCodec\n : track.codec;\n if (codec) {\n if (trackName.slice(0, 5) === 'audio') {\n codec = getCodecCompatibleName(codec, this.appendSource);\n }\n }\n const mimeType = `${track.container};codecs=${codec}`;\n this.log(`creating sourceBuffer(${mimeType})`);\n try {\n const sb = (sourceBuffer[trackName] =\n mediaSource.addSourceBuffer(mimeType));\n const sbName = trackName as SourceBufferName;\n this.addBufferListener(sbName, 'updatestart', this._onSBUpdateStart);\n this.addBufferListener(sbName, 'updateend', this._onSBUpdateEnd);\n this.addBufferListener(sbName, 'error', this._onSBUpdateError);\n // ManagedSourceBuffer bufferedchange event\n if (this.appendSource) {\n this.addBufferListener(\n sbName,\n 'bufferedchange',\n (type: SourceBufferName, event: BufferedChangeEvent) => {\n // If media was ejected check for a change. Added ranges are redundant with changes on 'updateend' event.\n const removedRanges = event.removedRanges;\n if (removedRanges?.length) {\n this.hls.trigger(Events.BUFFER_FLUSHED, {\n type: trackName as SourceBufferName,\n });\n }\n },\n );\n }\n\n this.tracks[trackName] = {\n buffer: sb,\n codec: codec,\n container: track.container,\n levelCodec: track.levelCodec,\n metadata: track.metadata,\n id: track.id,\n };\n } catch (err) {\n this.error(`error while trying to add sourceBuffer: ${err.message}`);\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_ADD_CODEC_ERROR,\n fatal: false,\n error: err,\n sourceBufferName: trackName as SourceBufferName,\n mimeType: mimeType,\n });\n }\n }\n }\n }\n\n // Keep as arrow functions so that we can reference these functions directly as event listeners\n private _onMediaSourceOpen = () => {\n const { media, mediaSource } = this;\n this.log('Media source opened');\n if (media) {\n media.removeEventListener('emptied', this._onMediaEmptied);\n this.updateMediaElementDuration();\n this.hls.trigger(Events.MEDIA_ATTACHED, {\n media,\n mediaSource: mediaSource as MediaSource,\n });\n }\n\n if (mediaSource) {\n // once received, don't listen anymore to sourceopen event\n mediaSource.removeEventListener('sourceopen', this._onMediaSourceOpen);\n }\n this.checkPendingTracks();\n };\n\n private _onMediaSourceClose = () => {\n this.log('Media source closed');\n };\n\n private _onMediaSourceEnded = () => {\n this.log('Media source ended');\n };\n\n private _onMediaEmptied = () => {\n const { mediaSrc, _objectUrl } = this;\n if (mediaSrc !== _objectUrl) {\n logger.error(\n `Media element src was set while attaching MediaSource (${_objectUrl} > ${mediaSrc})`,\n );\n }\n };\n\n private get mediaSrc(): string | undefined {\n const media = this.media?.querySelector?.('source') || this.media;\n return media?.src;\n }\n\n private _onSBUpdateStart(type: SourceBufferName) {\n const { operationQueue } = this;\n const operation = operationQueue.current(type);\n operation.onStart();\n }\n\n private _onSBUpdateEnd(type: SourceBufferName) {\n if (this.mediaSource?.readyState === 'closed') {\n this.resetBuffer(type);\n return;\n }\n const { operationQueue } = this;\n const operation = operationQueue.current(type);\n operation.onComplete();\n operationQueue.shiftAndExecuteNext(type);\n }\n\n private _onSBUpdateError(type: SourceBufferName, event: Event) {\n const error = new Error(\n `${type} SourceBuffer error. 
MediaSource readyState: ${this.mediaSource?.readyState}`,\n );\n this.error(`${error}`, event);\n // according to http://www.w3.org/TR/media-source/#sourcebuffer-append-error\n // SourceBuffer errors are not necessarily fatal; if they are, the HTMLMediaElement will fire an error event\n this.hls.trigger(Events.ERROR, {\n type: ErrorTypes.MEDIA_ERROR,\n details: ErrorDetails.BUFFER_APPENDING_ERROR,\n sourceBufferName: type,\n error,\n fatal: false,\n });\n // updateend is always fired after error, so we'll allow that to shift the current operation off of the queue\n const operation = this.operationQueue.current(type);\n if (operation) {\n operation.onError(error);\n }\n }\n\n // This method must result in an updateend event; if remove is not called, _onSBUpdateEnd must be called manually\n private removeExecutor(\n type: SourceBufferName,\n startOffset: number,\n endOffset: number,\n ) {\n const { media, mediaSource, operationQueue, sourceBuffer } = this;\n const sb = sourceBuffer[type];\n if (!media || !mediaSource || !sb) {\n this.warn(\n `Attempting to remove from the ${type} SourceBuffer, but it does not exist`,\n );\n operationQueue.shiftAndExecuteNext(type);\n return;\n }\n const mediaDuration = Number.isFinite(media.duration)\n ? media.duration\n : Infinity;\n const msDuration = Number.isFinite(mediaSource.duration)\n ? mediaSource.duration\n : Infinity;\n const removeStart = Math.max(0, startOffset);\n const removeEnd = Math.min(endOffset, mediaDuration, msDuration);\n if (removeEnd > removeStart && (!sb.ending || sb.ended)) {\n sb.ended = false;\n this.log(\n `Removing [${removeStart},${removeEnd}] from the ${type} SourceBuffer`,\n );\n sb.remove(removeStart, removeEnd);\n } else {\n // Cycle the queue\n operationQueue.shiftAndExecuteNext(type);\n }\n }\n\n // This method must result in an updateend event; if append is not called, _onSBUpdateEnd must be called manually\n private appendExecutor(data: Uint8Array, type: SourceBufferName) {\n const sb = this.sourceBuffer[type];\n if (!sb) {\n if (!this.pendingTracks[type]) {\n throw new Error(\n `Attempting to append to the ${type} SourceBuffer, but it does not exist`,\n );\n }\n return;\n }\n\n sb.ended = false;\n sb.appendBuffer(data);\n }\n\n // Enqueues an operation to each SourceBuffer queue which, upon execution, resolves a promise. When all promises\n // resolve, the onUnblocked function is executed. Functions calling this method do not need to unblock the queue\n // upon completion, since we already do it here\n private blockBuffers(\n onUnblocked: () => void,\n buffers: Array<SourceBufferName> = this.getSourceBufferTypes(),\n ) {\n if (!buffers.length) {\n this.log('Blocking operation requested, but no SourceBuffers exist');\n Promise.resolve().then(onUnblocked);\n return;\n }\n const { operationQueue } = this;\n\n // logger.debug(`[buffer-controller]: Blocking ${buffers} SourceBuffer`);\n const blockingOperations = buffers.map((type) =>\n operationQueue.appendBlocker(type as SourceBufferName),\n );\n Promise.all(blockingOperations).then(() => {\n // logger.debug(`[buffer-controller]: Blocking operation resolved; unblocking ${buffers} SourceBuffer`);\n onUnblocked();\n buffers.forEach((type) => {\n const sb = this.sourceBuffer[type];\n // Only cycle the queue if the SB is not updating. There's a bug in Chrome which sets the SB updating flag to\n // true when changing the MediaSource duration (https://bugs.chromium.org/p/chromium/issues/detail?id=959359&can=2&q=mediasource%20duration)\n // While this is a workaround, it's probably useful to have around\n if (!sb?.updating) {\n operationQueue.shiftAndExecuteNext(type);\n }\n });\n });\n }\n
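\n // Editor's note (illustrative, not part of the original source): the\n // blocking pattern above, reduced to plain promises. Each queue receives a\n // no-op \"blocker\"; once every blocker has started executing, all queues\n // are idle and the critical section can run (names here are hypothetical):\n //\n // const blockers = queues.map(\n // (q) => new Promise<void>((resolve) => q.push(resolve)),\n // );\n // Promise.all(blockers).then(() => criticalSection());\n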
\n private getSourceBufferTypes(): Array<SourceBufferName> {\n return Object.keys(this.sourceBuffer) as Array<SourceBufferName>;\n }\n\n private addBufferListener(\n type: SourceBufferName,\n event: string,\n fn: Function,\n ) {\n const buffer = this.sourceBuffer[type];\n if (!buffer) {\n return;\n }\n const listener = fn.bind(this, type);\n this.listeners[type].push({ event, listener });\n buffer.addEventListener(event, listener);\n }\n\n private removeBufferListeners(type: SourceBufferName) {\n const buffer = this.sourceBuffer[type];\n if (!buffer) {\n return;\n }\n this.listeners[type].forEach((l) => {\n buffer.removeEventListener(l.event, l.listener);\n });\n }\n}\n\nfunction removeSourceChildren(node: HTMLElement) {\n const sourceChildren = node.querySelectorAll('source');\n [].slice.call(sourceChildren).forEach((source) => {\n node.removeChild(source);\n });\n}\n\nfunction addSource(media: HTMLMediaElement, url: string) {\n const source = self.document.createElement('source');\n source.type = 'video/mp4';\n source.src = url;\n media.appendChild(source);\n}\n", "import OutputFilter from './output-filter';\nimport { logger } from '../utils/logger';\n\n/**\n *\n * This code was ported from the dash.js project at:\n * https://github.com/Dash-Industry-Forum/dash.js/blob/development/externals/cea608-parser.js\n * https://github.com/Dash-Industry-Forum/dash.js/commit/8269b26a761e0853bb21d78780ed945144ecdd4d#diff-71bc295a2d6b6b7093a1d3290d53a4b2\n *\n * The original copyright appears below:\n *\n * The copyright in this software is being made available under the BSD License,\n * included below. This software may be subject to other third party and contributor\n * rights, including patent rights, and no such rights are granted under this license.\n *\n * Copyright (c) 2015-2016, DASH Industry Forum.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without modification,\n * are permitted provided that the following conditions are met:\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation and/or\n * other materials provided with the distribution.\n * 2. 
Neither the name of Dash Industry Forum nor the names of its\n * contributors may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n/**\n * Exceptions from regular ASCII. CodePoints are mapped to UTF-16 codes\n */\n\nconst specialCea608CharsCodes = {\n 0x2a: 0xe1, // lowercase a, acute accent\n 0x5c: 0xe9, // lowercase e, acute accent\n 0x5e: 0xed, // lowercase i, acute accent\n 0x5f: 0xf3, // lowercase o, acute accent\n 0x60: 0xfa, // lowercase u, acute accent\n 0x7b: 0xe7, // lowercase c with cedilla\n 0x7c: 0xf7, // division symbol\n 0x7d: 0xd1, // uppercase N tilde\n 0x7e: 0xf1, // lowercase n tilde\n 0x7f: 0x2588, // Full block\n // THIS BLOCK INCLUDES THE 16 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x11 AND LOW BETWEEN 0x30 AND 0x3F\n // THIS MEANS THAT \\x50 MUST BE ADDED TO THE VALUES\n 0x80: 0xae, // Registered symbol (R)\n 0x81: 0xb0, // degree sign\n 0x82: 0xbd, // 1/2 symbol\n 0x83: 0xbf, // Inverted (open) question mark\n 0x84: 0x2122, // Trademark symbol (TM)\n 0x85: 0xa2, // Cents symbol\n 0x86: 0xa3, // Pounds sterling\n 0x87: 0x266a, // Music 8'th note\n 0x88: 0xe0, // lowercase a, grave accent\n 0x89: 0x20, // transparent space (regular)\n 0x8a: 0xe8, // lowercase e, grave accent\n 0x8b: 0xe2, // lowercase a, circumflex accent\n 0x8c: 0xea, // lowercase e, circumflex accent\n 0x8d: 0xee, // lowercase i, circumflex accent\n 0x8e: 0xf4, // lowercase o, circumflex accent\n 0x8f: 0xfb, // lowercase u, circumflex accent\n // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x12 AND LOW BETWEEN 0x20 AND 0x3F\n 0x90: 0xc1, // capital letter A with acute\n 0x91: 0xc9, // capital letter E with acute\n 0x92: 0xd3, // capital letter O with acute\n 0x93: 0xda, // capital letter U with acute\n 0x94: 0xdc, // capital letter U with diaresis\n 0x95: 0xfc, // lowercase letter U with diaeresis\n 0x96: 0x2018, // opening single quote\n 0x97: 0xa1, // inverted exclamation mark\n 0x98: 0x2a, // asterisk\n 0x99: 0x2019, // closing single quote\n 0x9a: 0x2501, // box drawings heavy horizontal\n 0x9b: 0xa9, // copyright sign\n 0x9c: 0x2120, // Service mark\n 0x9d: 0x2022, // (round) bullet\n 0x9e: 0x201c, // Left double quotation mark\n 0x9f: 0x201d, // Right double quotation mark\n 0xa0: 0xc0, // uppercase A, grave accent\n 0xa1: 0xc2, // uppercase A, circumflex\n 0xa2: 0xc7, // uppercase C with cedilla\n 0xa3: 0xc8, // uppercase E, grave accent\n 0xa4: 0xca, // uppercase E, circumflex\n 0xa5: 0xcb, // capital letter E with diaresis\n 0xa6: 0xeb, // lowercase letter e with diaresis\n 0xa7: 0xce, // uppercase I, circumflex\n 0xa8: 0xcf, // uppercase I, with diaresis\n 0xa9: 
0xef, // lowercase i, with diaresis\n 0xaa: 0xd4, // uppercase O, circumflex\n 0xab: 0xd9, // uppercase U, grave accent\n 0xac: 0xf9, // lowercase u, grave accent\n 0xad: 0xdb, // uppercase U, circumflex\n 0xae: 0xab, // left-pointing double angle quotation mark\n 0xaf: 0xbb, // right-pointing double angle quotation mark\n // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS\n // THAT COME FROM HI BYTE=0x13 AND LOW BETWEEN 0x20 AND 0x3F\n 0xb0: 0xc3, // Uppercase A, tilde\n 0xb1: 0xe3, // Lowercase a, tilde\n 0xb2: 0xcd, // Uppercase I, acute accent\n 0xb3: 0xcc, // Uppercase I, grave accent\n 0xb4: 0xec, // Lowercase i, grave accent\n 0xb5: 0xd2, // Uppercase O, grave accent\n 0xb6: 0xf2, // Lowercase o, grave accent\n 0xb7: 0xd5, // Uppercase O, tilde\n 0xb8: 0xf5, // Lowercase o, tilde\n 0xb9: 0x7b, // Open curly brace\n 0xba: 0x7d, // Closing curly brace\n 0xbb: 0x5c, // Backslash\n 0xbc: 0x5e, // Caret\n 0xbd: 0x5f, // Underscore\n 0xbe: 0x7c, // Pipe (vertical line)\n 0xbf: 0x223c, // Tilde operator\n 0xc0: 0xc4, // Uppercase A, umlaut\n 0xc1: 0xe4, // Lowercase A, umlaut\n 0xc2: 0xd6, // Uppercase O, umlaut\n 0xc3: 0xf6, // Lowercase o, umlaut\n 0xc4: 0xdf, // Esszett (sharp S)\n 0xc5: 0xa5, // Yen symbol\n 0xc6: 0xa4, // Generic currency sign\n 0xc7: 0x2503, // Box drawings heavy vertical\n 0xc8: 0xc5, // Uppercase A, ring\n 0xc9: 0xe5, // Lowercase A, ring\n 0xca: 0xd8, // Uppercase O, stroke\n 0xcb: 0xf8, // Lowercase o, stroke\n 0xcc: 0x250f, // Box drawings heavy down and right\n 0xcd: 0x2513, // Box drawings heavy down and left\n 0xce: 0x2517, // Box drawings heavy up and right\n 0xcf: 0x251b, // Box drawings heavy up and left\n};\n\n/**\n * Utils\n */\nconst getCharForByte = (byte: number) =>\n String.fromCharCode(specialCea608CharsCodes[byte] || byte);\n\nconst NR_ROWS = 15;\nconst NR_COLS = 100;\n// Tables to look up row from PAC data\nconst rowsLowCh1 = {\n 0x11: 1,\n 0x12: 3,\n 0x15: 5,\n 0x16: 7,\n 0x17: 9,\n 0x10: 11,\n 0x13: 12,\n 0x14: 14,\n};\nconst rowsHighCh1 = {\n 0x11: 2,\n 0x12: 4,\n 0x15: 6,\n 0x16: 8,\n 0x17: 10,\n 0x13: 13,\n 0x14: 15,\n};\nconst rowsLowCh2 = {\n 0x19: 1,\n 0x1a: 3,\n 0x1d: 5,\n 0x1e: 7,\n 0x1f: 9,\n 0x18: 11,\n 0x1b: 12,\n 0x1c: 14,\n};\nconst rowsHighCh2 = {\n 0x19: 2,\n 0x1a: 4,\n 0x1d: 6,\n 0x1e: 8,\n 0x1f: 10,\n 0x1b: 13,\n 0x1c: 15,\n};\n\nconst backgroundColors = [\n 'white',\n 'green',\n 'blue',\n 'cyan',\n 'red',\n 'yellow',\n 'magenta',\n 'black',\n 'transparent',\n];\n
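\n// Editor's note (illustrative sketch, not part of the original source):\n// how the remapping table above is consumed. CEA-608 reuses low ASCII code\n// points for accented characters, so a raw byte must go through\n// getCharForByte() rather than String.fromCharCode() directly.\nfunction exampleSpecialCharLookup(): string {\n // 0x7e is 'ñ' in CEA-608 (mapped to UTF-16 0xf1), not ASCII '~'\n return getCharForByte(0x7e); // -> 'ñ'\n}\n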
\nconst enum VerboseLevel {\n ERROR = 0,\n TEXT = 1,\n WARNING = 2,\n INFO = 2,\n DEBUG = 3,\n DATA = 3,\n}\n\nclass CaptionsLogger {\n public time: number | null = null;\n public verboseLevel: VerboseLevel = VerboseLevel.ERROR;\n\n log(severity: VerboseLevel, msg: string | (() => string)): void {\n if (this.verboseLevel >= severity) {\n const m: string = typeof msg === 'function' ? msg() : msg;\n logger.log(`${this.time} [${severity}] ${m}`);\n }\n }\n}\n\nconst numArrayToHexArray = function (numArray: number[]): string[] {\n const hexArray: string[] = [];\n for (let j = 0; j < numArray.length; j++) {\n hexArray.push(numArray[j].toString(16));\n }\n\n return hexArray;\n};\n\ntype PenStyles = {\n foreground: string | null;\n underline: boolean;\n italics: boolean;\n background: string;\n flash: boolean;\n};\n\nclass PenState {\n public foreground: string = 'white';\n public underline: boolean = false;\n public italics: boolean = false;\n public background: string = 'black';\n public flash: boolean = false;\n\n reset() {\n this.foreground = 'white';\n this.underline = false;\n this.italics = false;\n this.background = 'black';\n this.flash = false;\n }\n\n setStyles(styles: Partial<PenStyles>) {\n const attribs = [\n 'foreground',\n 'underline',\n 'italics',\n 'background',\n 'flash',\n ];\n for (let i = 0; i < attribs.length; i++) {\n const style = attribs[i];\n if (styles.hasOwnProperty(style)) {\n this[style] = styles[style];\n }\n }\n }\n\n isDefault() {\n return (\n this.foreground === 'white' &&\n !this.underline &&\n !this.italics &&\n this.background === 'black' &&\n !this.flash\n );\n }\n\n equals(other: PenState) {\n return (\n this.foreground === other.foreground &&\n this.underline === other.underline &&\n this.italics === other.italics &&\n this.background === other.background &&\n this.flash === other.flash\n );\n }\n\n copy(newPenState: PenState) {\n this.foreground = newPenState.foreground;\n this.underline = newPenState.underline;\n this.italics = newPenState.italics;\n this.background = newPenState.background;\n this.flash = newPenState.flash;\n }\n\n toString(): string {\n return (\n 'color=' +\n this.foreground +\n ', underline=' +\n this.underline +\n ', italics=' +\n this.italics +\n ', background=' +\n this.background +\n ', flash=' +\n this.flash\n );\n }\n}\n\n/**\n * Unicode character with styling and background.\n * @constructor\n */\nclass StyledUnicodeChar {\n uchar: string = ' ';\n penState: PenState = new PenState();\n\n reset() {\n this.uchar = ' ';\n this.penState.reset();\n }\n\n setChar(uchar: string, newPenState: PenState) {\n this.uchar = uchar;\n this.penState.copy(newPenState);\n }\n\n setPenState(newPenState: PenState) {\n this.penState.copy(newPenState);\n }\n\n equals(other: StyledUnicodeChar) {\n return this.uchar === other.uchar && this.penState.equals(other.penState);\n }\n\n copy(newChar: StyledUnicodeChar) {\n this.uchar = newChar.uchar;\n this.penState.copy(newChar.penState);\n }\n\n isEmpty(): boolean {\n return this.uchar === ' ' && this.penState.isDefault();\n }\n}\n\n/**\n * CEA-608 row consisting of NR_COLS instances of StyledUnicodeChar.\n * @constructor\n */\nexport class Row {\n public chars: StyledUnicodeChar[] = [];\n public pos: number = 0;\n public currPenState: PenState = new PenState();\n public cueStartTime: number | null = null;\n private logger: CaptionsLogger;\n\n constructor(logger: CaptionsLogger) {\n for (let i = 0; i < NR_COLS; i++) {\n this.chars.push(new StyledUnicodeChar());\n }\n this.logger = logger;\n }\n\n equals(other: Row) {\n for (let i = 0; i < NR_COLS; i++) {\n if (!this.chars[i].equals(other.chars[i])) {\n return false;\n }\n }\n return true;\n }\n\n copy(other: Row) {\n for (let i = 0; i < NR_COLS; i++) {\n this.chars[i].copy(other.chars[i]);\n }\n }\n\n isEmpty(): boolean {\n let empty = true;\n for (let i = 0; i < NR_COLS; i++) {\n if (!this.chars[i].isEmpty()) {\n empty = false;\n break;\n }\n 
}\n return empty;\n }\n\n /**\n * Set the cursor to a valid column.\n */\n setCursor(absPos: number) {\n if (this.pos !== absPos) {\n this.pos = absPos;\n }\n\n if (this.pos < 0) {\n this.logger.log(\n VerboseLevel.DEBUG,\n 'Negative cursor position ' + this.pos,\n );\n this.pos = 0;\n } else if (this.pos > NR_COLS) {\n this.logger.log(\n VerboseLevel.DEBUG,\n 'Too large cursor position ' + this.pos,\n );\n this.pos = NR_COLS;\n }\n }\n\n /**\n * Move the cursor relative to current position.\n */\n moveCursor(relPos: number) {\n const newPos = this.pos + relPos;\n if (relPos > 1) {\n for (let i = this.pos + 1; i < newPos + 1; i++) {\n this.chars[i].setPenState(this.currPenState);\n }\n }\n this.setCursor(newPos);\n }\n\n /**\n * Backspace, move one step back and clear character.\n */\n backSpace() {\n this.moveCursor(-1);\n this.chars[this.pos].setChar(' ', this.currPenState);\n }\n\n insertChar(byte: number) {\n if (byte >= 0x90) {\n // Extended char\n this.backSpace();\n }\n const char = getCharForByte(byte);\n if (this.pos >= NR_COLS) {\n this.logger.log(\n VerboseLevel.ERROR,\n () =>\n 'Cannot insert ' +\n byte.toString(16) +\n ' (' +\n char +\n ') at position ' +\n this.pos +\n '. Skipping it!',\n );\n return;\n }\n this.chars[this.pos].setChar(char, this.currPenState);\n this.moveCursor(1);\n }\n\n clearFromPos(startPos: number) {\n let i: number;\n for (i = startPos; i < NR_COLS; i++) {\n this.chars[i].reset();\n }\n }\n\n clear() {\n this.clearFromPos(0);\n this.pos = 0;\n this.currPenState.reset();\n }\n\n clearToEndOfRow() {\n this.clearFromPos(this.pos);\n }\n\n getTextString() {\n const chars: string[] = [];\n let empty = true;\n for (let i = 0; i < NR_COLS; i++) {\n const char = this.chars[i].uchar;\n if (char !== ' ') {\n empty = false;\n }\n\n chars.push(char);\n }\n if (empty) {\n return '';\n } else {\n return chars.join('');\n }\n }\n\n setPenStyles(styles: Partial<PenStyles>) {\n this.currPenState.setStyles(styles);\n const currChar = this.chars[this.pos];\n currChar.setPenState(this.currPenState);\n }\n}\n
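\n// Editor's note (illustrative sketch, not part of the original source):\n// extended (two-byte) characters are transmitted after a fallback base\n// character, so insertChar() backspaces over the fallback first (see the\n// byte >= 0x90 branch above). Worked example on a throwaway Row:\nfunction exampleExtendedCharRow(): string {\n const row = new Row(new CaptionsLogger());\n row.insertChar(0x61); // base character 'a' (plain ASCII fallback)\n row.insertChar(0xb1); // extended 'ã': backspaces, then replaces the 'a'\n return row.getTextString().trim(); // -> 'ã'\n}\n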
\n/**\n * Keep a CEA-608 screen of NR_COLS x NR_ROWS styled characters\n * @constructor\n */\nexport class CaptionScreen {\n rows: Row[] = [];\n currRow: number = NR_ROWS - 1;\n nrRollUpRows: number | null = null;\n lastOutputScreen: CaptionScreen | null = null;\n logger: CaptionsLogger;\n\n constructor(logger: CaptionsLogger) {\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows.push(new Row(logger));\n }\n this.logger = logger;\n }\n\n reset() {\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].clear();\n }\n this.currRow = NR_ROWS - 1;\n }\n\n equals(other: CaptionScreen): boolean {\n let equal = true;\n for (let i = 0; i < NR_ROWS; i++) {\n if (!this.rows[i].equals(other.rows[i])) {\n equal = false;\n break;\n }\n }\n return equal;\n }\n\n copy(other: CaptionScreen) {\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].copy(other.rows[i]);\n }\n }\n\n isEmpty(): boolean {\n let empty = true;\n for (let i = 0; i < NR_ROWS; i++) {\n if (!this.rows[i].isEmpty()) {\n empty = false;\n break;\n }\n }\n return empty;\n }\n\n backSpace() {\n const row = this.rows[this.currRow];\n row.backSpace();\n }\n\n clearToEndOfRow() {\n const row = this.rows[this.currRow];\n row.clearToEndOfRow();\n }\n\n /**\n * Insert a character (without styling) in the current row.\n */\n insertChar(char: number) {\n const row = this.rows[this.currRow];\n row.insertChar(char);\n }\n\n setPen(styles: Partial<PenStyles>) {\n const row = this.rows[this.currRow];\n row.setPenStyles(styles);\n }\n\n moveCursor(relPos: number) {\n const row = this.rows[this.currRow];\n row.moveCursor(relPos);\n }\n\n setCursor(absPos: number) {\n this.logger.log(VerboseLevel.INFO, 'setCursor: ' + absPos);\n const row = this.rows[this.currRow];\n row.setCursor(absPos);\n }\n\n setPAC(pacData: PACData) {\n this.logger.log(\n VerboseLevel.INFO,\n () => 'pacData = ' + JSON.stringify(pacData),\n );\n let newRow = pacData.row - 1;\n if (this.nrRollUpRows && newRow < this.nrRollUpRows - 1) {\n newRow = this.nrRollUpRows - 1;\n }\n\n // Make sure this only affects Roll-up Captions by checking this.nrRollUpRows\n if (this.nrRollUpRows && this.currRow !== newRow) {\n // clear all rows first\n for (let i = 0; i < NR_ROWS; i++) {\n this.rows[i].clear();\n }\n\n // Copy this.nrRollUpRows rows from lastOutputScreen and place it in the newRow location\n // topRowIndex - the start of rows to copy (inclusive index)\n const topRowIndex = this.currRow + 1 - this.nrRollUpRows;\n // We only copy if the last position was already shown.\n // We use the cueStartTime value to check this.\n const lastOutputScreen = this.lastOutputScreen;\n if (lastOutputScreen) {\n const prevLineTime = lastOutputScreen.rows[topRowIndex].cueStartTime;\n const time = this.logger.time;\n if (prevLineTime !== null && time !== null && prevLineTime < time) {\n for (let i = 0; i < this.nrRollUpRows; i++) {\n this.rows[newRow - this.nrRollUpRows + i + 1].copy(\n lastOutputScreen.rows[topRowIndex + i],\n );\n }\n }\n }\n }\n\n this.currRow = newRow;\n const row = this.rows[this.currRow];\n if (pacData.indent !== null) {\n const indent = pacData.indent;\n const prevPos = Math.max(indent - 1, 0);\n row.setCursor(pacData.indent);\n pacData.color = row.chars[prevPos].penState.foreground;\n }\n const styles: PenStyles = {\n foreground: pacData.color,\n underline: pacData.underline,\n italics: pacData.italics,\n background: 'black',\n flash: false,\n };\n this.setPen(styles);\n }\n\n /**\n * Set background/extra foreground, but first do back_space, and then insert space (backwards compatibility).\n */\n setBkgData(bkgData: Partial<PenStyles>) {\n this.logger.log(\n VerboseLevel.INFO,\n () => 'bkgData = ' + JSON.stringify(bkgData),\n );\n this.backSpace();\n this.setPen(bkgData);\n this.insertChar(0x20); // Space\n }\n\n setRollUpRows(nrRows: number | null) {\n this.nrRollUpRows = nrRows;\n }\n\n rollUp() {\n if (this.nrRollUpRows === null) {\n this.logger.log(\n VerboseLevel.DEBUG,\n 'roll_up but nrRollUpRows not set yet',\n );\n return; // Not properly setup\n }\n this.logger.log(VerboseLevel.TEXT, () => this.getDisplayText());\n const topRowIndex = this.currRow + 1 - this.nrRollUpRows;\n const topRow = this.rows.splice(topRowIndex, 1)[0];\n topRow.clear();\n this.rows.splice(this.currRow, 0, topRow);\n this.logger.log(VerboseLevel.INFO, 'Rolling up');\n // this.logger.log(VerboseLevel.TEXT, this.get_display_text())\n }\n\n /**\n * Get all non-empty rows as unicode text.\n */\n getDisplayText(asOneRow?: boolean) {\n asOneRow = asOneRow || false;\n const displayText: string[] = [];\n let text = '';\n let rowNr = -1;\n for (let i = 0; i < NR_ROWS; i++) {\n const rowText = this.rows[i].getTextString();\n if (rowText) {\n rowNr = i + 1;\n if (asOneRow) {\n displayText.push('Row ' + rowNr + \": '\" + rowText + \"'\");\n } else {\n displayText.push(rowText.trim());\n }\n }\n }\n if (displayText.length > 0) {\n if (asOneRow) {\n text = '[' + displayText.join(' | ') + ']';\n } else {\n text = displayText.join('\\n');\n }\n }\n return text;\n }\n\n getTextAndFormat() {\n return this.rows;\n }\n}\n\n// 
var modes = ['MODE_ROLL-UP', 'MODE_POP-ON', 'MODE_PAINT-ON', 'MODE_TEXT'];\n\ntype CaptionModes =\n | 'MODE_ROLL-UP'\n | 'MODE_POP-ON'\n | 'MODE_PAINT-ON'\n | 'MODE_TEXT'\n | null;\n\nclass Cea608Channel {\n chNr: number;\n outputFilter: OutputFilter;\n mode: CaptionModes;\n verbose: number;\n displayedMemory: CaptionScreen;\n nonDisplayedMemory: CaptionScreen;\n lastOutputScreen: CaptionScreen;\n currRollUpRow: Row;\n writeScreen: CaptionScreen;\n cueStartTime: number | null;\n logger: CaptionsLogger;\n\n constructor(\n channelNumber: number,\n outputFilter: OutputFilter,\n logger: CaptionsLogger,\n ) {\n this.chNr = channelNumber;\n this.outputFilter = outputFilter;\n this.mode = null;\n this.verbose = 0;\n this.displayedMemory = new CaptionScreen(logger);\n this.nonDisplayedMemory = new CaptionScreen(logger);\n this.lastOutputScreen = new CaptionScreen(logger);\n this.currRollUpRow = this.displayedMemory.rows[NR_ROWS - 1];\n this.writeScreen = this.displayedMemory;\n this.mode = null;\n this.cueStartTime = null; // Keeps track of where a cue started.\n this.logger = logger;\n }\n\n reset() {\n this.mode = null;\n this.displayedMemory.reset();\n this.nonDisplayedMemory.reset();\n this.lastOutputScreen.reset();\n this.outputFilter.reset();\n this.currRollUpRow = this.displayedMemory.rows[NR_ROWS - 1];\n this.writeScreen = this.displayedMemory;\n this.mode = null;\n this.cueStartTime = null;\n }\n\n getHandler(): OutputFilter {\n return this.outputFilter;\n }\n\n setHandler(newHandler: OutputFilter) {\n this.outputFilter = newHandler;\n }\n\n setPAC(pacData: PACData) {\n this.writeScreen.setPAC(pacData);\n }\n\n setBkgData(bkgData: Partial<PenStyles>) {\n this.writeScreen.setBkgData(bkgData);\n }\n\n setMode(newMode: CaptionModes) {\n if (newMode === this.mode) {\n return;\n }\n\n this.mode = newMode;\n this.logger.log(VerboseLevel.INFO, () => 'MODE=' + newMode);\n if (this.mode === 'MODE_POP-ON') {\n this.writeScreen = this.nonDisplayedMemory;\n } else {\n this.writeScreen = this.displayedMemory;\n this.writeScreen.reset();\n }\n if (this.mode !== 'MODE_ROLL-UP') {\n this.displayedMemory.nrRollUpRows = null;\n this.nonDisplayedMemory.nrRollUpRows = null;\n }\n this.mode = newMode;\n }\n\n insertChars(chars: number[]) {\n for (let i = 0; i < chars.length; i++) {\n this.writeScreen.insertChar(chars[i]);\n }\n\n const screen =\n this.writeScreen === this.displayedMemory ? 
'DISP' : 'NON_DISP';\n this.logger.log(\n VerboseLevel.INFO,\n () => screen + ': ' + this.writeScreen.getDisplayText(true),\n );\n if (this.mode === 'MODE_PAINT-ON' || this.mode === 'MODE_ROLL-UP') {\n this.logger.log(\n VerboseLevel.TEXT,\n () => 'DISPLAYED: ' + this.displayedMemory.getDisplayText(true),\n );\n this.outputDataUpdate();\n }\n }\n\n ccRCL() {\n // Resume Caption Loading (switch mode to Pop On)\n this.logger.log(VerboseLevel.INFO, 'RCL - Resume Caption Loading');\n this.setMode('MODE_POP-ON');\n }\n\n ccBS() {\n // BackSpace\n this.logger.log(VerboseLevel.INFO, 'BS - BackSpace');\n if (this.mode === 'MODE_TEXT') {\n return;\n }\n\n this.writeScreen.backSpace();\n if (this.writeScreen === this.displayedMemory) {\n this.outputDataUpdate();\n }\n }\n\n ccAOF() {\n // Reserved (formerly Alarm Off)\n }\n\n ccAON() {\n // Reserved (formerly Alarm On)\n }\n\n ccDER() {\n // Delete to End of Row\n this.logger.log(VerboseLevel.INFO, 'DER - Delete to End of Row');\n this.writeScreen.clearToEndOfRow();\n this.outputDataUpdate();\n }\n\n ccRU(nrRows: number | null) {\n // Roll-Up Captions - 2, 3, or 4 Rows\n this.logger.log(VerboseLevel.INFO, 'RU(' + nrRows + ') - Roll Up');\n this.writeScreen = this.displayedMemory;\n this.setMode('MODE_ROLL-UP');\n this.writeScreen.setRollUpRows(nrRows);\n }\n\n ccFON() {\n // Flash On\n this.logger.log(VerboseLevel.INFO, 'FON - Flash On');\n this.writeScreen.setPen({ flash: true });\n }\n\n ccRDC() {\n // Resume Direct Captioning (switch mode to PaintOn)\n this.logger.log(VerboseLevel.INFO, 'RDC - Resume Direct Captioning');\n this.setMode('MODE_PAINT-ON');\n }\n\n ccTR() {\n // Text Restart in text mode (not supported, however)\n this.logger.log(VerboseLevel.INFO, 'TR');\n this.setMode('MODE_TEXT');\n }\n\n ccRTD() {\n // Resume Text Display in Text mode (not supported, however)\n this.logger.log(VerboseLevel.INFO, 'RTD');\n this.setMode('MODE_TEXT');\n }\n\n ccEDM() {\n // Erase Displayed Memory\n this.logger.log(VerboseLevel.INFO, 'EDM - Erase Displayed Memory');\n this.displayedMemory.reset();\n this.outputDataUpdate(true);\n }\n\n ccCR() {\n // Carriage Return\n this.logger.log(VerboseLevel.INFO, 'CR - Carriage Return');\n this.writeScreen.rollUp();\n this.outputDataUpdate(true);\n }\n\n ccENM() {\n // Erase Non-Displayed Memory\n this.logger.log(VerboseLevel.INFO, 'ENM - Erase Non-displayed Memory');\n this.nonDisplayedMemory.reset();\n }\n\n ccEOC() {\n // End of Caption (Flip Memories)\n this.logger.log(VerboseLevel.INFO, 'EOC - End Of Caption');\n if (this.mode === 'MODE_POP-ON') {\n const tmp = this.displayedMemory;\n this.displayedMemory = this.nonDisplayedMemory;\n this.nonDisplayedMemory = tmp;\n this.writeScreen = this.nonDisplayedMemory;\n this.logger.log(\n VerboseLevel.TEXT,\n () => 'DISP: ' + this.displayedMemory.getDisplayText(),\n );\n }\n this.outputDataUpdate(true);\n }\n\n ccTO(nrCols: number) {\n // Tab Offset 1, 2, or 3 columns\n this.logger.log(VerboseLevel.INFO, 'TO(' + nrCols + ') - Tab Offset');\n this.writeScreen.moveCursor(nrCols);\n }\n
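\n // Editor's note (illustrative, not part of the original source): a\n // midrow code's second byte packs three fields, decoded in ccMIDROW()\n // below. Worked example for secondByte = 0x2f:\n // underline: 0x2f % 2 === 1 -> true\n // italics: 0x2f >= 0x2e -> true (color forced to 'white')\n // and for secondByte = 0x24: underline false, italics false,\n // colorIndex = Math.floor(0x24 / 2) - 0x10 = 2 -> 'blue'\n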
\n ccMIDROW(secondByte: number) {\n // Parse MIDROW command\n const styles: Partial<PenStyles> = { flash: false };\n styles.underline = secondByte % 2 === 1;\n styles.italics = secondByte >= 0x2e;\n if (!styles.italics) {\n const colorIndex = Math.floor(secondByte / 2) - 0x10;\n const colors = [\n 'white',\n 'green',\n 'blue',\n 'cyan',\n 'red',\n 'yellow',\n 'magenta',\n ];\n styles.foreground = colors[colorIndex];\n } else {\n styles.foreground = 'white';\n }\n this.logger.log(VerboseLevel.INFO, 'MIDROW: ' + JSON.stringify(styles));\n this.writeScreen.setPen(styles);\n }\n\n outputDataUpdate(dispatch: boolean = false) {\n const time = this.logger.time;\n if (time === null) {\n return;\n }\n\n if (this.outputFilter) {\n if (this.cueStartTime === null && !this.displayedMemory.isEmpty()) {\n // Start of a new cue\n this.cueStartTime = time;\n } else {\n if (!this.displayedMemory.equals(this.lastOutputScreen)) {\n this.outputFilter.newCue(\n this.cueStartTime!,\n time,\n this.lastOutputScreen,\n );\n if (dispatch && this.outputFilter.dispatchCue) {\n this.outputFilter.dispatchCue();\n }\n\n this.cueStartTime = this.displayedMemory.isEmpty() ? null : time;\n }\n }\n this.lastOutputScreen.copy(this.displayedMemory);\n }\n }\n\n cueSplitAtTime(t: number) {\n if (this.outputFilter) {\n if (!this.displayedMemory.isEmpty()) {\n if (this.outputFilter.newCue) {\n this.outputFilter.newCue(this.cueStartTime!, t, this.displayedMemory);\n }\n\n this.cueStartTime = t;\n }\n }\n }\n}\n\ninterface PACData {\n row: number;\n indent: number | null;\n color: string | null;\n underline: boolean;\n italics: boolean;\n}\n\ntype SupportedField = 1 | 3;\n\ntype Channels = 0 | 1 | 2; // Will be 1 or 2 when parsing captions\n\ntype CmdHistory = {\n a: number | null;\n b: number | null;\n};\n\nclass Cea608Parser {\n channels: Array<Cea608Channel | null>;\n currentChannel: Channels = 0;\n cmdHistory: CmdHistory = createCmdHistory();\n logger: CaptionsLogger;\n\n constructor(field: SupportedField, out1: OutputFilter, out2: OutputFilter) {\n const logger = (this.logger = new CaptionsLogger());\n this.channels = [\n null,\n new Cea608Channel(field, out1, logger),\n new Cea608Channel(field + 1, out2, logger),\n ];\n }\n\n getHandler(channel: number) {\n return (this.channels[channel] as Cea608Channel).getHandler();\n }\n\n setHandler(channel: number, newHandler: OutputFilter) {\n (this.channels[channel] as Cea608Channel).setHandler(newHandler);\n }\n\n /**\n * Add data for time t in the form of a list of bytes (unsigned ints). 
The bytes are treated as pairs.\n */\n addData(time: number | null, byteList: number[]) {\n this.logger.time = time;\n for (let i = 0; i < byteList.length; i += 2) {\n const a = byteList[i] & 0x7f;\n const b = byteList[i + 1] & 0x7f;\n let cmdFound: boolean = false;\n let charsFound: number[] | null = null;\n\n if (a === 0 && b === 0) {\n continue;\n } else {\n this.logger.log(\n VerboseLevel.DATA,\n () =>\n '[' +\n numArrayToHexArray([byteList[i], byteList[i + 1]]) +\n '] -> (' +\n numArrayToHexArray([a, b]) +\n ')',\n );\n }\n\n const cmdHistory = this.cmdHistory;\n const isControlCode = a >= 0x10 && a <= 0x1f;\n if (isControlCode) {\n // Skip redundant control codes\n if (hasCmdRepeated(a, b, cmdHistory)) {\n setLastCmd(null, null, cmdHistory);\n this.logger.log(\n VerboseLevel.DEBUG,\n () =>\n 'Repeated command (' +\n numArrayToHexArray([a, b]) +\n ') is dropped',\n );\n continue;\n }\n setLastCmd(a, b, this.cmdHistory);\n\n cmdFound = this.parseCmd(a, b);\n\n if (!cmdFound) {\n cmdFound = this.parseMidrow(a, b);\n }\n\n if (!cmdFound) {\n cmdFound = this.parsePAC(a, b);\n }\n\n if (!cmdFound) {\n cmdFound = this.parseBackgroundAttributes(a, b);\n }\n } else {\n setLastCmd(null, null, cmdHistory);\n }\n if (!cmdFound) {\n charsFound = this.parseChars(a, b);\n if (charsFound) {\n const currChNr = this.currentChannel;\n if (currChNr && currChNr > 0) {\n const channel = this.channels[currChNr] as Cea608Channel;\n channel.insertChars(charsFound);\n } else {\n this.logger.log(\n VerboseLevel.WARNING,\n 'No channel found yet. TEXT-MODE?',\n );\n }\n }\n }\n if (!cmdFound && !charsFound) {\n this.logger.log(\n VerboseLevel.WARNING,\n () =>\n \"Couldn't parse cleaned data \" +\n numArrayToHexArray([a, b]) +\n ' orig: ' +\n numArrayToHexArray([byteList[i], byteList[i + 1]]),\n );\n }\n }\n }\n\n /**\n * Parse Command.\n * @returns True if a command was found\n */\n parseCmd(a: number, b: number): boolean {\n const cond1 =\n (a === 0x14 || a === 0x1c || a === 0x15 || a === 0x1d) &&\n b >= 0x20 &&\n b <= 0x2f;\n const cond2 = (a === 0x17 || a === 0x1f) && b >= 0x21 && b <= 0x23;\n if (!(cond1 || cond2)) {\n return false;\n }\n\n const chNr = a === 0x14 || a === 0x15 || a === 0x17 ? 
1 : 2;\n const channel = this.channels[chNr] as Cea608Channel;\n\n if (a === 0x14 || a === 0x15 || a === 0x1c || a === 0x1d) {\n if (b === 0x20) {\n channel.ccRCL();\n } else if (b === 0x21) {\n channel.ccBS();\n } else if (b === 0x22) {\n channel.ccAOF();\n } else if (b === 0x23) {\n channel.ccAON();\n } else if (b === 0x24) {\n channel.ccDER();\n } else if (b === 0x25) {\n channel.ccRU(2);\n } else if (b === 0x26) {\n channel.ccRU(3);\n } else if (b === 0x27) {\n channel.ccRU(4);\n } else if (b === 0x28) {\n channel.ccFON();\n } else if (b === 0x29) {\n channel.ccRDC();\n } else if (b === 0x2a) {\n channel.ccTR();\n } else if (b === 0x2b) {\n channel.ccRTD();\n } else if (b === 0x2c) {\n channel.ccEDM();\n } else if (b === 0x2d) {\n channel.ccCR();\n } else if (b === 0x2e) {\n channel.ccENM();\n } else if (b === 0x2f) {\n channel.ccEOC();\n }\n } else {\n // a == 0x17 || a == 0x1F\n channel.ccTO(b - 0x20);\n }\n this.currentChannel = chNr;\n return true;\n }\n\n /**\n * Parse midrow styling command\n */\n parseMidrow(a: number, b: number): boolean {\n let chNr: number = 0;\n\n if ((a === 0x11 || a === 0x19) && b >= 0x20 && b <= 0x2f) {\n if (a === 0x11) {\n chNr = 1;\n } else {\n chNr = 2;\n }\n\n if (chNr !== this.currentChannel) {\n this.logger.log(\n VerboseLevel.ERROR,\n 'Mismatch channel in midrow parsing',\n );\n return false;\n }\n const channel = this.channels[chNr];\n if (!channel) {\n return false;\n }\n channel.ccMIDROW(b);\n this.logger.log(\n VerboseLevel.DEBUG,\n () => 'MIDROW (' + numArrayToHexArray([a, b]) + ')',\n );\n return true;\n }\n return false;\n }\n\n /**\n * Parse Preamble Access Codes (Table 53).\n * @returns True if a PAC was found\n */\n parsePAC(a: number, b: number): boolean {\n let row: number;\n\n const case1 =\n ((a >= 0x11 && a <= 0x17) || (a >= 0x19 && a <= 0x1f)) &&\n b >= 0x40 &&\n b <= 0x7f;\n const case2 = (a === 0x10 || a === 0x18) && b >= 0x40 && b <= 0x5f;\n if (!(case1 || case2)) {\n return false;\n }\n\n const chNr: Channels = a <= 0x17 ? 1 : 2;\n\n if (b >= 0x40 && b <= 0x5f) {\n row = chNr === 1 ? rowsLowCh1[a] : rowsLowCh2[a];\n } else {\n // 0x60 <= b <= 0x7F\n row = chNr === 1 ? rowsHighCh1[a] : rowsHighCh2[a];\n }\n const channel = this.channels[chNr];\n if (!channel) {\n return false;\n }\n channel.setPAC(this.interpretPAC(row, b));\n this.currentChannel = chNr;\n return true;\n }\n\n /**\n * Interpret the second byte of the pac, and return the information.\n * @returns pacData with style parameters\n */\n interpretPAC(row: number, byte: number): PACData {\n let pacIndex;\n const pacData: PACData = {\n color: null,\n italics: false,\n indent: null,\n underline: false,\n row: row,\n };\n\n if (byte > 0x5f) {\n pacIndex = byte - 0x60;\n } else {\n pacIndex = byte - 0x40;\n }\n\n pacData.underline = (pacIndex & 1) === 1;\n if (pacIndex <= 0xd) {\n pacData.color = [\n 'white',\n 'green',\n 'blue',\n 'cyan',\n 'red',\n 'yellow',\n 'magenta',\n 'white',\n ][Math.floor(pacIndex / 2)];\n } else if (pacIndex <= 0xf) {\n pacData.italics = true;\n pacData.color = 'white';\n } else {\n pacData.indent = Math.floor((pacIndex - 0x10) / 2) * 4;\n }\n return pacData; // Note that row has zero offset. The spec uses 1.\n }\n
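\n // Editor's note (illustrative, not part of the original source): worked\n // interpretPAC() example for row 5, byte 0x4e:\n // pacIndex = 0x4e - 0x40 = 14 (0xe)\n // underline = (14 & 1) === 1 -> false\n // 14 > 0xd and <= 0xf -> italics = true, color = 'white'\n // and for byte 0x52: pacIndex = 0x12 = 18 -> indent =\n // Math.floor((18 - 0x10) / 2) * 4 = 4\n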
\n /**\n * Parse characters.\n * @returns An array with 1 to 2 codes corresponding to chars, if found. null otherwise.\n */\n parseChars(a: number, b: number): number[] | null {\n let channelNr: Channels;\n let charCodes: number[] | null = null;\n let charCode1: number | null = null;\n\n if (a >= 0x19) {\n channelNr = 2;\n charCode1 = a - 8;\n } else {\n channelNr = 1;\n charCode1 = a;\n }\n if (charCode1 >= 0x11 && charCode1 <= 0x13) {\n // Special character\n let oneCode;\n if (charCode1 === 0x11) {\n oneCode = b + 0x50;\n } else if (charCode1 === 0x12) {\n oneCode = b + 0x70;\n } else {\n oneCode = b + 0x90;\n }\n\n this.logger.log(\n VerboseLevel.INFO,\n () =>\n \"Special char '\" +\n getCharForByte(oneCode) +\n \"' in channel \" +\n channelNr,\n );\n charCodes = [oneCode];\n } else if (a >= 0x20 && a <= 0x7f) {\n charCodes = b === 0 ? [a] : [a, b];\n }\n if (charCodes) {\n this.logger.log(\n VerboseLevel.DEBUG,\n () =>\n 'Char codes = ' +\n numArrayToHexArray(charCodes as number[]).join(','),\n );\n }\n return charCodes;\n }\n\n /**\n * Parse extended background attributes as well as new foreground color black.\n * @returns True if background attributes are found\n */\n parseBackgroundAttributes(a: number, b: number): boolean {\n const case1 = (a === 0x10 || a === 0x18) && b >= 0x20 && b <= 0x2f;\n const case2 = (a === 0x17 || a === 0x1f) && b >= 0x2d && b <= 0x2f;\n if (!(case1 || case2)) {\n return false;\n }\n let index: number;\n const bkgData: Partial<PenStyles> = {};\n if (a === 0x10 || a === 0x18) {\n index = Math.floor((b - 0x20) / 2);\n bkgData.background = backgroundColors[index];\n if (b % 2 === 1) {\n bkgData.background = bkgData.background + '_semi';\n }\n } else if (b === 0x2d) {\n bkgData.background = 'transparent';\n } else {\n bkgData.foreground = 'black';\n if (b === 0x2f) {\n bkgData.underline = true;\n }\n }\n const chNr: Channels = a <= 0x17 ? 
1 : 2;\n const channel: Cea608Channel = this.channels[chNr] as Cea608Channel;\n channel.setBkgData(bkgData);\n return true;\n }\n\n /**\n * Reset state of parser and its channels.\n */\n reset() {\n for (let i = 0; i < Object.keys(this.channels).length; i++) {\n const channel = this.channels[i];\n if (channel) {\n channel.reset();\n }\n }\n setLastCmd(null, null, this.cmdHistory);\n }\n\n /**\n * Trigger the generation of a cue, and the start of a new one if the displayed screens are not empty.\n */\n cueSplitAtTime(t: number) {\n for (let i = 0; i < this.channels.length; i++) {\n const channel = this.channels[i];\n if (channel) {\n channel.cueSplitAtTime(t);\n }\n }\n }\n}\n\nfunction setLastCmd(\n a: number | null,\n b: number | null,\n cmdHistory: CmdHistory,\n) {\n cmdHistory.a = a;\n cmdHistory.b = b;\n}\n\nfunction hasCmdRepeated(a: number, b: number, cmdHistory: CmdHistory) {\n return cmdHistory.a === a && cmdHistory.b === b;\n}\n\nfunction createCmdHistory(): CmdHistory {\n return {\n a: null,\n b: null,\n };\n}\n
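\n// Editor's note (illustrative sketch, not part of the original source):\n// minimal shape of feeding CC byte pairs into the parser. The parser\n// instance and byte source are assumed to exist elsewhere.\nfunction exampleFeedCcData(\n parser: Cea608Parser,\n pts: number,\n ccBytes: number[],\n) {\n // addData() walks the list two bytes at a time and masks off the parity\n // bit (byte & 0x7f) itself before interpreting control codes and chars\n parser.addData(pts, ccBytes);\n}\n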
\nexport default Cea608Parser;\n", "import type { TimelineController } from '../controller/timeline-controller';\nimport type { CaptionScreen } from './cea-608-parser';\n\nexport default class OutputFilter {\n private timelineController: TimelineController;\n private cueRanges: Array<[number, number]> = [];\n private trackName: string;\n private startTime: number | null = null;\n private endTime: number | null = null;\n private screen: CaptionScreen | null = null;\n\n constructor(timelineController: TimelineController, trackName: string) {\n this.timelineController = timelineController;\n this.trackName = trackName;\n }\n\n dispatchCue() {\n if (this.startTime === null) {\n return;\n }\n\n this.timelineController.addCues(\n this.trackName,\n this.startTime,\n this.endTime as number,\n this.screen as CaptionScreen,\n this.cueRanges,\n );\n this.startTime = null;\n }\n\n newCue(startTime: number, endTime: number, screen: CaptionScreen) {\n if (this.startTime === null || this.startTime > startTime) {\n this.startTime = startTime;\n }\n\n this.endTime = endTime;\n this.screen = screen;\n this.timelineController.createCaptionsTrack(this.trackName);\n }\n\n reset() {\n this.cueRanges = [];\n this.startTime = null;\n }\n}\n", "/**\n * Copyright 2013 vtt.js Contributors\n *\n * Licensed under the Apache License, Version 2.0 (the 'License');\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an 'AS IS' BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { optionalSelf } from './global';\n\ndeclare interface VTTCuePolyfill extends VTTCue {\n new (...args): VTTCuePolyfill;\n hasBeenReset: boolean;\n displayState: void;\n}\n\nexport default (function () {\n if (optionalSelf?.VTTCue) {\n return self.VTTCue;\n }\n\n const AllowedDirections = ['', 'lr', 'rl'] as const;\n type Direction = (typeof AllowedDirections)[number];\n\n const AllowedAlignments = [\n 'start',\n 'middle',\n 'end',\n 'left',\n 'right',\n ] as const;\n type Alignment = (typeof AllowedAlignments)[number];\n\n function isAllowedValue<T extends ReadonlyArray<string>, A extends T[number]>(\n allowed: T,\n value: string,\n ): A | false {\n if (typeof value !== 'string') {\n return false;\n }\n // necessary for assuring the generic conforms to the Array interface\n if (!Array.isArray(allowed)) {\n return false;\n }\n // reset the type so that the next narrowing works well\n const lcValue = value.toLowerCase() as any;\n // use the allow list to narrow the type to a specific subset of strings\n if (~allowed.indexOf(lcValue)) {\n return lcValue;\n }\n\n return false;\n }\n\n function findDirectionSetting(value: string) {\n return isAllowedValue<typeof AllowedDirections, Direction>(\n AllowedDirections,\n value,\n );\n }\n\n function findAlignSetting(value: string) {\n return isAllowedValue<typeof AllowedAlignments, Alignment>(\n AllowedAlignments,\n value,\n );\n }\n\n function extend(obj: Record<string, any>, ...rest: Record<string, any>[]) {\n let i = 1;\n for (; i < arguments.length; i++) {\n const cobj = arguments[i];\n for (const p in cobj) {\n obj[p] = cobj[p];\n }\n }\n\n return obj;\n }\n\n function VTTCue(startTime: number, endTime: number, text: string) {\n const cue = this as VTTCuePolyfill;\n const baseObj = { enumerable: true };\n /**\n * Shim implementation specific properties. These properties are not in\n * the spec.\n */\n\n // Lets us know when the VTTCue's data has changed in such a way that we need\n // to recompute its display state. This lets us compute its display state\n // lazily.\n cue.hasBeenReset = false;\n\n /**\n * VTTCue and TextTrackCue properties\n * http://dev.w3.org/html5/webvtt/#vttcue-interface\n */\n\n let _id = '';\n let _pauseOnExit = false;\n let _startTime = startTime;\n let _endTime = endTime;\n let _text = text;\n let _region = null;\n let _vertical: Direction = '';\n let _snapToLines = true;\n let _line: number | 'auto' = 'auto';\n let _lineAlign: Alignment = 'start';\n let _position = 50;\n let _positionAlign: Alignment = 'middle';\n let _size = 50;\n let _align: Alignment = 'middle';\n\n Object.defineProperty(\n cue,\n 'id',\n extend({}, baseObj, {\n get: function () {\n return _id;\n },\n set: function (value: string) {\n _id = '' + value;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'pauseOnExit',\n extend({}, baseObj, {\n get: function () {\n return _pauseOnExit;\n },\n set: function (value: boolean) {\n _pauseOnExit = !!value;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'startTime',\n extend({}, baseObj, {\n get: function () {\n return _startTime;\n },\n set: function (value: number) {\n if (typeof value !== 'number') {\n throw new TypeError('Start time must be set to a number.');\n }\n\n _startTime = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'endTime',\n extend({}, baseObj, {\n get: function () {\n return _endTime;\n },\n set: function (value: number) {\n if (typeof value !== 'number') {\n throw new TypeError('End time must be set to a number.');\n }\n\n _endTime = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'text',\n extend({}, baseObj, {\n get: function () {\n return _text;\n },\n set: function (value: string) {\n _text = '' + value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n // todo: implement VTTRegion polyfill?\n Object.defineProperty(\n cue,\n 'region',\n extend({}, baseObj, {\n get: function () {\n return _region;\n },\n set: function (value: any) {\n _region = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'vertical',\n extend({}, baseObj, {\n get: function () {\n return _vertical;\n },\n set: function (value: string) {\n const setting = findDirectionSetting(value);\n // Have to check for false because the setting can be an empty string.\n if (setting === false) {\n throw new 
SyntaxError(\n 'An invalid or illegal string was specified.',\n );\n }\n\n _vertical = setting;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'snapToLines',\n extend({}, baseObj, {\n get: function () {\n return _snapToLines;\n },\n set: function (value: boolean) {\n _snapToLines = !!value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'line',\n extend({}, baseObj, {\n get: function () {\n return _line;\n },\n set: function (value: number | 'auto') {\n if (typeof value !== 'number' && value !== 'auto') {\n throw new SyntaxError(\n 'An invalid number or illegal string was specified.',\n );\n }\n\n _line = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'lineAlign',\n extend({}, baseObj, {\n get: function () {\n return _lineAlign;\n },\n set: function (value: string) {\n const setting = findAlignSetting(value);\n if (!setting) {\n throw new SyntaxError(\n 'An invalid or illegal string was specified.',\n );\n }\n\n _lineAlign = setting;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'position',\n extend({}, baseObj, {\n get: function () {\n return _position;\n },\n set: function (value: number) {\n if (value < 0 || value > 100) {\n throw new Error('Position must be between 0 and 100.');\n }\n\n _position = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'positionAlign',\n extend({}, baseObj, {\n get: function () {\n return _positionAlign;\n },\n set: function (value: string) {\n const setting = findAlignSetting(value);\n if (!setting) {\n throw new SyntaxError(\n 'An invalid or illegal string was specified.',\n );\n }\n\n _positionAlign = setting;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'size',\n extend({}, baseObj, {\n get: function () {\n return _size;\n },\n set: function (value: number) {\n if (value < 0 || value > 100) {\n throw new Error('Size must be between 0 and 100.');\n }\n\n _size = value;\n this.hasBeenReset = true;\n },\n }),\n );\n\n Object.defineProperty(\n cue,\n 'align',\n extend({}, baseObj, {\n get: function () {\n return _align;\n },\n set: function (value: string) {\n const setting = findAlignSetting(value);\n if (!setting) {\n throw new SyntaxError(\n 'An invalid or illegal string was specified.',\n );\n }\n\n _align = setting;\n this.hasBeenReset = true;\n },\n }),\n );\n\n /**\n * Other