Home Reference Source

src/controller/timeline-controller.ts

  1. import { Events } from '../events';
  2. import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
  3. import OutputFilter from '../utils/output-filter';
  4. import { parseWebVTT } from '../utils/webvtt-parser';
  5. import {
  6. sendAddTrackEvent,
  7. clearCurrentCues,
  8. addCueToTrack,
  9. removeCuesInRange,
  10. } from '../utils/texttrack-utils';
  11. import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
  12. import { PlaylistLevelType } from '../types/loader';
  13. import { Fragment } from '../loader/fragment';
  14. import {
  15. FragParsingUserdataData,
  16. FragLoadedData,
  17. FragDecryptedData,
  18. MediaAttachingData,
  19. ManifestLoadedData,
  20. InitPTSFoundData,
  21. SubtitleTracksUpdatedData,
  22. BufferFlushingData,
  23. FragLoadingData,
  24. } from '../types/events';
  25. import { logger } from '../utils/logger';
  26. import type Hls from '../hls';
  27. import type { ComponentAPI } from '../types/component-api';
  28. import type { HlsConfig } from '../config';
  29. import type { CuesInterface } from '../utils/cues';
  30. import type { MediaPlaylist } from '../types/media-playlist';
  31. import type { VTTCCs } from '../types/vtt';
  32.  
/** Display/label metadata for one CEA-608/708 caption channel (textTrack1-4). */
type TrackProperties = {
  label: string;
  languageCode: string;
  // Populated from the manifest CLOSED-CAPTIONS media playlist, when declared
  media?: MediaPlaylist;
};
  38.  
/**
 * Track descriptor emitted via NON_NATIVE_TEXT_TRACKS_FOUND when
 * `renderTextTracksNatively` is disabled and an external renderer owns cues.
 */
type NonNativeCaptionsTrack = {
  _id?: string;
  label: string;
  kind: string;
  default: boolean;
  // Exactly one of these is set, depending on whether the track came from
  // CEA-608/708 captions or a subtitle playlist
  closedCaptions?: MediaPlaylist;
  subtitleTrack?: MediaPlaylist;
};
  47.  
/**
 * Manages all text tracks for an Hls instance: CEA-608/708 closed captions
 * parsed from fragment userdata, and WebVTT / IMSC1 subtitle playlists.
 *
 * Depending on `config.renderTextTracksNatively`, cues are either appended to
 * TextTrack objects on the attached media element, or re-emitted through
 * CUES_PARSED / NON_NATIVE_TEXT_TRACKS_FOUND events for an external renderer.
 */
export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  private Cues: CuesInterface;
  // Native TextTracks for subtitle playlists, indexed by subtitle level (frag.level)
  private textTracks: Array<TextTrack> = [];
  // Subtitle MediaPlaylists from the manifest; same indexing as textTracks
  private tracks: Array<MediaPlaylist> = [];
  // Initial PTS and timescale per discontinuity counter (frag.cc); needed to
  // synchronise WebVTT/IMSC1 cue times with the main stream
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  // Subtitle fragments that arrived before their initPTS; replayed in onInitPtsFound
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  // CEA-608 caption TextTracks keyed by 'textTrack1'..'textTrack4'
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  // Definite-assignment (!): only constructed when config.enableCEA708Captions is set
  private cea608Parser1!: Cea608Parser;
  private cea608Parser2!: Cea608Parser;
  // Last main-fragment sequence number seen; used to detect non-contiguous frags
  private lastSn: number = -1;
  // Last discontinuity counter seen in subtitle fragments
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  private captionsProperties: {
    textTrack1: TrackProperties;
    textTrack2: TrackProperties;
    textTrack3: TrackProperties;
    textTrack4: TrackProperties;
  };

  constructor(hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode,
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode,
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode,
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode,
      },
    };

    if (this.config.enableCEA708Captions) {
      // Parser 1 handles fields/channels 1-2, parser 2 handles 3-4; each
      // channel writes into its own OutputFilter which calls back into addCues
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }

  /** Detach all event handlers and release references (instance is unusable after). */
  public destroy(): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
    // @ts-ignore
    this.hls = this.config = this.cea608Parser1 = this.cea608Parser2 = null;
  }

  /**
   * Called by the OutputFilters when a CEA-608 caption screen is ready.
   * `cueRanges` is the per-channel list of time ranges already cued; it is
   * mutated in place (ranges are merged / appended).
   */
  public addCues(
    trackName: string,
    startTime: number,
    endTime: number,
    screen: CaptionScreen,
    cueRanges: Array<[number, number]>
  ) {
    // skip cues which overlap more than 50% with previously parsed time ranges
    let merged = false;
    for (let i = cueRanges.length; i--; ) {
      const cueRange = cueRanges[i];
      const overlap = intersection(
        cueRange[0],
        cueRange[1],
        startTime,
        endTime
      );
      // overlap >= 0 also merges ranges that merely touch (zero-length overlap)
      if (overlap >= 0) {
        // widen the existing range to cover the new cue
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if (overlap / (endTime - startTime) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      const track = this.captionsTracks[trackName];
      this.Cues.newCue(track, startTime, endTime, screen);
    } else {
      // Hand the parsed cues to an external renderer instead
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, {
        type: 'captions',
        cues,
        track: trackName,
      });
    }
  }

  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  private onInitPtsFound(
    event: Events.INIT_PTS_FOUND,
    { frag, id, initPTS, timescale }: InitPTSFoundData
  ) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, initial PTS may arrive later than the first VTT fragments are loaded.
    // Parse any unparsed fragments upon receiving the initial PTS.
    if (unparsedVttFrags.length) {
      // swap the list out first: onFragLoaded may re-queue frags whose cc still has no PTS
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach((frag) => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

  /**
   * Find a TextTrack previously created by Hls.js for this caption channel.
   * Detection relies on the marker property set in createNativeTrack
   * (e.g. track['textTrack1'] === true).
   */
  private getExistingTrack(trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  /** Create (or reuse) the caption track for a 608 channel, native or non-native. */
  public createCaptionsTrack(trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  private createNativeTrack(trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      // Reused track: drop stale cues and re-announce it on the media element
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  private createNonNativeTrack(trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a list of a single track for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media,
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }

  /** Thin wrapper over HTMLMediaElement.addTextTrack; undefined when no media attached. */
  private createTextTrack(
    kind: TextTrackKind,
    label: string,
    lang?: string
  ): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

  private onMediaAttaching(
    event: Events.MEDIA_ATTACHING,
    data: MediaAttachingData
  ) {
    this.media = data.media;
    this._cleanTracks();
  }

  private onMediaDetaching() {
    // Tracks themselves stay on the (old) media element; we only clear cues
    // and drop our references so new tracks are created on re-attach
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach((trackName) => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  /** Reset all per-manifest state when a new manifest starts loading. */
  private onManifestLoading() {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    // NOTE(review): this is effectively a no-op unless unparsedVttFrags is
    // already null/undefined; pending frags from the previous manifest are kept
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  private _cleanTracks() {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  /**
   * Sync our subtitle track list with the manifest's. Native mode reuses
   * matching TextTracks on the media element; non-native mode announces the
   * list via NON_NATIVE_TEXT_TRACKS_FOUND.
   */
  private onSubtitleTracksUpdated(
    event: Events.SUBTITLE_TRACKS_UPDATED,
    data: SubtitleTracksUpdatedData
  ) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      // NOTE(review): compares only list lengths (cheap heuristic), and is read
      // before this.tracks is overwritten on the next line
      const sameTracks =
        this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (textTrack) {
            clearCurrentCues(textTrack);
          } else {
            textTrack = this.createTextTrack(
              'subtitles',
              track.name,
              track.lang
            );
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track,
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, {
          tracks: tracksList,
        });
      }
    }
  }

  /**
   * Apply CLOSED-CAPTIONS manifest metadata (label/language) to the matching
   * 608/708 channel properties (INSTREAM-ID "CC1".."CC4" / "SERVICE1".."SERVICE4"
   * maps to textTrack1..textTrack4).
   */
  private onManifestLoaded(
    event: Events.MANIFEST_LOADED,
    data: ManifestLoadedData
  ) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach((captionsTrack) => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(
          captionsTrack.instreamId as string
        );
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties = this.captionsProperties[
          trackName
        ];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) {
          // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }

  private onFragLoading(event: Events.FRAG_LOADING, data: FragLoadingData) {
    const { cea608Parser1, cea608Parser2, lastSn } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (data.frag.type === PlaylistLevelType.MAIN) {
      const sn = data.frag.sn;
      if (sn !== lastSn + 1) {
        cea608Parser1.reset();
        cea608Parser2.reset();
      }
      // cast: sn is number for media fragments ('initSegment' frags are not MAIN media frags here — TODO confirm)
      this.lastSn = sn as number;
    }
  }

  /**
   * Parse a loaded SUBTITLE fragment as WebVTT or IMSC1. Fragments without a
   * known initPTS for their cc are queued for onInitPtsFound; encrypted
   * (AES-128) payloads are deferred until FRAG_DECRYPTED.
   */
  private onFragLoaded(event: Events.FRAG_LOADED, data: FragLoadedData) {
    const { frag, payload } = data;
    const { initPTS, unparsedVttFrags } = this;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      // If fragment is subtitle type, parse as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
              success: false,
              frag,
              error: new Error('Missing initial subtitle PTS'),
            });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if (
          decryptData == null ||
          decryptData.key == null ||
          decryptData.method !== 'AES-128'
        ) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          // first fragment for this discontinuity: record its start and link to the previous cc
          if (!vttCCs[frag.cc]) {
            vttCCs[frag.cc] = {
              start: frag.start,
              prevCC: this.prevCC,
              new: true,
            };
            this.prevCC = frag.cc;
          }
          if (
            trackPlaylistMedia &&
            trackPlaylistMedia.textCodec === IMSC1_CODEC
          ) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error: new Error('Empty subtitle payload'),
        });
      }
    }
  }

  private _parseIMSC1(frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        logger.log(`Failed to parse IMSC1: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _parseVTTs(frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    parseWebVTT(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      vttCCs,
      frag.cc,
      frag.start,
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        // Not valid WebVTT: maybe the playlist simply didn't declare its IMSC1 codec
        this._fallbackToIMSC1(frag, payload);
        // Something went wrong while parsing. Trigger event with success false.
        logger.log(`Failed to parse VTT cue: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _fallbackToIMSC1(frag: Fragment, payload: ArrayBuffer) {
    // If textCodec is unknown, try parsing as IMSC1. Set textCodec based on the result
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(
        payload,
        this.initPTS[frag.cc],
        this.timescale[frag.cc],
        () => {
          // Success: remember the codec so subsequent frags parse as IMSC1 directly
          trackPlaylistMedia.textCodec = IMSC1_CODEC;
          this._parseIMSC1(frag, payload);
        },
        () => {
          trackPlaylistMedia.textCodec = 'wvtt';
        }
      );
    }
  }

  /** Append parsed subtitle cues to the native track, or emit CUES_PARSED. */
  private _appendCues(cues: VTTCue[], fragLevel: number) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is an async method and if the currently selected text track mode is set to "disabled"
      // before parsing is done then don't try to access currentTrack.cues.getCueById as cues will be null
      // and trying to access getCueById method of cues will throw an exception
      // Because we check if the mode is disabled, we can force check `cues` below. They can't be null.
      if (textTrack.mode === 'disabled') {
        return;
      }
      cues.forEach((cue) => addCueToTrack(textTrack, cue));
    } else {
      const currentTrack = this.tracks[fragLevel];
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }

  /** Decrypted subtitle fragments re-enter the normal FRAG_LOADED parse path. */
  private onFragDecrypted(
    event: Events.FRAG_DECRYPTED,
    data: FragDecryptedData
  ) {
    const { frag } = data;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push((data as unknown) as FragLoadedData);
        return;
      }
      this.onFragLoaded(
        Events.FRAG_LOADED,
        (data as unknown) as FragLoadedData
      );
    }
  }

  private onSubtitleTracksCleared() {
    this.tracks = [];
    this.captionsTracks = {};
  }

  /** Feed CEA-608/708 userdata samples into the two field parsers. */
  private onFragParsingUserdata(
    event: Events.FRAG_PARSING_USERDATA,
    data: FragParsingUserdataData
  ) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately
    // It will create the proper timestamps based on the PTS value
    for (let i = 0; i < data.samples.length; i++) {
      const ccBytes = data.samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(data.samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(data.samples[i].pts, ccdatas[1]);
      }
    }
  }

  onBufferFlushing(
    event: Events.BUFFER_FLUSHING,
    { startOffset, endOffset, type }: BufferFlushingData
  ) {
    // Clear 608 CC cues from the back buffer
    // Forward cues are never removed because we can lose streamed 608 content from recent fragments
    if (!type || type === 'video') {
      const { media } = this;
      // only flush when the range is entirely behind the playhead (back buffer)
      if (!media || media.currentTime < endOffset) {
        return;
      }
      const { captionsTracks } = this;
      Object.keys(captionsTracks).forEach((trackName) =>
        removeCuesInRange(captionsTracks[trackName], startOffset, endOffset)
      );
    }
  }

  /**
   * Split a cc_data packet into the byte pairs for field 1 ([0]) and field 2 ([1]).
   * Assumes the layout produced by the upstream demuxer: byte 0 carries cc_count
   * in its low 5 bits and pairs start at offset 2 — TODO confirm against the
   * FRAG_PARSING_USERDATA producer.
   */
  private extractCea608Data(byteArray: Uint8Array): number[][] {
    const count = byteArray[0] & 31;
    let position = 2;
    const actualCCBytes: number[][] = [[], []];

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7f & byteArray[position++]; // strip parity bit
      const ccbyte2 = 0x7f & byteArray[position++];
      const ccValid = (4 & tmpByte) !== 0;
      const ccType = 3 & tmpByte; // 0 = field 1, 1 = field 2 (2/3 are 708 DTVCC, ignored)

      // padding pair — nothing to push
      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }

      if (ccValid) {
        if (ccType === 0 || ccType === 1) {
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
}
  674.  
  675. function canReuseVttTextTrack(inUseTrack, manifestTrack): boolean {
  676. return (
  677. inUseTrack &&
  678. inUseTrack.label === manifestTrack.name &&
  679. !(inUseTrack.textTrack1 || inUseTrack.textTrack2)
  680. );
  681. }
  682.  
  683. function intersection(x1: number, x2: number, y1: number, y2: number): number {
  684. return Math.min(x2, y2) - Math.max(x1, y1);
  685. }
  686.  
  687. function newVTTCCs(): VTTCCs {
  688. return {
  689. ccOffset: 0,
  690. presentationOffset: 0,
  691. 0: {
  692. start: 0,
  693. prevCC: -1,
  694. new: false,
  695. },
  696. };
  697. }