diff --git a/css/VideoPlayer.css b/css/VideoPlayer.css index 4516a5d..898fc1d 100644 --- a/css/VideoPlayer.css +++ b/css/VideoPlayer.css @@ -40,8 +40,6 @@ width:100%; text-align: center; padding-bottom: 0.5em; -} -.fl-videoPlayer-theme .fl-videoPlayer-controller { background-color: gray; } @@ -59,46 +57,46 @@ /* * Play button */ -.fl-videoPlayer-theme .fl-videoPlayer-play { +.fl-videoPlayer-play { background: #ffffff url('../images/play-black.png') no-repeat center center; } -.fl-videoPlayer-theme .fl-videoPlayer-playing { +.fl-videoPlayer-playing { background-image: url('../images/pause-black.png'); } -.fl-videoPlayer-theme .fl-videoPlayer-play:hover, -.fl-videoPlayer-theme .fl-videoPlayer-playing:hover { +.fl-videoPlayer-play:hover, +.fl-videoPlayer-playing:hover { background-color: yellow; } /* * Fullscreen button */ -.fl-videoPlayer-theme .fl-videoPlayer-fullscreen { +.fl-videoPlayer-fullscreen { background: #ffffff url('../images/fullscreen-black.png') no-repeat center center; } -.fl-videoPlayer-theme .fl-videoPlayer-fullscreen-on { +.fl-videoPlayer-fullscreen-on { background-image: url('../images/fullscreen-on-black.png'); } -.fl-videoPlayer-theme .fl-videoPlayer-fullscreen:hover, -.fl-videoPlayer-theme .fl-videoPlayer-fullscreen:focus { +.fl-videoPlayer-fullscreen:hover, +.fl-videoPlayer-fullscreen:focus { background-color: yellow; } /* * Volume controls */ -.fl-videoPlayer-theme .fl-videoPlayer-mute { +.fl-videoPlayer-mute { background: #ffffff url('../images/volume-black.png') no-repeat center center; } -.fl-videoPlayer-theme .fl-videoPlayer-volumeContainer:hover .fl-videoPlayer-mute, -.fl-videoPlayer-theme .fl-videoPlayer-volumeContainer:focus .fl-videoPlayer-mute { +.fl-videoPlayer-volumeContainer:hover .fl-videoPlayer-mute, +.fl-videoPlayer-volumeContainer:focus .fl-videoPlayer-mute { background-color: yellow; } -.fl-videoPlayer-theme .fl-videoPlayer-muted { +.fl-videoPlayer-muted { background-image: url('../images/volume-muted-black.png'); } -.fl-videoPlayer-theme .fl-videoPlayer-volumeContainer:hover .fl-videoPlayer-muted, -.fl-videoPlayer-theme .fl-videoPlayer-volumeContainer:focus .fl-videoPlayer-muted { +.fl-videoPlayer-volumeContainer:hover .fl-videoPlayer-muted, +.fl-videoPlayer-volumeContainer:focus .fl-videoPlayer-muted { background-color: orange; } .fl-videoPlayer-volumeControl { @@ -110,20 +108,37 @@ display: block; } +/* + * Language menus + * colours are temporary + */ +.fl-videoPlayer-languageMenu * { + display: block; +} +.fl-videoPlayer-languageMenu .fl-videoPlayer-menuItem:hover { + color: yellow; +} +.fl-videoPlayer-languageMenu .fl-videoPlayer-menuItem-selected, +.fl-videoPlayer-languageMenu .fl-videoPlayer-menuItem-active.fl-videoPlayer-menuItem-selected { + color: orange; +} +.fl-videoPlayer-languageMenu .fl-videoPlayer-menuItem-active { + color: green; +} /* * Caption controls */ -.fl-videoPlayer-theme .fl-videoPlayer-captions-button { +.fl-videoPlayer-captions-button { background: #ffffff url('../images/captions-black.png') no-repeat center center; } -.fl-videoPlayer-theme .fl-videoPlayer-captions-button:hover { +.fl-videoPlayer-captions-button:hover { background-color: yellow; } -.fl-videoPlayer-theme .fl-videoPlayer-captions-button.fl-videoPlayer-caption-active { +.fl-videoPlayer-captions-button.fl-videoPlayer-caption-active { background-color: orange; } -.fl-videoPlayer-theme .fl-videoPlayer-captions-button.fl-videoPlayer-caption-active:hover { +.fl-videoPlayer-captions-button.fl-videoPlayer-caption-active:hover { background-color: wheat; } @@ -145,16 
+160,16 @@ ul.fl-videoPlayer-captions-languageList .fl-videoPlayer-caption-selected { /* * transcript controls */ -.fl-videoPlayer-theme .fl-videoPlayer-transcripts-button { +.fl-videoPlayer-transcripts-button { background: #ffffff url('../images/transcripts-black.png') no-repeat center center; } -.fl-videoPlayer-theme .fl-videoPlayer-transcripts-button:hover { +.fl-videoPlayer-transcripts-button:hover { background-color: yellow; } -.fl-videoPlayer-theme .fl-videoPlayer-transcripts-button.fl-videoPlayer-transcript-active { +.fl-videoPlayer-transcripts-button.fl-videoPlayer-transcript-active { background-color: orange; } -.fl-videoPlayer-theme .fl-videoPlayer-transcripts-button.fl-videoPlayer-transcript-active:hover { +.fl-videoPlayer-transcripts-button.fl-videoPlayer-transcript-active:hover { background-color: wheat; } @@ -177,12 +192,14 @@ ul.fl-videoPlayer-transcripts-languageList .fl-videoPlayer-transcript-selected { display: inline-block; } -.fl-videoPlayer-menu-captions { +.fl-videoPlayer-controller-volumeControl { display: block; position: absolute; + bottom: -40px; + z-index: 101; /* Show our div on top, since captionator uses z-index 100 */ } -.fl-videoPlayer-theme .fl-videoPlayer-transcripts-close-button { +.fl-videoPlayer-transcripts-close-button { background: #ffffff url('../images/close-black.png') no-repeat center center; } @@ -196,14 +213,21 @@ ul.fl-videoPlayer-transcripts-languageList .fl-videoPlayer-transcript-selected { text-align:center; } -.fl-videoPlayer-theme .fl-videoPlayer-caption-captionText { +.fl-videoPlayer-caption-captionText { color: white; background-color: black; opacity: 0.7; } +.fl-videoPlayer-controller-menu-captions { + display: block; + position: absolute; + bottom: -40px; + z-index: 101; /* Show our div on top, since captionator uses z-index 100 */ +} + /* - * Caption area + * Transcript area */ .fl-videoPlayer-transcriptArea { @@ -226,6 +250,14 @@ ul.fl-videoPlayer-transcripts-languageList .fl-videoPlayer-transcript-selected { .fl-videoPlayer-transcript-text { margin-top: 2em; padding: 1em; + overflow: scroll; + height: 84%; + width: 85%; +} +.fl-videoPlayer-transcript-element-highlight { + color: white; + background-color: black; + opacity: 0.7; } /* @@ -236,6 +268,13 @@ ul.fl-videoPlayer-transcripts-languageList .fl-videoPlayer-transcript-selected { width: 60%; } +/* + * Allow clicking through the captionator cue DIV element + */ + +.captionator-cue-canvas { + pointer-events: none; +} /*********************************************************** * Styling overrides for the controls when using UIO themes @@ -396,3 +435,23 @@ ul.fl-videoPlayer-transcripts-languageList .fl-videoPlayer-transcript-selected { color: yellow; } +.fl-theme-uio-wb .fl-videoPlayer-transcript-element-highlight, +.fl-theme-uio-yb .fl-videoPlayer-transcript-element-highlight { + background-color: black; +} +.fl-theme-uio-bw .fl-videoPlayer-transcript-element-highlight, +.fl-theme-uio-by .fl-videoPlayer-transcript-element-highlight { + color: black; +} +.fl-theme-uio-wb .fl-videoPlayer-transcript-element-highlight { + color: white; +} +.fl-theme-uio-bw .fl-videoPlayer-transcript-element-highlight { + background-color: white; +} +.fl-theme-uio-by .fl-videoPlayer-transcript-element-highlight { + background-color: yellow; +} +.fl-theme-uio-yb .fl-videoPlayer-transcript-element-highlight { + color: yellow; +} diff --git a/demos/mammals.css b/demos/Mammals.css similarity index 100% rename from demos/mammals.css rename to demos/Mammals.css diff --git a/demos/Mammals.html b/demos/Mammals.html
index 5f822a3..8c21c55 100644 --- a/demos/Mammals.html +++ b/demos/Mammals.html @@ -18,24 +18,24 @@ - + + - - - + + diff --git a/demos/ReorganizeFuture.en.vtt b/demos/ReorganizeFuture.en.vtt new file mode 100644 index 0000000..46db043 --- /dev/null +++ b/demos/ReorganizeFuture.en.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Catch a tiger by the toe + +3 +00:00:05.99 --> 00:00:08.05 +If he hollers let him go + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny moe. + +5 +00:00:12.70 --> 00:00:14.64 +I'm Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +and I've come to wonder + +7 +00:00:16.04 --> 00:00:18.38 +whether we have a chance to reorganize our future. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley says that the future of design is human centred. + +9 +00:00:23.23 --> 00:00:25.51 +Most experts agree. + +10 +00:00:25.51 --> 00:00:29.02 +That leaves the question - which human? + +11 +00:00:29.02 --> 00:00:32.46 +An inevitable human condition is diversity. + +12 +00:00:32.46 --> 00:00:34.70 +There's no typical human, + +13 +00:00:34.70 --> 00:00:37.33 +even clones and identical twins are not the same. + +14 +00:00:37.33 --> 00:00:40.42 +We differ from one to the next, + +15 +00:00:40.42 --> 00:00:43.54 +but also from one moment to the next, + +16 +00:00:43.54 --> 00:00:46.84 +from one context to the next. + +17 +00:00:46.84 --> 00:00:49.91 +But diversity and difference become overwhelming + +18 +00:00:49.91 --> 00:00:52.94 +and we develop strategies to deal with this diversity. + +19 +00:00:52.94 --> 00:00:55.56 +We try to make things simpler, + +20 +00:00:55.60 --> 00:00:57.50 +less complex, less chaotic + +21 +00:00:57.50 --> 00:01:00.00 +Another part of the human condition is that + +22 +00:01:00.00 --> 00:01:03.18 +we try to find commonality and connections. + +23 +00:01:03.20 --> 00:01:09.53 +We form groups informal and formal with implicit and explicit criteria. + +24 +00:01:09.55 --> 00:01:13.97 +We organize, we create categories, we filter, we label. + +25 +00:01:13.97 --> 00:01:19.67 +At our most insecure and overwhelmed we divide in two, we create binaries: + +26 +00:01:19.67 --> 00:01:21.86 +male, female + +27 +00:01:21.86 --> 00:01:24.21 +disabled, normal + +28 +00:01:24.21 --> 00:01:27.03 +left, right + +29 +00:01:27.03 --> 00:01:29.57 +us, them. + +30 +00:01:29.57 --> 00:01:34.98 +This all results in issues of who belongs and who is excluded. + +31 +00:01:34.98 --> 00:01:37.53 +Membership in groups can be self assigned, + +32 +00:01:37.53 --> 00:01:40.46 +may be imposed, may even be policed. + +33 +00:01:40.46 --> 00:01:44.17 +Groups are used to assert or assign privileges and powers. + +34 +00:01:44.17 --> 00:01:46.92 +We use groups to judge + +35 +00:01:46.92 --> 00:01:49.22 +values get assigned to groups + +36 +00:01:49.22 --> 00:01:51.88 +often characteristics that have nothing to do with + +37 +00:01:51.88 --> 00:01:53.35 +the original founding properties of groups + +38 +00:01:53.35 --> 00:01:55.98 +are generalized to all individuals in the group. + +39 +00:01:55.98 --> 00:02:00.07 +Sometimes, people who are in an imposed group + +40 +00:02:00.07 --> 00:02:02.00 +take ownership of the group and reform + +41 +00:02:02.00 --> 00:02:04.42 +the classifications and values from within. 
+ +42 +00:02:04.42 --> 00:02:08.44 +Occasionally, someone has the audacity + +43 +00:02:08.44 --> 00:02:11.00 +to break out of the category we have put her in + +44 +00:02:11.00 --> 00:02:16.90 +but to preserve our category, we may dismiss her as an anomaly. + +45 +00:02:16.90 --> 00:02:20.03 +Some groups are more fluid while others are more fixed. + +46 +00:02:20.03 --> 00:02:23.56 +We not only form groups, but groups of groups + +47 +00:02:23.56 --> 00:02:25.66 +and groups, of groups, of groups. + +48 +00:02:25.66 --> 00:02:29.09 +Membership in one group can grant us membership in other groups. + +49 +00:02:29.09 --> 00:02:33.03 +But despite all this, we are diverse, + +50 +00:02:33.03 --> 00:02:34.85 +we are complex, + +51 +00:02:34.85 --> 00:02:36.45 +we are chaotic. + +52 +00:02:36.45 --> 00:02:38.81 +Individually we're different + +53 +00:02:38.81 --> 00:02:40.44 +over time, in different contexts, + +54 +00:02:40.44 --> 00:02:42.35 +in different roles, in different groups. + +55 +00:02:42.35 --> 00:02:45.42 +We need to assert our uniqueness, + +56 +00:02:45.42 --> 00:02:47.86 +we need to form and refine our identity. + +57 +00:02:47.86 --> 00:02:50.91 +We struggle with the identity imposed on us. + +58 +00:02:50.91 --> 00:02:56.36 +Generally, people do not fit easily into assigned categories + +59 +00:02:56.36 --> 00:02:58.98 +and yet we persist in assigning them. + +60 +00:02:58.98 --> 00:03:02.63 +And then, something new comes along + +61 +00:03:02.63 --> 00:03:05.41 +and shakes up our groups, our categories and our rules, + +62 +00:03:05.41 --> 00:03:08.26 +and we need to adjust, rebuild and rethink. + +63 +00:03:08.26 --> 00:03:12.53 +Something like, networks and digital stuff. + +64 +00:03:12.53 --> 00:03:15.47 +This new digital and connected world + +65 +00:03:15.47 --> 00:03:17.87 +puts into question how we group things + +66 +00:03:17.87 --> 00:03:20.75 +and challenges our excuses for leaving people out. + +67 +00:03:20.75 --> 00:03:25.46 +The digital changes our view of time, space and distance + +68 +00:03:25.46 --> 00:03:31.08 +and by extension our view of design, what is possible and what things cost. + +69 +00:03:31.08 --> 00:03:36.04 +Digital things are plastic, mutable, malleable and adaptable. + +70 +00:03:36.04 --> 00:03:39.50 +Before, not everyone could fit, + +71 +00:03:39.50 --> 00:03:42.16 +allowing someone in meant someone else was left out. + +72 +00:03:42.16 --> 00:03:46.06 +In the digital, room is very stretchy. + +73 +00:03:46.06 --> 00:03:49.76 +Before, what we created could not fit everyone + +74 +00:03:49.76 --> 00:03:51.77 +so we made it fit the largest group. + +75 +00:03:51.77 --> 00:03:54.53 +We made it for the group called average or typical + +76 +00:03:54.53 --> 00:03:58.26 +this left out everyone not average or typical. + +77 +00:03:58.26 --> 00:04:03.39 +In the digital reality the things we make can reconfigure, adapt + +78 +00:04:03.39 --> 00:04:06.27 +and take a form that is best for each individual. + +79 +00:04:06.27 --> 00:04:11.90 +In the solid world, each copy cost almost the same as the original. + +80 +00:04:11.90 --> 00:04:14.35 +Consumption actually consumed. + +81 +00:04:14.35 --> 00:04:18.56 +In the digital world, we can copy almost without cost. + +82 +00:04:18.56 --> 00:04:21.00 +Consumption no longer consumes. + +83 +00:04:21.00 --> 00:04:24.52 +Before, it took a great deal of time and effort + +84 +00:04:24.52 --> 00:04:27.23 +to deliver things, especially to people far away. 
+ +85 +00:04:27.23 --> 00:04:30.93 +Now it is as easy to deliver things around the world + +86 +00:04:30.93 --> 00:04:33.13 +as it is to deliver things next door. + +87 +00:04:33.13 --> 00:04:36.85 +Before, if we didn't place things in a fixed spot + +88 +00:04:36.85 --> 00:04:39.53 +we would have a hard time finding them again. + +89 +00:04:39.53 --> 00:04:43.63 +Now we can place them anywhere on the network and + +90 +00:04:43.63 --> 00:04:46.26 +retrieve them anywhere on the network. + +91 +00:04:46.26 --> 00:04:50.13 +Before, we needed to label things unambiguously and simply + +92 +00:04:50.13 --> 00:04:52.80 +so we could recognize them and know what to do with them. + +93 +00:04:52.80 --> 00:04:56.44 +Now we can see a description of each person or thing + +94 +00:04:56.44 --> 00:04:59.02 +that is useful and relevant to our purpose. + +95 +00:04:59.02 --> 00:05:03.01 +And by the way, we have learned that + +96 +00:05:03.01 --> 00:05:06.36 +inclusion and equality are good for all of us. + +97 +00:05:06.36 --> 00:05:09.35 +We are all healthier, wealthier and wiser + +98 +00:05:09.35 --> 00:05:12.19 +when our society is inclusive and equal. + +99 +00:05:12.19 --> 00:05:15.36 +We've also discovered that diverse groups + +100 +00:05:15.36 --> 00:05:18.93 +are more innovative and creative, and better at planning and predicting. + +101 +00:05:18.93 --> 00:05:23.73 +We've experimented with new organization like + +102 +00:05:23.73 --> 00:05:26.33 +most popular, to be ignored + +103 +00:05:26.33 --> 00:05:28.69 +friend, not friend. + +104 +00:05:28.69 --> 00:05:31.03 +But we can do better. + +105 +00:05:31.03 --> 00:05:33.26 +We can afford to be generous in our design, + +106 +00:05:33.26 --> 00:05:35.39 +we have fewer excuses to exclude. + +107 +00:05:35.39 --> 00:05:37.56 +We can be true to our diversity. + +108 +00:05:37.56 --> 00:05:43.06 +Perhaps now, we can find a way to make room for us all. + diff --git a/demos/ReorganizeFuture.fr.vtt b/demos/ReorganizeFuture.fr.vtt new file mode 100644 index 0000000..e647de8 --- /dev/null +++ b/demos/ReorganizeFuture.fr.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Catch un tigre par le gros orteil + +3 +00:00:05.99 --> 00:00:08.05 +S'il crie le laisser aller + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny moe. + +5 +00:00:12.70 --> 00:00:14.64 +Je suis Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +et je suis venu à me demander + +7 +00:00:16.04 --> 00:00:18.38 +si nous avons une chance de réorganiser notre avenir. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley affirme que l'avenir du design est centré humaine. + +9 +00:00:23.23 --> 00:00:25.51 +La plupart des experts s'accordent à dire. + +10 +00:00:25.51 --> 00:00:29.02 +Reste la question - ce qui de l'homme? + +11 +00:00:29.02 --> 00:00:32.46 +Une condition humaine est inévitable, la diversité. + +12 +00:00:32.46 --> 00:00:34.70 +Il n'y a aucun humain typique. + +13 +00:00:34.70 --> 00:00:37.33 +Même les clones et les jumeaux identiques ne sont pas les mêmes. + +14 +00:00:37.33 --> 00:00:40.42 +Nous différons de l'un à l'autre, + +15 +00:00:40.42 --> 00:00:43.54 +mais aussi d'un moment à l'autre, + +16 +00:00:43.54 --> 00:00:46.84 +d'un contexte à l'autre. + +17 +00:00:46.84 --> 00:00:49.91 +Mais la diversité et la différence devient écrasante + +18 +00:00:49.91 --> 00:00:52.94 +et nous développons des stratégies pour faire face à cette diversité. 
+ +19 +00:00:52.94 --> 00:00:55.56 +Nous essayons de rendre les choses plus simples, + +20 +00:00:55.60 --> 00:00:57.50 +moins complexe, moins chaotique. + +21 +00:00:57.50 --> 00:01:00.00 +Une autre partie de la condition humaine est que nous + +22 +00:01:00.00 --> 00:01:03.18 +essayer de trouver communité et connexions. + +23 +00:01:03.20 --> 00:01:09.53 +Nous formons des groupes formels et informels avec des critères explicites et implicites. + +24 +00:01:09.55 --> 00:01:13.97 +Nous organisons, nous créons des catégories, on filtre, on étiquette. + +25 +00:01:13.97 --> 00:01:19.67 +A notre plus précaires et accablé nous divisons en deux, nous créons des binaires: + +26 +00:01:19.67 --> 00:01:21.86 +masculin, féminin + +27 +00:01:21.86 --> 00:01:24.21 +handicapés, normale + +28 +00:01:24.21 --> 00:01:27.03 +gauche, droite + +29 +00:01:27.03 --> 00:01:29.57 +nous, eux. + +30 +00:01:29.57 --> 00:01:34.98 +Cela se traduit tout dans les questions de qui appartient et qui est exclu. + +31 +00:01:34.98 --> 00:01:37.53 +L'adhésion à des groupes peuvent être auto assignés, + +32 +00:01:37.53 --> 00:01:40.46 +peut être imposée, peut-être même policée. + +33 +00:01:40.46 --> 00:01:44.17 +Les groupes sont utilisés pour affirmer ou d'attribuer des privilèges et des pouvoirs. + +34 +00:01:44.17 --> 00:01:46.92 +Nous utilisons des groupes de juger + +35 +00:01:46.92 --> 00:01:49.22 +valeurs sont attribuées à des groupes + +36 +00:01:49.22 --> 00:01:51.88 +souvent des caractéristiques qui n'ont rien à voir avec + +37 +00:01:51.88 --> 00:01:53.35 +les propriétés originales des groupes fondateurs du + +38 +00:01:53.35 --> 00:01:55.98 +sont généralisés à tous les individus dans le groupe. + +39 +00:01:55.98 --> 00:02:00.07 +Parfois, les gens qui sont dans un groupe imposé + +40 +00:02:00.07 --> 00:02:02.00 +prendre la propriété du groupe et de la réforme + +41 +00:02:02.00 --> 00:02:04.42 +les classifications et les valeurs de l'intérieur. + +42 +00:02:04.42 --> 00:02:08.44 +Parfois, quelqu'un a l'audace + +43 +00:02:08.44 --> 00:02:11.00 +pour sortir de la catégorie, nous avons la mettre dans + +44 +00:02:11.00 --> 00:02:16.90 +mais pour préserver notre catégorie, nous pouvons la renvoyer comme une anomalie. + +45 +00:02:16.90 --> 00:02:20.03 +Certains groupes sont plus fluides tandis que d'autres sont plus fixes. + +46 +00:02:20.03 --> 00:02:23.56 +Nous les groupes se forment pas seulement, mais des groupes de groupes + +47 +00:02:23.56 --> 00:02:25.66 +et des groupes, des groupes, des groupes. + +48 +00:02:25.66 --> 00:02:29.09 +L'adhésion à un groupe peut nous accorder l'adhésion à d'autres groupes. + +49 +00:02:29.09 --> 00:02:33.03 +Mais malgré tout cela, nous sommes diversifiés + +50 +00:02:33.03 --> 00:02:34.85 +nous sommes complexe + +51 +00:02:34.85 --> 00:02:36.45 +nous sommes chaotique. + +52 +00:02:36.45 --> 00:02:38.81 +Individuellement, nous sommes différents + +53 +00:02:38.81 --> 00:02:40.44 +au fil du temps, dans des contextes différents + +54 +00:02:40.44 --> 00:02:42.35 +dans des rôles différents, dans des groupes différents. + +55 +00:02:42.35 --> 00:02:45.42 +Nous devons affirmer notre spécificité + +56 +00:02:45.42 --> 00:02:47.86 +nous avons besoin de former et de perfectionner notre identité. + +57 +00:02:47.86 --> 00:02:50.91 +Nous luttons avec l'identité qui nous est imposé. + +58 +00:02:50.91 --> 00:02:56.36 +Généralement, les gens ne s'intègrent pas facilement dans les catégories assignées + +59 +00:02:56.36 --> 00:02:58.98 +et pourtant nous persistons à les affecter. 
+ +60 +00:02:58.98 --> 00:03:02.63 +Et puis, quelque chose de nouveau arrive + +61 +00:03:02.63 --> 00:03:05.41 +et secoue nos groupes, nos catégories et nos règles + +62 +00:03:05.41 --> 00:03:08.26 +et nous avons besoin d'ajuster, de reconstruire et de repenser. + +63 +00:03:08.26 --> 00:03:12.53 +Quelque chose comme, réseaux et des trucs numérique. + +64 +00:03:12.53 --> 00:03:15.47 +Ce nouveau monde numérique et connecté + +65 +00:03:15.47 --> 00:03:17.87 +remet en question la façon dont nous les choses de groupe + +66 +00:03:17.87 --> 00:03:20.75 +et les défis nos excuses pour laisser les gens sortir. + +67 +00:03:20.75 --> 00:03:25.46 +Le numérique change notre vision du temps, d'espace et de distance + +68 +00:03:25.46 --> 00:03:31.08 +et par extension notre point de vue du design, ce qui est possible et quelles choses coût. + +69 +00:03:31.08 --> 00:03:36.04 +Things Digital sont en plastique, mutable, malléable et adaptable. + +70 +00:03:36.04 --> 00:03:39.50 +Avant, tout le monde ne pouvait en forme + +71 +00:03:39.50 --> 00:03:42.16 +permettre à quelqu'un de quelqu'un d'autre était destiné à l'écart. + +72 +00:03:42.16 --> 00:03:46.06 +. Dans le numérique, la chambre est très extensible + +73 +00:03:46.06 --> 00:03:49.76 +Avant, ce que nous avons créé ne pouvait pas convenir à tous + +74 +00:03:49.76 --> 00:03:51.77 +. Alors nous avons fait l'adapter le plus grand groupe + +75 +00:03:51.77 --> 00:03:54.53 +Nous l'avons fait pour le groupe appelé moyen ou typique + +76 +00:03:54.53 --> 00:03:58.26 +cette gauche à tous de ne pas en moyenne ou typique. + +77 +00:03:58.26 --> 00:04:03.39 +Dans la réalité numérique des choses que nous faisons peut reconfigurer, adapter + +78 +00:04:03.39 --> 00:04:06.27 +et prendre une forme qui est le mieux pour chaque individu. + +79 +00:04:06.27 --> 00:04:11.90 +Dans le monde solide, chaque copie coûte presque le même que l'original. + +80 +00:04:11.90 --> 00:04:14.35 +la consommation a réellement consommé. + +81 +00:04:14.35 --> 00:04:18.56 +Dans le monde numérique, nous pouvons copier presque sans coût. + +82 +00:04:18.56 --> 00:04:21.00 +La consommation ne consomme plus. + +83 +00:04:21.00 --> 00:04:24.52 +Avant, il a fallu beaucoup de temps et d'effort + +84 +00:04:24.52 --> 00:04:27.23 +pour livrer des choses, surtout pour les gens très loin. + +85 +00:04:27.23 --> 00:04:30.93 +Maintenant, il est aussi facile de livrer des choses dans le monde + +86 +00:04:30.93 --> 00:04:33.13 +. Comme il est de livrer des choses à côté + +87 +00:04:33.13 --> 00:04:36.85 +Avant, si on ne place pas les choses dans un endroit fixe + +88 +00:04:36.85 --> 00:04:39.53 +nous aurions du mal à les retrouver. + +89 +00:04:39.53 --> 00:04:43.63 +Maintenant, nous pouvons les placer n'importe où sur le réseau et + +90 +00:04:43.63 --> 00:04:46.26 +les récupérer n'importe où sur le réseau. + +91 +00:04:46.26 --> 00:04:50.13 +Avant, nous avions besoin d'étiqueter les choses clairement et simplement + +92 +00:04:50.13 --> 00:04:52.80 +. Afin que nous puissions les reconnaître et de savoir quoi faire avec eux + +93 +00:04:52.80 --> 00:04:56.44 +Maintenant nous pouvons voir une description de chaque personne ou une chose + +94 +00:04:56.44 --> 00:04:59.02 +ce qui est utile et pertinente à notre but. + +95 +00:04:59.02 --> 00:05:03.01 +Et en passant, nous avons appris que + +96 +00:05:03.01 --> 00:05:06.36 +l'inclusion et l'égalité sont bons pour nous tous. 
+ +97 +00:05:06.36 --> 00:05:09.35 +Nous sommes tous sains, plus riches et plus sage + +98 +00:05:09.35 --> 00:05:12.19 +quand notre société est inclusive et égalitaire. + +99 +00:05:12.19 --> 00:05:15.36 +Nous avons également découvert que les divers groupes + +100 +00:05:15.36 --> 00:05:18.93 +sont plus innovantes et créatives, et mieux à la planification et la prévision. + +101 +00:05:18.93 --> 00:05:23.73 +Nous avons expérimenté avec la nouvelle organisation comme + +102 +00:05:23.73 --> 00:05:26.33 +le plus populaire, pour être ignoré + +103 +00:05:26.33 --> 00:05:28.69 +ami, pas un ami. + +104 +00:05:28.69 --> 00:05:31.03 +Mais nous pouvons faire mieux. + +105 +00:05:31.03 --> 00:05:33.26 +Nous pouvons nous permettre d'être généreux dans notre conception + +106 +00:05:33.26 --> 00:05:35.39 +nous avons moins d'excuses à exclure. + +107 +00:05:35.39 --> 00:05:37.56 +Nous pouvons être fidèles à notre diversité. + +108 +00:05:37.56 --> 00:05:43.06 +Peut-être maintenant, nous pouvons trouver un moyen de faire de la place pour nous tous. + diff --git a/demos/VideoPlayer.html b/demos/VideoPlayer.html index d7e93b0..062644c 100644 --- a/demos/VideoPlayer.html +++ b/demos/VideoPlayer.html @@ -19,18 +19,19 @@ + - + + + + - - + @@ -40,12 +41,6 @@ @@ -60,7 +55,7 @@

Infusion HTML 5 Video Player

-
+
@@ -70,72 +65,61 @@

Infusion HTML 5 Video Player

}); var vp = fluid.videoPlayer(".videoPlayer", { - model: { - video: { - sources: [ - { - src: "videos/ReorganizeFuture/ReorganizeFuture.mp4", - type: "video/mp4" - }, - { - src: "videos/ReorganizeFuture/ReorganizeFuture.webm", - type: "video/webm" - }, - { - src: "http://www.youtube.com/v/_VxQEPw1x9E&hl=en&fs=1", - type: "youtube" - } - ] - }, - captions: { - sources: { - english: { - src: "videos/ReorganizeFuture/ReorganizeFuture.en.json", - type: "JSONcc" - }, - francaise: { - src: "videos/ReorganizeFuture/ReorganizeFuture.fr.json", - type: "JSONcc" - } + video: { + sources: [ + { + src: "videos/ReorganizeFuture/ReorganizeFuture.mp4", + type: "video/mp4" }, - currentTrack: "english", - - // The following options (choices, names, selection) shouldn't be necessary. - // This is a temporary workaround for FLUID-4585 - choices: [], - names: [], - // TODO: setting 'selection' to something other than 'none' is a workaround - // for FLUID-4592: a default caption *must* be loaded for the - // intervalEventsConductor to be created - selection: "english", - - show: false, - conversionServiceUrl: "/videoPlayer/conversion_service/index.php", - maxNumber: 3, - track: undefined - }, - transcripts: { - sources: { - english: { - src: "videos/ReorganizeFuture/ReorganizeFuture.transcripts.en.json", - type: "JSONcc" - }, - francaise: { - src: "videos/ReorganizeFuture/ReorganizeFuture.transcripts.fr.json", - type: "JSONcc" - } + { + src: "videos/ReorganizeFuture/ReorganizeFuture.webm", + type: "video/webm" }, - - // The following options (choices, names, selection) shouldn't be necessary. - // This is a temporary workaround for FLUID-4585 - choices: [], - names: [], - selection: "english", - - show: false, - track: undefined - } - + { + src: "http://www.youtube.com/v/_VxQEPw1x9E&hl=en&fs=1", + type: "youtube" + } + ], + captions: [ + { + src: "videos/ReorganizeFuture/ReorganizeFuture.en.vtt", + type: "text/vtt", + srclang: "en", + label: "English Subtitles", + kind: "subtitles" + }, + { + src: "videos/ReorganizeFuture/ReorganizeFuture.fr.vtt", + type: "text/vtt", + srclang: "fr", + label: "French Subtitles", + kind: "subtitles" + } + ], + transcripts: [ + { + src: "videos/ReorganizeFuture/ReorganizeFuture.transcripts.en.json", + type: "JSONcc", + srclang: "en", + label: "English Transcripts", + kind: "transcripts" + }, + { + src: "videos/ReorganizeFuture/ReorganizeFuture.transcripts.fr.json", + type: "JSONcc", + srclang: "fr", + label: "French Transcripts", + kind: "transcripts" + } + ] + }, + model: { + currentTracks: { + captions: [0], + transcripts: [0] + }, + displayCaptions: true, + displayTranscripts: true } }); diff --git a/demos/mammals.js b/demos/mammals.js deleted file mode 100644 index 75e71bb..0000000 --- a/demos/mammals.js +++ /dev/null @@ -1,133 +0,0 @@ -/* - -Copyright 2012 OCAD University - -Licensed under the Educational Community License (ECL), Version 2.0 or the New -BSD license. You may not use this file except in compliance with one these -Licenses. 
- -You may obtain a copy of the ECL 2.0 License and BSD License at -https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt -*/ - -/*global jQuery, fluid*/ - -// JSLint options -/*jslint white: true, funcinvoke: true, undef: true, newcap: true, nomen: true, regexp: true, bitwise: true, browser: true, forin: true, maxerr: 100, indent: 4 */ - - -(function ($) { - $(document).ready(function () { - fluid.pageEnhancer({ - tocTemplate: "../lib/infusion/components/tableOfContents/html/TableOfContents.html" - }); - - fluid.uiOptions.fatPanel(".flc-uiOptions", { - prefix: "../lib/infusion/components/uiOptions/html/" - }); - - fluid.videoPlayer(".mammals-video", { - model: { - video: { - sources: [ - { - src: "videos/Mammals/Mammals.mp4", - type: "video/mp4" - }, - { - src: "videos/Mammals/Mammals.webm", - type: "video/webm" - }, - { - src: "http://www.youtube.com/v/0jw74pfWfxA", - type: "youtube" - } - ] - }, - captions: { - sources: { - english: { - src: "videos/Mammals/Mammals.en.json", - type: "JSONcc" - }, - francaise: { - src: "videos/Mammals/Mammals.fr.json", - type: "JSONcc" - } - }, - selection: "english" - } - } - }); - - // TODO: There is repetition here when creating the video players - can it be refactored? - fluid.videoPlayer(".polar-mammals-video", { - model: { - video: { - sources: [ - { - src: "videos/PolarMammals/PolarMammals.mp4", - type: "video/mp4" - }, - { - src: "videos/PolarMammals/PolarMammals.webm", - type: "video/webm" - }, - { - src: "http://www.youtube.com/v/h_oHNP50FGM", - type: "youtube" - } - ] - }, - captions: { - sources: { - english: { - src: "videos/PolarMammals/PolarMammals.en.json", - type: "JSONcc" - }, - francaise: { - src: "videos/PolarMammals/PolarMammals.fr.json", - type: "JSONcc" - } - }, - selection: "english" - } - } - }); - - fluid.videoPlayer(".polar-adapt-video", { - model: { - video: { - sources: [ - { - src: "videos/PolarMammalAdaptations/PolarMammalAdaptations.mp4", - type: "video/mp4" - }, - { - src: "videos/PolarMammalAdaptations/PolarMammalAdaptations.webm", - type: "video/webm" - }, - { - src: "http://www.youtube.com/v/3_3p2ylZDAE", - type: "youtube" - } - ] - }, - captions: { - sources: { - english: { - src: "videos/PolarMammalAdaptations/PolarMammalAdaptations.en.json", - type: "JSONcc" - }, - francaise: { - src: "videos/PolarMammalAdaptations/PolarMammalAdaptations.fr.json", - type: "JSONcc" - } - }, - selection: "english" - } - } - }); - }); - -})(jQuery); diff --git a/demos/videos/Mammals/Mammals.en.vtt b/demos/videos/Mammals/Mammals.en.vtt new file mode 100644 index 0000000..c6b44ab --- /dev/null +++ b/demos/videos/Mammals/Mammals.en.vtt @@ -0,0 +1,94 @@ +WEBVTT + +1 +00:00:00.033 --> 00:00:02.064 +http://beyondpenguins.nsdl.org + +2 +00:00:02.064 --> 00:00:08.059 +An interview with Dr. Ross MacPhee, curator and researcher at the American Museum of Natural History. + +3 +00:00:08.059 --> 00:00:13.000 +What is a mammal? + +4 +00:00:13.000 --> 00:00:16.050 +Mammals are a group of vertebrates + +5 +00:00:16.050 --> 00:00:19.000 +that is to say, animals with backbones + +6 +00:00:19.000 --> 00:00:22.000 +who are distinguished by having four legs or + +7 +00:00:22.000 --> 00:00:24.093 +four appendages that are like legs + +8 +00:00:24.093 --> 00:00:30.016 +and whose young develop within a complicated series of birth membranes. 
+ +9 +00:00:30.016 --> 00:00:36.033 +By contrast, fish are vertebrates, but true fish lack four appendages + +10 +00:00:36.033 --> 00:00:38.091 +so they're not tetrapod as we call them + +11 +00:00:38.091 --> 00:00:43.033 +and animals like amphibians who do have four legs + +12 +00:00:43.033 --> 00:00:48.020 +and do have backbones are not members of that particular group + +13 +00:00:48.020 --> 00:00:51.064 +because they don't have these complicated birth membranes. + +14 +00:00:51.064 --> 00:00:58.014 +We are distinguished from all other vertebrates by that general set of features. + +15 +00:00:58.014 --> 00:01:01.083 +More specifically, we're the only group that has the combination + +16 +00:01:01.083 --> 00:01:08.000 +of hair, mother's milk and three tiny bones in the middle ear + +17 +00:01:08.000 --> 00:01:11.083 +that are responsible for conducting sound from the outside + +18 +00:01:11.083 --> 00:01:15.050 +to where it can be interpreted in the brain. + +19 +00:01:15.050 --> 00:01:17.078 +http://beyondpenguins.nsdl.org + +20 +00:01:17.078 --> 00:01:21.031 +For more: Go online to the January 2009 issue on mammals. + +21 +00:01:21.031 --> 00:01:25.017 +Special thanks to Dr. MacPhee and the American Museum of Natural History for the making of this recording. + +22 +00:01:25.017 --> 00:01:28.032 +Recorded at AMNH, October 2008. Karen Taber, Producer + +23 +00:01:28.032 --> 00:01:33.015 +Podcast produced by Robert Payo National Science Digital Library + diff --git a/demos/videos/Mammals/mammals.mp4 b/demos/videos/Mammals/Mammals.mp4 similarity index 100% rename from demos/videos/Mammals/mammals.mp4 rename to demos/videos/Mammals/Mammals.mp4 diff --git a/demos/videos/Mammals/mammals.webm b/demos/videos/Mammals/Mammals.webm similarity index 100% rename from demos/videos/Mammals/mammals.webm rename to demos/videos/Mammals/Mammals.webm diff --git a/demos/videos/PolarMammalAdaptations/PolarMammalAdaptations.en.vtt b/demos/videos/PolarMammalAdaptations/PolarMammalAdaptations.en.vtt new file mode 100644 index 0000000..f11377c --- /dev/null +++ b/demos/videos/PolarMammalAdaptations/PolarMammalAdaptations.en.vtt @@ -0,0 +1,202 @@ +WEBVTT + +1 +00:00:00.023 --> 00:00:03.090 +http://beyondpenguins.nsdl.org + +2 +00:00:03.090 --> 00:00:06.031 +An interview with Dr. Ross MacPhee + +3 +00:00:06.031 --> 00:00:08.082 +Curator and Researcher at the American Museum of Natural History + +4 +00:00:08.082 --> 00:00:13.074 +What adaptations do mammals have in order to survive at the poles? + +5 +00:00:13.074 --> 00:00:16.057 +We can think of a couple of important ways in which adaptation has + +6 +00:00:16.057 --> 00:00:21.054 +made it possible for mammals to live in polar regions. + +7 +00:00:21.054 --> 00:00:24.038 +Insulation is the big deal, this is either + +8 +00:00:24.038 --> 00:00:29.081 +in the form of what kind of fur or hair that the body has + +9 +00:00:29.081 --> 00:00:31.095 +or the amount of blubber + +10 +00:00:31.095 --> 00:00:35.061 +or a very efficient circulatory system, high basic metabolism + +11 +00:00:35.061 --> 00:00:41.019 +All of these features have in common the idea that you need to keep the animal warm + +12 +00:00:41.019 --> 00:00:44.086 +Marine mammals in fact have less of a problem than you might think in a way + +13 +00:00:44.086 --> 00:00:48.029 +because as long as the water is water, it's not frozen + +14 +00:00:48.029 --> 00:00:51.017 +and it's going to be at 32 degrees or above. 
+ +15 +00:00:51.017 --> 00:00:57.021 +So they're able in fact to do quite well with a few inches of blubber + +16 +00:00:57.021 --> 00:01:02.097 +and the kind of fur patterning that is very high in its R value + +17 +00:01:02.097 --> 00:01:04.055 +so you don't lose a lot of heat. + +18 +00:01:04.055 --> 00:01:06.073 +On land it's a different deal + +19 +00:01:06.073 --> 00:01:09.065 +there you have extreme temperature excursions + +20 +00:01:09.065 --> 00:01:13.004 +and that is a worry if you are any kind of mammal + +21 +00:01:13.004 --> 00:01:15.069 +because you're not going to be adapted to the very coldest conditions + +22 +00:01:15.069 --> 00:01:18.071 +and there's other kinds of adaptations that will have to come in here. + +23 +00:01:18.071 --> 00:01:20.094 +One of the significant ones is hibernation. + +24 +00:01:20.094 --> 00:01:27.021 +There's a number of arctic mammals both on the carnivore side and the non-carnivore side + +25 +00:01:27.021 --> 00:01:31.081 +that go in for various kinds of either true deep sleep + +26 +00:01:31.081 --> 00:01:35.001 +which is characteristic of hibernation or at least estivation + +27 +00:01:35.001 --> 00:01:37.098 +which is where they really reduce their activities. + +28 +00:01:37.098 --> 00:01:40.063 +Let's take a look at bears, for example. + +29 +00:01:40.063 --> 00:01:45.065 +Female polar bears give birth while they're hibernating. + +30 +00:01:45.065 --> 00:01:48.094 +They dig out a hole in the snow, a cave in the snow and + +31 +00:01:48.094 --> 00:01:51.042 +that's where they stay over the winter months and + +32 +00:01:51.042 --> 00:01:55.002 +pups are actually born while the mother is still hibernating. + +33 +00:01:55.002 --> 00:02:00.037 +And this permits her to provide a very warm kind of enclosure + +34 +00:02:00.037 --> 00:02:03.094 +for the cubs as they are developing outside of her body + +35 +00:02:03.094 --> 00:02:10.086 +and also reduces the need for her to part from them during the very early stages of their development. + +36 +00:02:10.086 --> 00:02:16.033 +For small mammals, think of rodents, it's different again. + +37 +00:02:16.033 --> 00:02:19.087 +Most of them actually stay active the entire winter + +38 +00:02:19.087 --> 00:02:23.031 +and if you go up into arctic regions just after the snow melts + +39 +00:02:23.031 --> 00:02:26.014 +you can see their little runways everywhere where they've been + +40 +00:02:26.014 --> 00:02:29.099 +actually going under the snow and feeding on grass shoots or roots + +41 +00:02:29.099 --> 00:02:32.036 +anything that's preserved there for them. + +42 +00:02:32.036 --> 00:02:34.069 +http://beyondpenguins.nsdl.org + +43 +00:02:34.069 --> 00:02:38.054 +For more: Go online to the January 2009 issue on mammals. + +44 +00:02:38.054 --> 00:02:43.040 +Special thanks to Dr. MacPhee and the American Museum of Natural History for the making of this recording. + +45 +00:02:43.040 --> 00:02:44.095 +Recorded at AMNH, October 2008 + +46 +00:02:44.095 --> 00:02:46.007 +Karen Taber, Producer + +47 +00:02:46.007 --> 00:02:48.016 +Podcast produced by Robert Payo + +48 +00:02:48.016 --> 00:02:50.038 +National Science Digital Library + +49 +00:02:50.038 --> 00:02:51.045 +November 2008 + +50 +00:02:51.045 --> 09:59:59.000 +Funding for this podcast was made possible by the National Science Foundation.
+ diff --git a/demos/videos/PolarMammals/PolarMammals.en.vtt b/demos/videos/PolarMammals/PolarMammals.en.vtt new file mode 100644 index 0000000..feeab94 --- /dev/null +++ b/demos/videos/PolarMammals/PolarMammals.en.vtt @@ -0,0 +1,134 @@ +WEBVTT + +1 +00:00:00.041 --> 00:00:03.057 +http://beyondpenguins.nsdl.org + +2 +00:00:03.057 --> 00:00:08.083 +An interview with Dr. Ross MacPhee Curator and Researcher at the American Museum of Natural History + +3 +00:00:08.083 --> 00:00:13.065 +What mammals live in the polar regions? + +4 +00:00:13.065 --> 00:00:16.062 +So in terms of characteristically arctic mammals + +5 +00:00:16.062 --> 00:00:19.041 +what we can think of are first of all the large herbivores + +6 +00:00:19.041 --> 00:00:22.038 +these would be musk oxen, and caribou + +7 +00:00:22.038 --> 00:00:27.000 +some of the major carnivores including polar bear, arctic fox + +8 +00:00:27.000 --> 00:00:30.041 +and then a host of other mostly smaller mammals + +9 +00:00:30.041 --> 00:00:35.083 +arctic hare comes to mind and several of the other rodent groups + +10 +00:00:35.083 --> 00:00:39.010 +are also represented in the arctic. + +11 +00:00:39.010 --> 00:00:41.074 +We also should think about sea mammals in this regard + +12 +00:00:41.074 --> 00:00:45.037 +because the sea mammals in fact in arctic waters are very diverse + +13 +00:00:45.037 --> 00:00:50.057 +in addition to large whales, both toothed whales and baleen or filter feeding whales + +14 +00:00:50.057 --> 00:00:56.051 +we've got a whole host of smaller whales including porpoises and orcas + +15 +00:00:56.051 --> 00:00:59.050 +and then there's the seal group. + +16 +00:00:59.050 --> 00:01:03.090 +We've got the seals that live all the time in arctic waters + +17 +00:01:03.090 --> 00:01:07.038 +like walruses and sea lions and + +18 +00:01:07.038 --> 00:01:12.077 +also other kinds of members of the same group like ribbon seals. + +19 +00:01:12.077 --> 00:01:16.090 +Now these different groups of course, all have different feeding regimes. + +20 +00:01:16.090 --> 00:01:21.017 +So in the case of seals we think of them as basically being carnivores. + +21 +00:01:21.017 --> 00:01:27.083 +But for walruses for example they actually feed almost exclusively on shellfish + +22 +00:01:27.083 --> 00:01:32.018 +which they take into their mouths and actually suck out the contents. + +23 +00:01:32.018 --> 00:01:34.092 +So that's a very specialized kind of adaptive regime + +24 +00:01:34.092 --> 00:01:41.016 +that wouldn't be characteristic for example of a ribbon seal which simply goes after fish. + +25 +00:01:41.016 --> 00:01:43.050 +http://beyondpenguins.nsdl.org + +26 +00:01:43.050 --> 00:01:46.050 +For more: Go online to the January 2009 issue on mammals. + +27 +00:01:46.050 --> 00:01:49.043 +Special thanks to Dr. MacPhee and the American Museum of Natural History for the making of this recording.
+ +28 +00:01:49.043 --> 00:01:51.072 +Recorded at AMNH, October 2008 + +29 +00:01:51.072 --> 00:01:52.085 +Karen Taber, Producer + +30 +00:01:52.085 --> 00:01:55.074 +Podcast produced by Robert Payo + +31 +00:01:55.074 --> 00:01:58.014 +National Science Digital Library November 2008 + +32 +00:01:58.014 --> 00:02:00.066 +Funding for this podcast was made possible by the + +33 +00:02:00.066 --> 09:59:59.000 +National Science Foundation + diff --git a/demos/videos/ReorganizeFuture/ReorganizeFuture.en.vtt b/demos/videos/ReorganizeFuture/ReorganizeFuture.en.vtt new file mode 100644 index 0000000..46db043 --- /dev/null +++ b/demos/videos/ReorganizeFuture/ReorganizeFuture.en.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Catch a tiger by the toe + +3 +00:00:05.99 --> 00:00:08.05 +If he hollers let him go + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny moe. + +5 +00:00:12.70 --> 00:00:14.64 +I'm Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +and I've come to wonder + +7 +00:00:16.04 --> 00:00:18.38 +whether we have a chance to reorganize our future. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley says that the future of design is human centred. + +9 +00:00:23.23 --> 00:00:25.51 +Most experts agree. + +10 +00:00:25.51 --> 00:00:29.02 +That leaves the question - which human? + +11 +00:00:29.02 --> 00:00:32.46 +An inevitable human condition is diversity. + +12 +00:00:32.46 --> 00:00:34.70 +There's no typical human, + +13 +00:00:34.70 --> 00:00:37.33 +even clones and identical twins are not the same. + +14 +00:00:37.33 --> 00:00:40.42 +We differ from one to the next, + +15 +00:00:40.42 --> 00:00:43.54 +but also from one moment to the next, + +16 +00:00:43.54 --> 00:00:46.84 +from one context to the next. + +17 +00:00:46.84 --> 00:00:49.91 +But diversity and difference become overwhelming + +18 +00:00:49.91 --> 00:00:52.94 +and we develop strategies to deal with this diversity. + +19 +00:00:52.94 --> 00:00:55.56 +We try to make things simpler, + +20 +00:00:55.60 --> 00:00:57.50 +less complex, less chaotic + +21 +00:00:57.50 --> 00:01:00.00 +Another part of the human condition is that + +22 +00:01:00.00 --> 00:01:03.18 +we try to find commonality and connections. + +23 +00:01:03.20 --> 00:01:09.53 +We form groups informal and formal with implicit and explicit criteria. + +24 +00:01:09.55 --> 00:01:13.97 +We organize, we create categories, we filter, we label. + +25 +00:01:13.97 --> 00:01:19.67 +At our most insecure and overwhelmed we divide in two, we create binaries: + +26 +00:01:19.67 --> 00:01:21.86 +male, female + +27 +00:01:21.86 --> 00:01:24.21 +disabled, normal + +28 +00:01:24.21 --> 00:01:27.03 +left, right + +29 +00:01:27.03 --> 00:01:29.57 +us, them. + +30 +00:01:29.57 --> 00:01:34.98 +This all results in issues of who belongs and who is excluded. + +31 +00:01:34.98 --> 00:01:37.53 +Membership in groups can be self assigned, + +32 +00:01:37.53 --> 00:01:40.46 +may be imposed, may even be policed. + +33 +00:01:40.46 --> 00:01:44.17 +Groups are used to assert or assign privileges and powers. + +34 +00:01:44.17 --> 00:01:46.92 +We use groups to judge + +35 +00:01:46.92 --> 00:01:49.22 +values get assigned to groups + +36 +00:01:49.22 --> 00:01:51.88 +often characteristics that have nothing to do with + +37 +00:01:51.88 --> 00:01:53.35 +the original founding properties of groups + +38 +00:01:53.35 --> 00:01:55.98 +are generalized to all individuals in the group.
+ +39 +00:01:55.98 --> 00:02:00.07 +Sometimes, people who are in an imposed group + +40 +00:02:00.07 --> 00:02:02.00 +take ownership of the group and reform + +41 +00:02:02.00 --> 00:02:04.42 +the classifications and values from within. + +42 +00:02:04.42 --> 00:02:08.44 +Occasionally, someone has the audacity + +43 +00:02:08.44 --> 00:02:11.00 +to break out of the category we have put her in + +44 +00:02:11.00 --> 00:02:16.90 +but to preserve our category, we may dismiss her as an anomaly. + +45 +00:02:16.90 --> 00:02:20.03 +Some groups are more fluid while others are more fixed. + +46 +00:02:20.03 --> 00:02:23.56 +We not only form groups, but groups of groups + +47 +00:02:23.56 --> 00:02:25.66 +and groups, of groups, of groups. + +48 +00:02:25.66 --> 00:02:29.09 +Membership in one group can grant us membership in other groups. + +49 +00:02:29.09 --> 00:02:33.03 +But despite all this, we are diverse, + +50 +00:02:33.03 --> 00:02:34.85 +we are complex, + +51 +00:02:34.85 --> 00:02:36.45 +we are chaotic. + +52 +00:02:36.45 --> 00:02:38.81 +Individually we're different + +53 +00:02:38.81 --> 00:02:40.44 +over time, in different contexts, + +54 +00:02:40.44 --> 00:02:42.35 +in different roles, in different groups. + +55 +00:02:42.35 --> 00:02:45.42 +We need to assert our uniqueness, + +56 +00:02:45.42 --> 00:02:47.86 +we need to form and refine our identity. + +57 +00:02:47.86 --> 00:02:50.91 +We struggle with the identity imposed on us. + +58 +00:02:50.91 --> 00:02:56.36 +Generally, people do not fit easily into assigned categories + +59 +00:02:56.36 --> 00:02:58.98 +and yet we persist in assigning them. + +60 +00:02:58.98 --> 00:03:02.63 +And then, something new comes along + +61 +00:03:02.63 --> 00:03:05.41 +and shakes up our groups, our categories and our rules, + +62 +00:03:05.41 --> 00:03:08.26 +and we need to adjust, rebuild and rethink. + +63 +00:03:08.26 --> 00:03:12.53 +Something like, networks and digital stuff. + +64 +00:03:12.53 --> 00:03:15.47 +This new digital and connected world + +65 +00:03:15.47 --> 00:03:17.87 +puts into question how we group things + +66 +00:03:17.87 --> 00:03:20.75 +and challenges our excuses for leaving people out. + +67 +00:03:20.75 --> 00:03:25.46 +The digital changes our view of time, space and distance + +68 +00:03:25.46 --> 00:03:31.08 +and by extension our view of design, what is possible and what things cost. + +69 +00:03:31.08 --> 00:03:36.04 +Digital things are plastic, mutable, malleable and adaptable. + +70 +00:03:36.04 --> 00:03:39.50 +Before, not everyone could fit, + +71 +00:03:39.50 --> 00:03:42.16 +allowing someone in meant someone else was left out. + +72 +00:03:42.16 --> 00:03:46.06 +In the digital, room is very stretchy. + +73 +00:03:46.06 --> 00:03:49.76 +Before, what we created could not fit everyone + +74 +00:03:49.76 --> 00:03:51.77 +so we made it fit the largest group. + +75 +00:03:51.77 --> 00:03:54.53 +We made it for the group called average or typical + +76 +00:03:54.53 --> 00:03:58.26 +this left out everyone not average or typical. + +77 +00:03:58.26 --> 00:04:03.39 +In the digital reality the things we make can reconfigure, adapt + +78 +00:04:03.39 --> 00:04:06.27 +and take a form that is best for each individual. + +79 +00:04:06.27 --> 00:04:11.90 +In the solid world, each copy cost almost the same as the original. + +80 +00:04:11.90 --> 00:04:14.35 +Consumption actually consumed. + +81 +00:04:14.35 --> 00:04:18.56 +In the digital world, we can copy almost without cost. 
+ +82 +00:04:18.56 --> 00:04:21.00 +Consumption no longer consumes. + +83 +00:04:21.00 --> 00:04:24.52 +Before, it took a great deal of time and effort + +84 +00:04:24.52 --> 00:04:27.23 +to deliver things, especially to people far away. + +85 +00:04:27.23 --> 00:04:30.93 +Now it is as easy to deliver things around the world + +86 +00:04:30.93 --> 00:04:33.13 +as it is to deliver things next door. + +87 +00:04:33.13 --> 00:04:36.85 +Before, if we didn't place things in a fixed spot + +88 +00:04:36.85 --> 00:04:39.53 +we would have a hard time finding them again. + +89 +00:04:39.53 --> 00:04:43.63 +Now we can place them anywhere on the network and + +90 +00:04:43.63 --> 00:04:46.26 +retrieve them anywhere on the network. + +91 +00:04:46.26 --> 00:04:50.13 +Before, we needed to label things unambiguously and simply + +92 +00:04:50.13 --> 00:04:52.80 +so we could recognize them and know what to do with them. + +93 +00:04:52.80 --> 00:04:56.44 +Now we can see a description of each person or thing + +94 +00:04:56.44 --> 00:04:59.02 +that is useful and relevant to our purpose. + +95 +00:04:59.02 --> 00:05:03.01 +And by the way, we have learned that + +96 +00:05:03.01 --> 00:05:06.36 +inclusion and equality are good for all of us. + +97 +00:05:06.36 --> 00:05:09.35 +We are all healthier, wealthier and wiser + +98 +00:05:09.35 --> 00:05:12.19 +when our society is inclusive and equal. + +99 +00:05:12.19 --> 00:05:15.36 +We've also discovered that diverse groups + +100 +00:05:15.36 --> 00:05:18.93 +are more innovative and creative, and better at planning and predicting. + +101 +00:05:18.93 --> 00:05:23.73 +We've experimented with new organization like + +102 +00:05:23.73 --> 00:05:26.33 +most popular, to be ignored + +103 +00:05:26.33 --> 00:05:28.69 +friend, not friend. + +104 +00:05:28.69 --> 00:05:31.03 +But we can do better. + +105 +00:05:31.03 --> 00:05:33.26 +We can afford to be generous in our design, + +106 +00:05:33.26 --> 00:05:35.39 +we have fewer excuses to exclude. + +107 +00:05:35.39 --> 00:05:37.56 +We can be true to our diversity. + +108 +00:05:37.56 --> 00:05:43.06 +Perhaps now, we can find a way to make room for us all. + diff --git a/demos/videos/ReorganizeFuture/ReorganizeFuture.fr.vtt b/demos/videos/ReorganizeFuture/ReorganizeFuture.fr.vtt new file mode 100644 index 0000000..e647de8 --- /dev/null +++ b/demos/videos/ReorganizeFuture/ReorganizeFuture.fr.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Catch un tigre par le gros orteil + +3 +00:00:05.99 --> 00:00:08.05 +S'il crie le laisser aller + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny moe. + +5 +00:00:12.70 --> 00:00:14.64 +Je suis Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +et je suis venu à me demander + +7 +00:00:16.04 --> 00:00:18.38 +si nous avons une chance de réorganiser notre avenir. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley affirme que l'avenir du design est centré humaine. + +9 +00:00:23.23 --> 00:00:25.51 +La plupart des experts s'accordent à dire. + +10 +00:00:25.51 --> 00:00:29.02 +Reste la question - ce qui de l'homme? + +11 +00:00:29.02 --> 00:00:32.46 +Une condition humaine est inévitable, la diversité. + +12 +00:00:32.46 --> 00:00:34.70 +Il n'y a aucun humain typique. + +13 +00:00:34.70 --> 00:00:37.33 +Même les clones et les jumeaux identiques ne sont pas les mêmes. 
+ +14 +00:00:37.33 --> 00:00:40.42 +Nous différons de l'un à l'autre, + +15 +00:00:40.42 --> 00:00:43.54 +mais aussi d'un moment à l'autre, + +16 +00:00:43.54 --> 00:00:46.84 +d'un contexte à l'autre. + +17 +00:00:46.84 --> 00:00:49.91 +Mais la diversité et la différence devient écrasante + +18 +00:00:49.91 --> 00:00:52.94 +et nous développons des stratégies pour faire face à cette diversité. + +19 +00:00:52.94 --> 00:00:55.56 +Nous essayons de rendre les choses plus simples, + +20 +00:00:55.60 --> 00:00:57.50 +moins complexe, moins chaotique. + +21 +00:00:57.50 --> 00:01:00.00 +Une autre partie de la condition humaine est que nous + +22 +00:01:00.00 --> 00:01:03.18 +essayer de trouver communité et connexions. + +23 +00:01:03.20 --> 00:01:09.53 +Nous formons des groupes formels et informels avec des critères explicites et implicites. + +24 +00:01:09.55 --> 00:01:13.97 +Nous organisons, nous créons des catégories, on filtre, on étiquette. + +25 +00:01:13.97 --> 00:01:19.67 +A notre plus précaires et accablé nous divisons en deux, nous créons des binaires: + +26 +00:01:19.67 --> 00:01:21.86 +masculin, féminin + +27 +00:01:21.86 --> 00:01:24.21 +handicapés, normale + +28 +00:01:24.21 --> 00:01:27.03 +gauche, droite + +29 +00:01:27.03 --> 00:01:29.57 +nous, eux. + +30 +00:01:29.57 --> 00:01:34.98 +Cela se traduit tout dans les questions de qui appartient et qui est exclu. + +31 +00:01:34.98 --> 00:01:37.53 +L'adhésion à des groupes peuvent être auto assignés, + +32 +00:01:37.53 --> 00:01:40.46 +peut être imposée, peut-être même policée. + +33 +00:01:40.46 --> 00:01:44.17 +Les groupes sont utilisés pour affirmer ou d'attribuer des privilèges et des pouvoirs. + +34 +00:01:44.17 --> 00:01:46.92 +Nous utilisons des groupes de juger + +35 +00:01:46.92 --> 00:01:49.22 +valeurs sont attribuées à des groupes + +36 +00:01:49.22 --> 00:01:51.88 +souvent des caractéristiques qui n'ont rien à voir avec + +37 +00:01:51.88 --> 00:01:53.35 +les propriétés originales des groupes fondateurs du + +38 +00:01:53.35 --> 00:01:55.98 +sont généralisés à tous les individus dans le groupe. + +39 +00:01:55.98 --> 00:02:00.07 +Parfois, les gens qui sont dans un groupe imposé + +40 +00:02:00.07 --> 00:02:02.00 +prendre la propriété du groupe et de la réforme + +41 +00:02:02.00 --> 00:02:04.42 +les classifications et les valeurs de l'intérieur. + +42 +00:02:04.42 --> 00:02:08.44 +Parfois, quelqu'un a l'audace + +43 +00:02:08.44 --> 00:02:11.00 +pour sortir de la catégorie, nous avons la mettre dans + +44 +00:02:11.00 --> 00:02:16.90 +mais pour préserver notre catégorie, nous pouvons la renvoyer comme une anomalie. + +45 +00:02:16.90 --> 00:02:20.03 +Certains groupes sont plus fluides tandis que d'autres sont plus fixes. + +46 +00:02:20.03 --> 00:02:23.56 +Nous les groupes se forment pas seulement, mais des groupes de groupes + +47 +00:02:23.56 --> 00:02:25.66 +et des groupes, des groupes, des groupes. + +48 +00:02:25.66 --> 00:02:29.09 +L'adhésion à un groupe peut nous accorder l'adhésion à d'autres groupes. + +49 +00:02:29.09 --> 00:02:33.03 +Mais malgré tout cela, nous sommes diversifiés + +50 +00:02:33.03 --> 00:02:34.85 +nous sommes complexe + +51 +00:02:34.85 --> 00:02:36.45 +nous sommes chaotique. + +52 +00:02:36.45 --> 00:02:38.81 +Individuellement, nous sommes différents + +53 +00:02:38.81 --> 00:02:40.44 +au fil du temps, dans des contextes différents + +54 +00:02:40.44 --> 00:02:42.35 +dans des rôles différents, dans des groupes différents. 
+ +55 +00:02:42.35 --> 00:02:45.42 +Nous devons affirmer notre spécificité + +56 +00:02:45.42 --> 00:02:47.86 +nous avons besoin de former et de perfectionner notre identité. + +57 +00:02:47.86 --> 00:02:50.91 +Nous luttons avec l'identité qui nous est imposé. + +58 +00:02:50.91 --> 00:02:56.36 +Généralement, les gens ne s'intègrent pas facilement dans les catégories assignées + +59 +00:02:56.36 --> 00:02:58.98 +et pourtant nous persistons à les affecter. + +60 +00:02:58.98 --> 00:03:02.63 +Et puis, quelque chose de nouveau arrive + +61 +00:03:02.63 --> 00:03:05.41 +et secoue nos groupes, nos catégories et nos règles + +62 +00:03:05.41 --> 00:03:08.26 +et nous avons besoin d'ajuster, de reconstruire et de repenser. + +63 +00:03:08.26 --> 00:03:12.53 +Quelque chose comme, réseaux et des trucs numérique. + +64 +00:03:12.53 --> 00:03:15.47 +Ce nouveau monde numérique et connecté + +65 +00:03:15.47 --> 00:03:17.87 +remet en question la façon dont nous les choses de groupe + +66 +00:03:17.87 --> 00:03:20.75 +et les défis nos excuses pour laisser les gens sortir. + +67 +00:03:20.75 --> 00:03:25.46 +Le numérique change notre vision du temps, d'espace et de distance + +68 +00:03:25.46 --> 00:03:31.08 +et par extension notre point de vue du design, ce qui est possible et quelles choses coût. + +69 +00:03:31.08 --> 00:03:36.04 +Things Digital sont en plastique, mutable, malléable et adaptable. + +70 +00:03:36.04 --> 00:03:39.50 +Avant, tout le monde ne pouvait en forme + +71 +00:03:39.50 --> 00:03:42.16 +permettre à quelqu'un de quelqu'un d'autre était destiné à l'écart. + +72 +00:03:42.16 --> 00:03:46.06 +. Dans le numérique, la chambre est très extensible + +73 +00:03:46.06 --> 00:03:49.76 +Avant, ce que nous avons créé ne pouvait pas convenir à tous + +74 +00:03:49.76 --> 00:03:51.77 +. Alors nous avons fait l'adapter le plus grand groupe + +75 +00:03:51.77 --> 00:03:54.53 +Nous l'avons fait pour le groupe appelé moyen ou typique + +76 +00:03:54.53 --> 00:03:58.26 +cette gauche à tous de ne pas en moyenne ou typique. + +77 +00:03:58.26 --> 00:04:03.39 +Dans la réalité numérique des choses que nous faisons peut reconfigurer, adapter + +78 +00:04:03.39 --> 00:04:06.27 +et prendre une forme qui est le mieux pour chaque individu. + +79 +00:04:06.27 --> 00:04:11.90 +Dans le monde solide, chaque copie coûte presque le même que l'original. + +80 +00:04:11.90 --> 00:04:14.35 +la consommation a réellement consommé. + +81 +00:04:14.35 --> 00:04:18.56 +Dans le monde numérique, nous pouvons copier presque sans coût. + +82 +00:04:18.56 --> 00:04:21.00 +La consommation ne consomme plus. + +83 +00:04:21.00 --> 00:04:24.52 +Avant, il a fallu beaucoup de temps et d'effort + +84 +00:04:24.52 --> 00:04:27.23 +pour livrer des choses, surtout pour les gens très loin. + +85 +00:04:27.23 --> 00:04:30.93 +Maintenant, il est aussi facile de livrer des choses dans le monde + +86 +00:04:30.93 --> 00:04:33.13 +. Comme il est de livrer des choses à côté + +87 +00:04:33.13 --> 00:04:36.85 +Avant, si on ne place pas les choses dans un endroit fixe + +88 +00:04:36.85 --> 00:04:39.53 +nous aurions du mal à les retrouver. + +89 +00:04:39.53 --> 00:04:43.63 +Maintenant, nous pouvons les placer n'importe où sur le réseau et + +90 +00:04:43.63 --> 00:04:46.26 +les récupérer n'importe où sur le réseau. + +91 +00:04:46.26 --> 00:04:50.13 +Avant, nous avions besoin d'étiqueter les choses clairement et simplement + +92 +00:04:50.13 --> 00:04:52.80 +. 
Afin que nous puissions les reconnaître et de savoir quoi faire avec eux + +93 +00:04:52.80 --> 00:04:56.44 +Maintenant nous pouvons voir une description de chaque personne ou une chose + +94 +00:04:56.44 --> 00:04:59.02 +ce qui est utile et pertinente à notre but. + +95 +00:04:59.02 --> 00:05:03.01 +Et en passant, nous avons appris que + +96 +00:05:03.01 --> 00:05:06.36 +l'inclusion et l'égalité sont bons pour nous tous. + +97 +00:05:06.36 --> 00:05:09.35 +Nous sommes tous sains, plus riches et plus sage + +98 +00:05:09.35 --> 00:05:12.19 +quand notre société est inclusive et égalitaire. + +99 +00:05:12.19 --> 00:05:15.36 +Nous avons également découvert que les divers groupes + +100 +00:05:15.36 --> 00:05:18.93 +sont plus innovantes et créatives, et mieux à la planification et la prévision. + +101 +00:05:18.93 --> 00:05:23.73 +Nous avons expérimenté avec la nouvelle organisation comme + +102 +00:05:23.73 --> 00:05:26.33 +le plus populaire, pour être ignoré + +103 +00:05:26.33 --> 00:05:28.69 +ami, pas un ami. + +104 +00:05:28.69 --> 00:05:31.03 +Mais nous pouvons faire mieux. + +105 +00:05:31.03 --> 00:05:33.26 +Nous pouvons nous permettre d'être généreux dans notre conception + +106 +00:05:33.26 --> 00:05:35.39 +nous avons moins d'excuses à exclure. + +107 +00:05:35.39 --> 00:05:37.56 +Nous pouvons être fidèles à notre diversité. + +108 +00:05:37.56 --> 00:05:43.06 +Peut-être maintenant, nous pouvons trouver un moyen de faire de la place pour nous tous. + diff --git a/html/videoPlayer_template.html b/html/videoPlayer_template.html index d76729c..64b025a 100644 --- a/html/videoPlayer_template.html +++ b/html/videoPlayer_template.html @@ -20,22 +20,24 @@
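The VideoPlayer.js changes that follow restructure the component's configuration: the lists of media sources, captions and transcripts move out of the model into a new options.video block, the old "states.*" model paths are flattened to top level, and the ajax-based captionner/captionLoader pair is replaced by an html5Captionator wrapper around captionator.js. A minimal usage sketch of the new options layout; the container selector, URLs, languages and labels here are assumed example values, not taken from this patch:

// Hypothetical instantiation against the restructured options (sketch only;
// all concrete values below are invented for illustration).
fluid.videoPlayer(".flc-videoPlayer", {
    video: {
        // each source is dispatched to a renderer registered for its "type"
        sources: [
            { src: "videos/myVideo.ogv", type: "video/ogv" }
        ],
        // kind/src/type/srclang/label are the attributes html5Captionator
        // copies onto the <track> tags it appends to the video element
        captions: [
            { src: "captions/myVideo.fr.vtt", type: "text/vtt", srclang: "fr", label: "Français", kind: "subtitles" }
        ],
        transcripts: [
            { src: "transcripts/myVideo.fr.json", label: "Français" }
        ]
    }
});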
diff --git a/js/VideoPlayer.js b/js/VideoPlayer.js index 1371ff8..269c1b6 100644 --- a/js/VideoPlayer.js +++ b/js/VideoPlayer.js @@ -111,7 +111,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt applier: "{videoPlayer}.applier", events: { onMediaReady: "{videoPlayer}.events.onMediaReady" - } + }, + sources: "{videoPlayer}.options.video.sources" } }, controllers: { @@ -121,6 +122,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt options: { model: "{videoPlayer}.model", applier: "{videoPlayer}.applier", + captions: "{videoPlayer}.options.video.captions", + transcripts: "{videoPlayer}.options.video.transcripts", events: { onControllersReady: "{videoPlayer}.events.onControllersReady", onVolumeChange: "{videoPlayer}.events.onVolumeChange", @@ -130,35 +133,24 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt } } }, - captionner: { - type: "fluid.videoPlayer.captionner", - container: "{videoPlayer}.dom.caption", - createOnEvent: "onCreateCaptionnerReady", - options: { - model: "{videoPlayer}.model", - applier: "{videoPlayer}.applier" - } - }, - captionLoader: { - type: "fluid.videoPlayer.captionLoader", - container: "{videoPlayer}.container", - createOnEvent: "onHtml5Detected", + html5Captionator: { + type: "fluid.videoPlayer.html5Captionator", + container: "{videoPlayer}.dom.video", + createOnEvent: "onHTML5BrowserDetected", options: { model: "{videoPlayer}.model", applier: "{videoPlayer}.applier", - events: { - onReady: "{videoPlayer}.events.onCreateCaptionnerReady", - onCaptionsLoaded: "{videoPlayer}.events.onCaptionsLoaded" - } + captions: "{videoPlayer}.options.video.captions" } }, transcript: { type: "fluid.videoPlayer.transcript", container: "{videoPlayer}.dom.transcript", - createOnEvent: "onHtml5Detected", + createOnEvent: "onHTML5BrowserDetected", options: { model: "{videoPlayer}.model", applier: "{videoPlayer}.applier", + transcripts: "{videoPlayer}.options.video.transcripts", components: { transriptInterval: { type: "fluid.videoPlayer.intervalEventsConductor", @@ -185,7 +177,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }, intervalEventsConductor: { type: "fluid.videoPlayer.intervalEventsConductor", - createOnEvent: "onCaptionsLoaded", + createOnEvent: "onCreateMediaReady", options: { components: { html5MediaTimer: { @@ -195,7 +187,6 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt } } }, - intervalList: "{captionLoader}.options.intervalList", events: { onTimeChange: "{videoPlayer}.events.onTimeChange", onIntervalChange: "{videoPlayer}.events.onIntervalChange" @@ -207,15 +198,12 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt postInitFunction: "fluid.videoPlayer.postInit", finalInitFunction: "fluid.videoPlayer.finalInit", events: { - onHtml5Detected: null, - onCaptionsLoaded: null, onVolumeChange: null, onScrub: null, onTemplateReady: null, onViewReady: null, onMediaReady: null, onControllersReady: null, - onCaptionnerReady: null, afterScrub: null, onStartScrub: null, onOldBrowserDetected: null, @@ -229,7 +217,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // The following events are private onCreateControllersReady: null, onCreateMediaReady: null, - onCreateCaptionnerReady: null + onHTML5BrowserDetected: null }, listeners: { onViewReady: "{videoPlayer}.fullscreen" @@ -252,38 +240,24 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt keyBindings: 
defaultKeys, produceTree: "fluid.videoPlayer.produceTree", controls: "custom", + video: { + sources: [], + captions: [], + transcripts: [] + }, model: { - states: { - play: false, - currentTime: 0, - totalTime: 0, - displayCaptions: true, - displayTranscripts: true, - fullscreen: false, - volume: 60, - muted: false, - canPlay: false - }, - video: { - sources: null + currentTracks: { + captions: [], + transcripts: [] }, - captions: { - selection: "none", - choices: [], - names: [], - show: false, - sources: null, - conversionServiceUrl: "/videoPlayer/conversion_service/index.php", - track: undefined - }, - transcripts: { - selection: "none", - choices: [], - names: [], - show: false, - sources: null, - track: undefined - } + currentTime: 0, + totalTime: 0, + displayCaptions: true, + displayTranscripts: true, + fullscreen: false, + volume: 60, + muted: false, + canPlay: false }, templates: { videoPlayer: { @@ -304,8 +278,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt key: that.options.keyBindings.fullscreen.key, activateHandler: function () { that.applier.fireChangeRequest({ - path: "states.fullscreen", - value: !that.model.states.fullscreen + path: "fullscreen", + value: !that.model.fullscreen }); } }, { @@ -313,8 +287,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt key: that.options.keyBindings.captions.key, activateHandler: function () { that.applier.fireChangeRequest({ - path: "states.displayCaptions", - value: !that.model.states.displayCaptions + path: "displayCaptions", + value: !that.model.displayCaptions }); } }, { @@ -322,7 +296,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt key: that.options.keyBindings.transcripts.key, activateHandler: function () { that.applier.fireChangeRequest({ - path: "states.displayTranscripts", - value: !that.model.states.displayTranscripts + path: "displayTranscripts", + value: !that.model.displayTranscripts }); } @@ -367,8 +341,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; var bindVideoPlayerModel = function (that) { - that.applier.modelChanged.addListener("states.fullscreen", that.fullscreen); - that.applier.modelChanged.addListener("states.canPlay", function () { + that.applier.modelChanged.addListener("fullscreen", that.fullscreen); + that.applier.modelChanged.addListener("canPlay", function () { that.events.onViewReady.fire(); }); }; @@ -386,7 +360,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt } }] }; - } else if (that.canRenderMedia(that.model.video.sources)) { + } else if (that.canRenderMedia(that.options.video.sources)) { // Keep the selector to render "fluid.videoPlayer.media" that.options.selectorsToIgnore.push("video"); } @@ -400,29 +374,13 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; fluid.videoPlayer.preInit = function (that) { - // build the 'choices' from the caption list provided - fluid.each(that.options.model.captions.sources, function (value, key) { - that.options.model.captions.choices.push(key); - that.options.model.captions.names.push(key); - }); - // add the 'turn captions off' option - that.options.model.captions.choices.push("none"); - that.options.model.captions.names.push(that.options.strings.captionsOff); - - // build the 'choices' from the transcript list provided - fluid.each(that.options.model.transcripts.sources, function (value, key) { - that.options.model.transcripts.choices.push(key); - that.options.model.transcripts.names.push(key); - }); - // add the 'turn
transcripts off' option - that.options.model.transcripts.choices.push("none"); - that.options.model.transcripts.names.push(that.options.strings.transcriptsOff); that.fullscreen = function () { var videoContainer = that.locate("videoContainer"); var video = that.locate("video"); var videoControllersContainer = that.locate("videoControllersContainer"); - if (that.model.states.fullscreen === true) { + + if (that.model.fullscreen === true) { var windowWidth = window.innerWidth + "px"; videoContainer.css({ @@ -457,30 +415,30 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.play = function (ev) { that.applier.fireChangeRequest({ - "path": "states.play", - "value": !that.model.states.play + "path": "play", + "value": !that.model.play }); }; that.incrVolume = function () { - if (that.model.states.volume < 100) { - var newVol = (that.model.states.volume + 10) / 100.0; + if (that.model.volume < 100) { + var newVol = (that.model.volume + 10) / 100.0; that.events.onVolumeChange.fire(newVol <= 1 ? newVol : 1); } }; that.decrVolume = function () { - if (that.model.states.volume > 0) { - var newVol = (that.model.states.volume - 10) / 100.0; + if (that.model.volume > 0) { + var newVol = (that.model.volume - 10) / 100.0; that.events.onVolumeChange.fire(newVol >= 0 ? newVol : 0); } }; that.incrTime = function () { that.events.onStartScrub.fire(); - if (that.model.states.currentTime < that.model.states.totalTime) { - var newVol = that.model.states.currentTime + that.model.states.totalTime * 0.05; - that.events.onScrub.fire(newVol <= that.model.states.totalTime ? newVol : that.model.states.totalTime); + if (that.model.currentTime < that.model.totalTime) { + var newVol = that.model.currentTime + that.model.totalTime * 0.05; + that.events.onScrub.fire(newVol <= that.model.totalTime ? newVol : that.model.totalTime); } that.events.afterScrub.fire(); }; @@ -488,8 +446,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.decrTime = function () { that.events.onStartScrub.fire(); - if (that.model.states.currentTime > 0) { - var newVol = that.model.states.currentTime - that.model.states.totalTime * 0.05; + if (that.model.currentTime > 0) { + var newVol = that.model.currentTime - that.model.totalTime * 0.05; that.events.onScrub.fire(newVol >= 0 ? newVol : 0); } that.events.afterScrub.fire(); @@ -500,7 +458,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.container.attr("role", "application"); // Render each media source with its custom renderer, registered by type. - // If we aren't on an HTML 5 video-enabled browser, don't bother setting up the controller or captions. + // If we aren't on an HTML 5 video-enabled browser, don't bother setting up the controller, captions or transcripts. fluid.fetchResources(that.options.templates, function (res) { var fetchFailed = false; @@ -530,7 +488,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt if (!fetchFailed) { that.events.onTemplateReady.fire(); - if (that.canRenderMedia(that.model.video.sources)) { + if (that.canRenderMedia(that.options.video.sources)) { that.events.onCreateMediaReady.fire(); } if (that.canRenderControllers(that.options.controls)) { @@ -540,7 +498,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // (i.e. captionator and/or mediaelement.js), we will // not need to do this. 
if (fluid.hasFeature("fluid.browser.html5")) { - that.events.onHtml5Detected.fire(); + that.events.onHTML5BrowserDetected.fire(); + that.fullscreen(); } } @@ -580,15 +539,4 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt } }); - fluid.demands("fluid.videoPlayer.captionner.eventBinder", ["fluid.videoPlayer.captionner", "fluid.videoPlayer"], { - options: { - listeners: { - "{videoPlayer}.events.onCaptionsLoaded": "{captionner}.resyncCaptions", - "{videoPlayer}.events.afterScrub": "{captionner}.resyncCaptions", - "{videoPlayer}.events.onStartScrub": "{captionner}.hideCaptions", - "{videoPlayer}.events.onIntervalChange": "{captionner}.displayCaptionForInterval" - } - } - }); - })(jQuery); diff --git a/js/VideoPlayer_captionLoader.js b/js/VideoPlayer_captionLoader.js deleted file mode 100644 index bde6588..0000000 --- a/js/VideoPlayer_captionLoader.js +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2009 University of Toronto -Copyright 2011 Charly Molter -Copyright 2011-2012 OCAD University - -Licensed under the Educational Community License (ECL), Version 2.0 or the New -BSD license. You may not use this file except in compliance with one these -Licenses. - -You may obtain a copy of the ECL 2.0 License and BSD License at -https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt -*/ - -/*global jQuery, window, fluid*/ - -// JSLint options -/*jslint white: true, funcinvoke: true, undef: true, newcap: true, nomen: true, regexp: true, bitwise: true, browser: true, forin: true, maxerr: 100, indent: 4 */ - - -(function ($) { - /** - * captionLoader renders loads from an Js object src element a caption file and converts it to JsonCC. - * - * @param {Object} options configuration options for the comoponent - * Note: when the caption is loaded by Ajax the event onCaptionsLoaded is fired - */ - var bindCaptionLoaderModel = function (that) { - that.applier.modelChanged.addListener("captions.selection", that.loadCaptions, "captionLoader"); - }; - - fluid.defaults("fluid.videoPlayer.captionLoader", { - gradeNames: ["fluid.viewComponent", "autoInit"], - finalInitFunction: "fluid.videoPlayer.captionLoader.finalInit", - preInitFunction: "fluid.videoPlayer.captionLoader.preInit", - events: { - onReady: null, - onCaptionsLoaded: null - }, - invokers: { - convertToMilli: { - funcName: "fluid.videoPlayer.captionLoader.convertToMilli", - args: ["{arguments}.0"] - } - }, - intervalList: null - }); - - /** - * Convert the time in the format of hh:mm:ss.mmm to milliseconds. - * The time is normally extracted from the subtitle files in WebVTT compatible format. 
- * WebVTT standard for timestamp: http://dev.w3.org/html5/webvtt/#webvtt-cue-timings - * - * @param time: in the format hh:mm:ss.mmm ("hh:" is optional) - * @return a number in millisecond - * TODO: This should be removed once capscribe desktop gives us the time in millis in the captions - */ - fluid.videoPlayer.captionLoader.convertToMilli = function (time) { - if (!time || !time.match(/^(\d{2}:)?\d{2}:\d{2}\.\d{1,3}$/)) { - return null; - } - - var splitTime = time.split(":"); - - // Handle the optional "hh:" in the input - if (splitTime.length === 2) { - // "hh:" part is NOT given - var hourStr = "0"; - var minStr = splitTime[0]; - var secWithMilliSecStr = splitTime[1]; - } else { - // "hh:" part is given - var hourStr = splitTime[0]; - var minStr = splitTime[1]; - var secWithMilliSecStr = splitTime[2]; - } - - var splitSec = secWithMilliSecStr.split("."); - var hours = parseFloat(hourStr); - var mins = parseFloat(minStr) + (hours * 60); - var secs = parseFloat(splitSec[0]) + (mins * 60); - return Math.round(secs * 1000 + parseInt(splitSec[1], 10)); - }; - - fluid.videoPlayer.captionLoader.preInit = function (that) { - - that.setCaptions = function (captions) { - // Render the caption area if necessary - captions = (typeof (captions) === "string") ? JSON.parse(captions) : captions; - //we get the actual captions and get rid of the rest - if (captions.captionCollection) { - captions = captions.captionCollection; - } - - that.applier.requestChange("captions.track", captions); - - // Construct intervalList that's used by intervalEventsConductor to fire intervalChange event - that.options.intervalList = []; - fluid.each(captions, function (value, key) { - that.options.intervalList[key] = { - begin: that.convertToMilli(value.inTime), - end: that.convertToMilli(value.outTime) - }; - }); - - that.events.onCaptionsLoaded.fire(captions); - return that; - }; - - //Creates an ajax query and uses or not a convertor for the captions - that.loadCaptions = function () { - var caps = that.model.captions.sources[that.model.captions.selection]; - if (caps) { - var opts = { - type: "GET", - dataType: "text", - success: that.setCaptions - }; - if (caps.type !== "JSONcc") { - opts.url = that.model.captions.conversionServiceUrl; - opts.data = { - cc_result: 0, - cc_url: caps.src, - cc_target: "JSONcc", - cc_name: "__no_name" - }; - } else { - opts.url = caps.src; - - } - $.ajax(opts); - } - }; - }; - - fluid.videoPlayer.captionLoader.finalInit = function (that) { - bindCaptionLoaderModel(that); - - //if we provided default captions when we created the component we load it - if (that.model.captions.sources && (that.model.captions.selection !== "none")) { - that.loadCaptions(); - } else { - that.applier.fireChangeRequest({ - path: "states.displayCaptions", - value: false - }); - } - that.events.onReady.fire(); - return that; - }; - -})(jQuery); diff --git a/js/VideoPlayer_captionner.js b/js/VideoPlayer_captionner.js deleted file mode 100644 index 4b9a246..0000000 --- a/js/VideoPlayer_captionner.js +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright 2009 University of Toronto -Copyright 2011 Charly Molter -Copyright 2011-2012 OCAD University - -Licensed under the Educational Community License (ECL), Version 2.0 or the New -BSD license. You may not use this file except in compliance with one these -Licenses. 
- -You may obtain a copy of the ECL 2.0 License and BSD License at -https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt -*/ - -/*global jQuery, window, fluid*/ - -// JSLint options -/*jslint white: true, funcinvoke: true, undef: true, newcap: true, nomen: true, regexp: true, bitwise: true, browser: true, forin: true, maxerr: 100, indent: 4 */ - - -(function ($) { - - //creates the container for a caption and adds it to the DOM - var makeCaption = function (that, caption) { - var captionElt = $("
" + caption.caption + "
"); - captionElt.addClass(that.options.styles.caption); - if (caption.textStyles) { - captionElt.css(caption.textStyles); - } - that.container.append(captionElt); - return captionElt; - }; - - var displayCaption = function (that, caption) { - caption.container = makeCaption(that, caption).fadeIn("fast", "linear"); - var temp = that.model.captions.currentCaptions; - temp.push(caption); - that.applier.fireChangeRequest({ - path: "captions.currentCaptions", - value: temp - }); - }; - - //delete and undisplay a piece of caption - var removeCaption = function (that, elt) { - elt.container.fadeOut("slow", function () { - elt.container.remove(); - }); - var temp = that.model.captions.currentCaptions; - temp.splice(elt, 1); - that.applier.fireChangeRequest({ - path: "captions.currentCaptions", - value: temp - }); - }; - - var bindCaptionnerModel = function (that) { - that.applier.modelChanged.addListener("states.displayCaptions", that.toggleCaptionView); - }; - - var createCaptionnerMarkup = function (that) { - that.toggleCaptionView(); - }; - - /** - * captionner is responsible for displaying captions in a one-at-a-time style. - * - * @param {Object} container the container in which the captions should be displayed - * @param {Object} options configuration options for the component - */ - - fluid.defaults("fluid.videoPlayer.captionner", { - gradeNames: ["fluid.viewComponent", "autoInit"], - components: { - captionnerEventBinder: { - type: "fluid.videoPlayer.captionner.eventBinder", - createOnEvent: "onCaptionnerReady" - } - }, - finalInitFunction: "fluid.videoPlayer.captionner.finalInit", - preInitFunction: "fluid.videoPlayer.captionner.preInit", - events: { - onCaptionnerReady: "{videoPlayer}.events.onCaptionnerReady" - }, - selectors: { - caption: ".flc-videoPlayer-caption-captionText" - }, - styles: { - caption: "fl-videoPlayer-caption-captionText" - }, - model: { - captions: { - currentCaptions: [], - currentIndex: 0 - } - } - }); - - fluid.videoPlayer.captionner.preInit = function (that) { - that.resyncCaptions = function () { - //we clean the screen of the captions that were there - that.container.empty(); - that.applier.fireChangeRequest({ - path: "captions.currentCaptions", - value: [] - }); - that.applier.fireChangeRequest({ - path: "captions.currentIndex", - value: 0 - }); - - that.showCaptions(); - }; - - that.displayCaptionForInterval = function (trackId, previousTrackId) { - if (that.model.captions.track) { - // Remove the previous caption - if (previousTrackId) { - removeCaption(that, that.model.captions.track[previousTrackId]); - } - - // Display the current caption - if (trackId) { - that.applier.fireChangeRequest({ - path: "captions.currentIndex", - value: trackId + 1 - }); - - var nextCaption = that.model.captions.track[trackId]; - if (nextCaption !== null && $.inArray(nextCaption, that.model.captions.currentCaptions) === -1) { - displayCaption(that, nextCaption); - } - } - } - - }; - - that.toggleCaptionView = function () { - if (that.model.states.displayCaptions === true) { - that.container.fadeIn("fast", "linear"); - } else { - that.container.fadeOut("fast", "linear"); - } - }; - - that.hideCaptions = function () { - that.container.hide(); - }; - - that.showCaptions = function () { - that.container.show(); - }; - }; - - fluid.videoPlayer.captionner.finalInit = function (that) { - createCaptionnerMarkup(that); - bindCaptionnerModel(that); - - that.events.onCaptionnerReady.fire(that); - }; - - 
/******************************************************************************************* - * Captionner Event Binder: Binds events between components "videoPlayer" and "captionner" * - *******************************************************************************************/ - - fluid.defaults("fluid.videoPlayer.captionner.eventBinder", { - gradeNames: ["fluid.eventedComponent", "autoInit"] - }); - -})(jQuery); diff --git a/js/VideoPlayer_controllers.js b/js/VideoPlayer_controllers.js index f7e9d73..839f978 100644 --- a/js/VideoPlayer_controllers.js +++ b/js/VideoPlayer_controllers.js @@ -30,9 +30,9 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt //add all the modelChanged listener to the applier // TODO: Privacy is inherited. Consider making this public var bindControllerModel = function (that) { - that.applier.modelChanged.addListener("states.canPlay", function () { - that.locate("play").attr("disabled", !that.model.states.canPlay); - that.locate("fullscreen").attr("disabled", !that.model.states.canPlay); + that.applier.modelChanged.addListener("canPlay", function () { + that.locate("play").attr("disabled", !that.model.canPlay); + that.locate("fullscreen").attr("disabled", !that.model.canPlay); }); }; @@ -64,19 +64,47 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt } }, captionControls: { - type: "fluid.videoPlayer.controllers.captionControls", + type: "fluid.videoPlayer.controllers.languageControls", container: "{controllers}.dom.captionControlsContainer", options: { + languages: "{controllers}.options.captions", model: "{controllers}.model", - applier: "{controllers}.applier" + modelPath: "currentTracks.captions", + showHidePath: "displayCaptions", + currentLanguagePath: "currentTracks.captions", + applier: "{controllers}.applier", + selectors: { + button: ".flc-videoPlayer-captions-button", + menu: ".flc-videoPlayer-captions-languageMenu" + }, + strings: { + showLanguage: "Show Captions", + hideLanguage: "Hide Captions", + press: "Captions", + release: "Captions" + } } }, transcriptControls: { - type: "fluid.videoPlayer.controllers.transcriptControls", + type: "fluid.videoPlayer.controllers.languageControls", container: "{controllers}.dom.transcriptControlsContainer", options: { + languages: "{controllers}.options.transcripts", model: "{controllers}.model", - applier: "{controllers}.applier" + modelPath: "currentTracks.transcripts", + showHidePath: "displayTranscripts", + currentLanguagePath: "currentTracks.transcripts", + applier: "{controllers}.applier", + selectors: { + button: ".flc-videoPlayer-transcripts-button", + menu: ".flc-videoPlayer-transcripts-languageMenu" + }, + strings: { + showLanguage: "Show Transcripts", + hideLanguage: "Hide Transcripts", + press: "Transcripts", + release: "Transcripts" + } } }, playButton: { @@ -95,7 +123,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt release: "Pause" }, model: "{controllers}.model", - modelPath: "states.play", + modelPath: "play", applier: "{controllers}.applier" } }, @@ -115,7 +143,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt release: "Exit full screen mode" }, model: "{controllers}.model", - modelPath: "states.fullscreen", + modelPath: "fullscreen", applier: "{controllers}.applier" } } @@ -163,7 +191,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt //change the text of the selected time var updateTime = function (that, element) { var time = that.locate(element); - 
time.text(fluid.videoPlayer.formatTime(that.model.states[element])); + time.text(fluid.videoPlayer.formatTime(that.model[element])); }; // TODO: Privacy is inherited. Consider making this public @@ -186,16 +214,16 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // TODO: This function is inherited. Consider making this public var bindScrubberModel = function (that) { // Setup the scrubber when we know the duration of the video. - that.applier.modelChanged.addListener("states.startTime", that.updateMin); - that.applier.modelChanged.addListener("states.startTime", that.updateMax); - that.applier.modelChanged.addListener("states.totalTime", that.updateMax); + that.applier.modelChanged.addListener("startTime", that.updateMin); + that.applier.modelChanged.addListener("startTime", that.updateMax); + that.applier.modelChanged.addListener("totalTime", that.updateMax); // Bind to the video's timeupdate event so we can programmatically update the slider. - that.applier.modelChanged.addListener("states.currentTime", that.updateCurrent); + that.applier.modelChanged.addListener("currentTime", that.updateCurrent); - that.applier.modelChanged.addListener("states.canPlay", function () { + that.applier.modelChanged.addListener("canPlay", function () { var scrubber = that.locate("scrubber"); - if (that.model.states.canPlay === true) { + if (that.model.canPlay === true) { scrubber.slider("enable"); } else { scrubber.slider("disable"); @@ -250,30 +278,30 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // TODO: these methods should be public functions, since people might like to alter them // (inherited code) that.updateMin = function () { - var startTime = that.model.states.startTime || 0; + var startTime = that.model.startTime || 0; var scrubber = that.locate("scrubber"); - scrubber.slider("option", "min", startTime + that.model.states.currentTime); + scrubber.slider("option", "min", startTime + that.model.currentTime); that.locate("handle").attr({ - "aria-valuemin": startTime + that.model.states.currentTime + "aria-valuemin": startTime + that.model.currentTime }); }; that.updateMax = function () { updateTime(that, "totalTime"); var scrubber = that.locate("scrubber"); - scrubber.slider("option", "max", that.model.states.totalTime); + scrubber.slider("option", "max", that.model.totalTime); that.locate("handle").attr({ - "aria-valuemax": that.model.states.totalTime + "aria-valuemax": that.model.totalTime }); }; that.updateCurrent = function () { updateTime(that, "currentTime"); var scrubber = that.locate("scrubber"); - scrubber.slider("value", that.model.states.currentTime); + scrubber.slider("value", that.model.currentTime); that.locate("handle").attr({ - "aria-valuenow": that.model.states.totalTime, - "aria-valuetext": fluid.videoPlayer.formatTime(that.model.states.currentTime) + " of " + fluid.videoPlayer.formatTime(that.model.states.totalTime) + "aria-valuenow": that.model.totalTime, + "aria-valuetext": fluid.videoPlayer.formatTime(that.model.currentTime) + " of " + fluid.videoPlayer.formatTime(that.model.totalTime) }); }; }; @@ -305,9 +333,9 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // TODO: Privacy is inherited. 
Consider making this public var bindVolumeModel = function (that) { - that.applier.modelChanged.addListener("states.volume", that.updateVolume); - that.applier.modelChanged.addListener("states.canPlay", function () { - that.locate("mute").attr("disabled", !that.model.states.canPlay); + that.applier.modelChanged.addListener("volume", that.updateVolume); + that.applier.modelChanged.addListener("canPlay", function () { + that.locate("mute").attr("disabled", !that.model.canPlay); }); }; @@ -318,23 +346,24 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt volumeControl.slider({ orientation: "vertical", range: "min", - min: that.model.states.minVolume, - max: that.model.states.maxVolume, - value: that.model.states.volume + min: that.model.minVolume, + max: that.model.maxVolume, + value: that.model.volume }); // TODO: This is inherited. Do we need to add aria to sliders ourselves? that.locate("handle").attr({ "aria-label": that.options.strings.volume, - "aria-valuemin": that.model.states.minVolume, - "aria-valuemax": that.model.states.maxVolume, - "aria-valuenow": that.model.states.volume, - "aria-valuetext": that.model.states.volume + "%", + "aria-valuemin": that.model.minVolume, + "aria-valuemax": that.model.maxVolume, + "aria-valuenow": that.model.volume, + "aria-valuetext": that.model.volume + "%", "role": "slider" }); fluid.tabindex(that.container, 0); fluid.tabindex(that.locate("mute"), -1); fluid.tabindex(volumeControl, -1); + fluid.tabindex(that.locate("handle"), -1); fluid.activatable(that.container, function (evt) { that.muteButton.events.onPress.fire(evt); @@ -364,14 +393,10 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt onChange: null }, model: { - // TODO: the 'states' is to mimic the videoPlayer model layout - // Ideally, the volumeControls should operate without requiring that knowledge.
- states: { - muted: false, - volume: 50, - minVolume: 0, - maxVolume: 100 - } + muted: false, + volume: 50, + minVolume: 0, + maxVolume: 100 }, selectors: { mute: ".flc-videoPlayer-mute", @@ -403,7 +428,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt release: "Un-mute" }, model: "{volumeControls}.model", - modelPath: "states.muted", + modelPath: "muted", applier: "{volumeControls}.applier" } } @@ -421,7 +446,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; that.updateVolume = function () { - var volume = that.model.states.volume; + var volume = that.model.volume; var volumeControl = that.locate("volumeControl"); volumeControl.slider("value", volume); that.locate("handle").attr({ @@ -439,283 +464,6 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; - /***************************************************************************** - Caption controls - Toggle button plus language selection pull-down - *****************************************************************************/ - // TODO: show/hide of captions not yet working; turning off just switches to English - fluid.defaults("fluid.videoPlayer.controllers.captionControls", { - gradeNames: ["fluid.rendererComponent", "autoInit"], - renderOnInit: true, - rendererOptions: { - autoBind: true - }, - finalInitFunction: "fluid.videoPlayer.controllers.captionControls.finalInit", - produceTree: "fluid.videoPlayer.controllers.captionControls.produceTree", - events: { - onReady: null - }, - model: { - // TODO: the 'captions' is to mimic the videoPlayer model layout - // Ideally, the captionControls should operate without requiring that knowledge. - captions: { - selection: "none", - choices: [], - names: [], - show: false, - sources: null, - conversionServiceUrl: "/videoPlayer/conversion_service/index.php", - maxNumber: 3, - track: undefined - } - }, - selectors: { - button: ".flc-videoPlayer-captions-button", - languageList: ".flc-videoPlayer-captions-languageList", - languageRow: ".flc-videoPlayer-captions-language", - languageButton: ".flc-videoPlayer-captions-languageButton", - languageLabel: ".flc-videoPlayer-captions-languageLabel" - }, - repeatingSelectors: ["languageRow"], - selectorsToIgnore: ["languageList"], - styles: { - selected: "fl-videoPlayer-caption-selected" - }, - // TODO: Strings should be moved out into a single top-level bundle (FLUID-4590) - strings: { - captionsOff: "Captions OFF", - turnCaptionsOff: "Turn Captions OFF" - }, - components: { - captionButton: { - type: "fluid.videoPlayer.controllers.toggleButton", - container: "{captionControls}.container", - options: { - selectors: { - button: ".flc-videoPlayer-captions-button" - }, - styles: { - pressed: "fl-videoPlayer-caption-active" - }, - // TODO: Strings should be moved out into a single top-level bundle (FLUID-4590) - strings: { - press: "Captions", - release: "Captions" - } - } - } - } - }); - - // TODO: FLUID-4589 Restructure the caption model to reduce the code logic here - fluid.videoPlayer.controllers.captionControls.setUpCaptionControls = function (that) { - that.captionsOffOption = $(that.locate("languageLabel")[that.model.captions.choices.indexOf("none")]); - that.locate("languageList").hide(); - that.captionsOffOption.text(that.model.captions.selection === "none" ? 
that.options.strings.captionsOff : that.options.strings.turnCaptionsOff); - $(that.locate("languageLabel")[that.model.captions.choices.indexOf(that.model.captions.selection)]).addClass(that.options.styles.selected); - }; - - fluid.videoPlayer.controllers.captionControls.bindCaptionDOMEvents = function (that) { - that.captionButton.events.onPress.addListener(function (evt) { - that.locate("languageList").toggle(); - // prevent the default onPress handler from toggling the button state: - // it should only toggle if the user turns captions on or off - return false; - }); - }; - - // TODO: FLUID-4589 Restructure the caption model to reduce the code logic here - fluid.videoPlayer.controllers.captionControls.bindCaptionModel = function (that) { - that.applier.modelChanged.addListener("captions.selection", function (model, oldModel, changeRequest) { - var oldSel = oldModel.captions.selection; - var newSel = model.captions.selection; - if (oldSel === newSel) { - return true; - } - - // TODO: can we do this in CSS? - var labels = that.locate("languageLabel"); - $(labels[model.captions.choices.indexOf(oldSel)]).removeClass(that.options.styles.selected); - $(labels[model.captions.choices.indexOf(newSel)]).addClass(that.options.styles.selected); - - // TODO: Can we move the responsibility to requestStateChange elsewhere? - if ((oldSel === "none") || (newSel === "none")) { - that.captionButton.requestStateChange(); - that.captionsOffOption.text(newSel === "none" ? that.options.strings.captionsOff : that.options.strings.turnCaptionsOff); - } - - return true; - }, "captionControls"); - }; - - fluid.videoPlayer.controllers.captionControls.finalInit = function (that) { - fluid.videoPlayer.controllers.captionControls.setUpCaptionControls(that); - fluid.videoPlayer.controllers.captionControls.bindCaptionDOMEvents(that); - fluid.videoPlayer.controllers.captionControls.bindCaptionModel(that); - that.events.onReady.fire(that); - }; - - fluid.videoPlayer.controllers.captionControls.produceTree = function (that) { - return { - button: { - // TODO: Note that until FLUID-4573 is fixed, this binding doesn't actually do anything - value: "${captions.show}" - }, - expander: { - type: "fluid.renderer.selection.inputs", - rowID: "languageRow", - labelID: "languageLabel", - inputID: "languageButton", - selectID: "captionLanguages", - tree: { - selection: "${captions.selection}", - optionlist: "${captions.choices}", - optionnames: "${captions.names}" - } - } - }; - }; - - /***************************************************************************** - Transcript controls - Toggle button plus language selection pull-down - *****************************************************************************/ - fluid.defaults("fluid.videoPlayer.controllers.transcriptControls", { - gradeNames: ["fluid.rendererComponent", "autoInit"], - renderOnInit: true, - rendererOptions: { - autoBind: true - }, - finalInitFunction: "fluid.videoPlayer.controllers.transcriptControls.finalInit", - produceTree: "fluid.videoPlayer.controllers.transcriptControls.produceTree", - events: { - onReady: null - }, - model: { - // TODO: the 'transcripts' is to mimic the videoPlayer model layout - // Ideally, the transcriptControls should operate without requiring that knowledge. 
- transcripts: { - selection: "none", - choices: [], - names: [], - show: false, - sources: null, - track: undefined - } - }, - selectors: { - button: ".flc-videoPlayer-transcripts-button", - tLanguageList: ".flc-videoPlayer-transcripts-languageList", - tLanguageRow: ".flc-videoPlayer-transcripts-language", - tLanguageLabel: ".flc-videoPlayer-transcripts-languageLabel", - tLanguageButton: ".flc-videoPlayer-transcripts-languageButton" - }, - repeatingSelectors: ["tLanguageRow"], - selectorsToIgnore: ["tLanguageList"], - styles: { - selected: "fl-videoPlayer-transcript-selected" - }, - // TODO: Strings should be moved out into a single top-level bundle (FLUID-4590) - strings: { - transcriptsOff: "Transcripts OFF", - turnTranscriptsOff: "Turn Transcripts OFF" - }, - components: { - transcriptButton: { - type: "fluid.videoPlayer.controllers.toggleButton", - container: "{transcriptControls}.container", - options: { - selectors: { - button: ".flc-videoPlayer-transcripts-button" - }, - styles: { - pressed: "fl-videoPlayer-transcript-active" - }, - // TODO: Strings should be moved out into a single top-level bundle (FLUID-4590) - strings: { - press: "Transcripts", - release: "Transcripts" - } - } - } - } - }); - - // TODO: FLUID-4589 Restructure the transcript model to reduce the code logic here - fluid.videoPlayer.controllers.transcriptControls.setUpTranscriptControls = function (that) { - that.transcriptsOffOption = $(that.locate("tLanguageLabel")[that.model.transcripts.choices.indexOf("none")]); - that.locate("tLanguageList").hide(); - that.transcriptsOffOption.text(that.model.transcripts.selection === "none" ? that.options.strings.transcriptsOff : that.options.strings.turnTranscriptsOff); - $(that.locate("tLanguageLabel")[that.model.transcripts.choices.indexOf(that.model.transcripts.selection)]).addClass(that.options.styles.selected); - }; - - fluid.videoPlayer.controllers.transcriptControls.bindTranscriptDOMEvents = function (that) { - that.transcriptButton.events.onPress.addListener(function (evt) { - that.locate("tLanguageList").toggle(); - // prevent the default onPress handler from toggling the button state: - // it should only toggle if the user turns transcripts on or off - return false; - }); - }; - - // TODO: FLUID-4589 Restructure the transcript model to reduce the code logic here - fluid.videoPlayer.controllers.transcriptControls.bindTranscriptModel = function (that) { - that.applier.modelChanged.addListener("transcripts.selection", function (model, oldModel, changeRequest) { - var oldSel = oldModel.transcripts.selection; - var newSel = model.transcripts.selection; - if (oldSel === newSel) { - return true; - } - - // TODO: can we do this in CSS? - var labels = that.locate("tLanguageLabel"); - $(labels[model.transcripts.choices.indexOf(oldSel)]).removeClass(that.options.styles.selected); - $(labels[model.transcripts.choices.indexOf(newSel)]).addClass(that.options.styles.selected); - - // TODO: Can we move the responsibility to requestStateChange elsewhere? - if ((oldSel === "none") || (newSel === "none")) { - that.transcriptButton.requestStateChange(); - that.transcriptsOffOption.text(newSel === "none" ? 
that.options.strings.transcriptsOff : that.options.strings.turnTranscriptsOff); - } - - // Update "states.displayTranscripts" which determines the show/hide of the transcript area - that.applier.fireChangeRequest({ - path: "states.displayTranscripts", - value: !(newSel === "none") - }); - - return true; - }, "transcriptControls"); - }; - - fluid.videoPlayer.controllers.transcriptControls.finalInit = function (that) { - fluid.videoPlayer.controllers.transcriptControls.setUpTranscriptControls(that); - fluid.videoPlayer.controllers.transcriptControls.bindTranscriptDOMEvents(that); - fluid.videoPlayer.controllers.transcriptControls.bindTranscriptModel(that); - that.events.onReady.fire(that); - }; - - fluid.videoPlayer.controllers.transcriptControls.produceTree = function (that) { - return { - button: { - // TODO: Note that until FLUID-4573 is fixed, this binding doesn't actually do anything - value: "${transcripts.show}" - }, - expander: { - type: "fluid.renderer.selection.inputs", - rowID: "tLanguageRow", - labelID: "tLanguageLabel", - inputID: "tLanguageButton", - selectID: "transcriptLanguages", - tree: { - selection: "${transcripts.selection}", - optionlist: "${transcripts.choices}", - optionnames: "${transcripts.names}" - } - } - }; - }; - /***************************************************************************** Toggle button subcomponent Used for Play, Mute, Fullscreen, Captions, Transcripts @@ -773,6 +521,9 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.enabled = function (state) { that.locate("button").prop("disabled", !state); }; + that.focus = function () { + that.locate("button").focus(); + }; }; fluid.videoPlayer.controllers.toggleButton.setUpToggleButton = function (that) { @@ -791,9 +542,8 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.updatePressedState(); }; - fluid.videoPlayer.controllers.toggleButton.bindToggleButtonEvents = function (that) { - var button = that.locate("button"); - button.click(function (evt) { + fluid.videoPlayer.controllers.toggleButton.bindEventListeners = function (that) { + that.locate("button").click(function (evt) { that.events.onPress.fire(evt); if (evt) { evt.stopPropagation(); @@ -807,8 +557,345 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt fluid.videoPlayer.controllers.toggleButton.finalInit = function (that) { fluid.videoPlayer.controllers.toggleButton.setUpToggleButton(that); - fluid.videoPlayer.controllers.toggleButton.bindToggleButtonEvents(that); + fluid.videoPlayer.controllers.toggleButton.bindEventListeners(that); that.events.onReady.fire(that); }; - + + /***************************************************************************** + Language Menu subcomponent + Used for Captions, Transcripts, Audio Descriptions. + Starts with a list of languages and adds the "none, please" options. + Eventually, we'll add the "Make new" and "Request new" buttons. + Note that the language menu cannot share the model of the controls: it + needs the list of captions (or transcripts, etc) as its model for rendering. 
+ *****************************************************************************/ + fluid.defaults("fluid.videoPlayer.controllers.languageMenu", { + gradeNames: ["fluid.rendererComponent", "autoInit"], + renderOnInit: true, + preInitFunction: "fluid.videoPlayer.controllers.languageMenu.preInit", + postInitFunction: "fluid.videoPlayer.controllers.languageMenu.postInit", + finalInitFunction: "fluid.videoPlayer.controllers.languageMenu.finalInit", + produceTree: "fluid.videoPlayer.controllers.languageMenu.produceTree", + languages: {}, + model: {}, + events: { + onReady: null, + activatedByKeyboard: null, + hiddenByKeyboard: null, + languageOnOff: null, + trackChanged: "preventable" + }, + listeners: { + trackChanged: { + listener: "fluid.videoPlayer.controllers.languageMenu.updateTracks", + priority: "last" + } + }, + selectors: { + menuItem: ".flc-videoPlayer-menuItem", + language: ".flc-videoPlayer-language", + showHide: ".flc-videoPlayer-languageNone" + }, + repeatingSelectors: ["language"], + strings: { + showLanguage: "Show Language", + hideLanguage: "Hide Language" + }, + styles: { + selected: "fl-videoPlayer-menuItem-selected", + active: "fl-videoPlayer-menuItem-active" + }, + hideOnInit: true + }); + + // TODO: Could this be specified declaratively, in a "protoTree" option? + fluid.videoPlayer.controllers.languageMenu.produceTree = function (that) { + var tree = { + // create a menu item for each language in the model + expander: { + type: "fluid.renderer.repeat", + repeatID: "language", + controlledBy: "languages", + pathAs: "lang", + tree: { + value: "${{lang}.label}" + } + }, + // add the 'turn off' option + showHide: { + value: that.options.strings.showLanguage + } + }; + return tree; + }; + + fluid.videoPlayer.controllers.languageMenu.selectLastItem = function (that) { + that.container.fluid("selectable.select", that.locate("menuItem").last()); + }; + + fluid.videoPlayer.controllers.languageMenu.setUpKeyboardA11y = function (that) { + that.container.fluid("tabbable"); + that.container.fluid("selectable", { + direction: fluid.a11y.orientation.VERTICAL, + selectableSelector: that.options.selectors.menuItem, + onSelect: function (el) { + that.show(); + $(el).addClass(that.options.styles.selected); + }, + onUnselect: function (el) { + $(el).removeClass(that.options.styles.selected); + }, + rememberSelectionState: false, + autoSelectFirstItem: false, + noWrap: true + }); + + // When a menu item is activated using the keyboard, in addition to hiding the menu, + // focus must be returned to the button + that.locate("language").fluid("activatable", function (evt) { + that.activate(that.locate("language").index(evt.currentTarget)); + that.events.activatedByKeyboard.fire(); + return false; + }); + var noneButton = that.locate("showHide"); + noneButton.fluid("activatable", function (evt) { + that.applier.requestChange("showLanguage", !that.model.showLanguage); + that.hide(); + if (that.model.showLanguage) { + that.events.hiddenByKeyboard.fire(); + } + return false; + }); + + // when the DOWN arrow is used on the bottom item of the menu, the menu should hide + // and focus should return to the button + noneButton.keydown(function (evt) { + if (evt.which === $.ui.keyCode.DOWN) { + that.hide(); + that.events.hiddenByKeyboard.fire(); + return false; + } + return true; + }); + }; + + fluid.videoPlayer.controllers.languageMenu.bindEventListeners = function (that) { + var langList = that.locate("language"); + langList.click(function (evt) { + that.activate(langList.index(evt.currentTarget)); + }); + 
that.locate("showHide").click(function (evt) { + that.applier.requestChange("showLanguage", !that.model.showLanguage); + that.hide(); + }); + + // TODO: We currently only support one active language. Indexing into the array will change + // when we support more + that.applier.modelChanged.addListener("activeLanguages.0", function (model, oldModel, changeRequest) { + var newTrack = model.activeLanguages; + var oldTrack = oldModel.activeLanguages; + if (newTrack == oldTrack) { + return; + } + that.applier.requestChange("showLanguage", true); + that.events.trackChanged.fire(that, newTrack, oldTrack); + }); + + that.applier.modelChanged.addListener("showLanguage", function (model, oldModel, changeRequest) { + that.locate("showHide").text(that.model.showLanguage ? that.options.strings.hideLanguage : that.options.strings.showLanguage); + that.events.languageOnOff.fire(that.model.showLanguage); + }); + + }; + + fluid.videoPlayer.controllers.languageMenu.updateTracks = function (that, activeTrack) { + var menuItems = that.locate("menuItem"); + menuItems.removeClass(that.options.styles.selected).removeClass(that.options.styles.active); + $(menuItems[that.model.activeLanguages[0]]).addClass(that.options.styles.active); + that.hide(); + }; + + fluid.videoPlayer.controllers.languageMenu.preInit = function (that) { + if (that.options.model.languages) { + if (that.options.model.activeLanguages[0] === undefined) { + that.options.model.activeLanguages[0] = 0; + } + } + + that.toggleView = function () { + that.container.toggle(); + return false; + }; + that.hide = function () { + that.locate("language").removeClass(that.options.styles.selected); + that.container.hide(); + }; + }; + + fluid.videoPlayer.controllers.languageMenu.postInit = function (that) { + that.show = function () { + that.container.show(); + }; + that.showAndSelect = function () { + that.show(); + that.container.fluid("selectable.select", that.locate("menuItem").last()); + }; + that.activate = function (index) { + that.applier.requestChange("activeLanguages.0", index); + }; + }; + + fluid.videoPlayer.controllers.languageMenu.finalInit = function (that) { + fluid.videoPlayer.controllers.languageMenu.bindEventListeners(that); + fluid.videoPlayer.controllers.languageMenu.setUpKeyboardA11y(that); + if (that.model.languages) { + $(that.locate("menuItem")[that.model.activeLanguages[0]]).addClass(that.options.styles.active); + } + that.hide(); + that.events.onReady.fire(that); + }; + + + /***************************************************************************** + Language Controls subcomponent: a button and its associated languageMenu + Used for Captions, Transcripts, Audio Descriptions. 
+ Note that the "pressed/released" state of the button reflects the show/hide + state of the captions, and so does not become "pressed" when activated; + activation only shows the menu + *****************************************************************************/ + fluid.defaults("fluid.videoPlayer.controllers.languageControls", { + gradeNames: ["fluid.viewComponent", "autoInit"], + preInitFunction: "fluid.videoPlayer.controllers.languageControls.preInit", + finalInitFunction: "fluid.videoPlayer.controllers.languageControls.finalInit", + selectors: { + button: ".flc-videoPlayer-languageButton", + menu: ".flc-videoPlayer-languageMenu" + }, + events: { + onReady: null, + onRenderingComplete: null, + activatedByKeyboard: null + }, + languages: [], + currentLanguagePath: "", + showHidePath: "", + strings: { + showLanguage: "Show Language", + hideLanguage: "Hide Language" + }, + components: { + button: { + type: "fluid.videoPlayer.controllers.toggleButton", + container: "{languageControls}.container", + options: { + selectors: { + button: "{languageControls}.options.selectors.button" + }, + // TODO: Strings should be moved out into a single top-level bundle (FLUID-4590) + strings: "{languageControls}.options.strings", + events: { + activatedByKeyboard: "{languageControls}.events.activatedByKeyboard" + }, + model: "{languageControls}.model", + modelPath: "{languageControls}.options.showHidePath", + applier: "{languageControls}.applier" + } + }, + menu: { + type: "fluid.videoPlayer.controllers.languageMenu", + container: "{languageControls}.dom.menu", + options: { + model: { + languages: "{languageControls}.options.languages", + }, + modelPath: "{languageControls}.options.modelPath", + showHidePath: "{languageControls}.options.showHidePath", + strings: "{languageControls}.options.strings" + } + }, + eventBinder: { + type: "fluid.videoPlayer.controllers.languageControls.eventBinder", + createOnEvent: "onRenderingComplete" + } + } + }); + + fluid.videoPlayer.controllers.languageControls.preInit = function (that) { + that.options.components.menu.options.model.activeLanguages = fluid.get(that.model, that.options.currentLanguagePath); + that.options.components.menu.options.model.showLanguage = fluid.get(that.model, that.options.showHidePath); + + that.updateLanguage = function (newIndex) { + that.applier.requestChange(that.options.currentLanguagePath, newIndex); + }; + + that.updateShowHide = function (show) { + that.applier.requestChange(that.options.showHidePath, show); + } + }; + + fluid.videoPlayer.controllers.languageControls.setUpKeyboardA11y = function (that) { + fluid.tabindex(that.locate("menu"), -1); + that.locate("button").fluid("activatable", [fluid.identity, { + additionalBindings: [{ + // in addition to space and enter, we want the UP arrow key to show the menu + // but we also want it to automatically select the first item above the button, + // i.e. 
the bottom item in the menu + key: $.ui.keyCode.UP, + activateHandler: function () { + that.events.activatedByKeyboard.fire(); + return false; + } + }] + }]); + fluid.deadMansBlur(that.container, { + exclusions: [that.menu.options.selectors.menuItem, that.options.selectors.button], + handler: function () { + that.menu.hide(); + } + }); + + // TODO: This is a workaround for FLUID-4606 (there's a button tag inside the anchor; + // it's for styling only, and we don't want it in the tab order) + $("button", that.locate("button")).fluid("tabindex", -1); + }; + + fluid.videoPlayer.controllers.languageControls.finalInit = function (that) { + fluid.videoPlayer.controllers.languageControls.setUpKeyboardA11y(that); + that.events.onRenderingComplete.fire(that); + + that.applier.modelChanged.addListener(that.options.showHidePath, function (model, oldModel, changeRequest) { + // TODO: This assumes an API for the button subcomponent: Should this be accomplished through an event? + that.button.updatePressedState(); + }); + that.events.onReady.fire(that); + + }; + + /************************************************************************************** + * LanguageControls Event Binder: Binds events between components "button" and "menu" * + **************************************************************************************/ + + fluid.defaults("fluid.videoPlayer.controllers.languageControls.eventBinder", { + gradeNames: ["fluid.eventedComponent", "autoInit"], + events: { + onReady: null + }, + listeners: { + "{button}.events.onPress": "{menu}.toggleView", + "{button}.events.activatedByKeyboard": "{menu}.showAndSelect", + + "{menu}.events.trackChanged": { + listener: "{languageControls}.updateLanguage", + args: ["{arguments}.1"] + }, + "{menu}.events.hiddenByKeyboard": "{button}.focus", + "{menu}.events.languageOnOff": "{languageControls}.updateShowHide" + }, + finalInitFunction: "fluid.videoPlayer.controllers.languageControls.eventBinder.finalInit" + }); + fluid.videoPlayer.controllers.languageControls.eventBinder.finalInit = function (that) { + that.events.onReady.fire(); + }; + })(jQuery); diff --git a/js/VideoPlayer_html5Captionator.js b/js/VideoPlayer_html5Captionator.js new file mode 100644 index 0000000..fc53b63 --- /dev/null +++ b/js/VideoPlayer_html5Captionator.js @@ -0,0 +1,125 @@ +/* +Copyright 2009 University of Toronto +Copyright 2011 Charly Molter +Copyright 2011 OCAD University + +Licensed under the Educational Community License (ECL), Version 2.0 or the New +BSD license. You may not use this file except in compliance with one these +Licenses. + +You may obtain a copy of the ECL 2.0 License and BSD License at +https://source.fluidproject.org/svn/LICENSE.txt +*/ + +/*global jQuery, window, fluid, captionator*/ + +// JSLint options +/*jslint white: true, funcinvoke: true, undef: true, newcap: true, nomen: true, regexp: true, bitwise: true, browser: true, forin: true, maxerr: 100, indent: 4 */ + + +(function ($) { + + /******************************************************************** + * HTML5 Captionator * + * A wrapper component of captionatorjs (http://captionatorjs.com/) * + * that makes it accessible in the infusion way.
* + ********************************************************************/ + + fluid.defaults("fluid.videoPlayer.html5Captionator", { + gradeNames: ["fluid.viewComponent", "autoInit"], + finalInitFunction: "fluid.videoPlayer.html5Captionator.finalInit", + preInitFunction: "fluid.videoPlayer.html5Captionator.preInit", + model: {}, + captions: [], + events: { + onReady: null + }, + elPaths: { + currentCaptions: "currentTracks.captions", + displayCaptions: "displayCaptions" + } + }); + + + var bindCaptionatorModel = function (that) { + var elPaths = that.options.elPaths; + that.applier.modelChanged.addListener(elPaths.currentCaptions, that.changeCaptions); + that.applier.modelChanged.addListener(elPaths.displayCaptions, that.displayCaptions); + }; + + // Hide all tracks + fluid.videoPlayer.html5Captionator.hideAllTracks = function (tracks) { + fluid.each(tracks, function (element) { + element.mode = captionator.TextTrack.OFF; + }); + }; + + // show captions depending on which one is on in the model + fluid.videoPlayer.html5Captionator.showCurrentTrack = function (currentCaptions, tracks, captionSources) { + fluid.each(captionSources, function (element, key) { + tracks[key].mode = captionator.TextTrack[$.inArray(key, currentCaptions) === -1 ? "OFF" : "SHOWING"]; + }); + }; + + // set up the listeners that show and hide captions + fluid.videoPlayer.html5Captionator.preInit = function (that) { + + // listener for hiding/showing all captions + that.displayCaptions = function () { + var tracks = that.container[0].tracks; + var elPaths = that.options.elPaths; + if (fluid.get(that.model, elPaths.displayCaptions)) { + fluid.videoPlayer.html5Captionator.showCurrentTrack(fluid.get(that.model, elPaths.currentCaptions), tracks, that.options.captions); + } else { + fluid.videoPlayer.html5Captionator.hideAllTracks(tracks); + } + }; + + // listener for changed selected currentTrack + that.changeCaptions = function () { + fluid.videoPlayer.html5Captionator.showCurrentTrack(fluid.get(that.model, that.options.elPaths.currentCaptions), that.container[0].tracks, that.options.captions); + }; + }; + + + fluid.videoPlayer.html5Captionator.finalInit = function (that) { + var captions = that.options.captions || []; + var elPaths = that.options.elPaths; + + // Before we go any further check if it makes sense to create captionator and bind events + if (captions.length === 0) { + return false; + } + + var currentCaptions = fluid.get(that.model, elPaths.currentCaptions) || []; + + // If currentTrack is not specified, then default it to the first track + if (currentCaptions.length === 0) { + //that.model.currentCaptions.push(0); + that.applier.requestChange(elPaths.currentCaptions, [0]); + } + + // Start adding tracks to the video tag + fluid.each(captions, function (element, key) { + + var trackTag = $("<track />"); + var attributes = fluid.filterKeys(fluid.copy(element), ["kind", "src", "type", "srclang", "label"], false); + + if ($.inArray(key, fluid.get(that.model, elPaths.currentCaptions)) !== -1) { + attributes.default = "true"; + } + trackTag.attr(attributes); + + that.container.append(trackTag); + }); + + // Create captionator code which will add a captionator div to the HTML + captionator.captionify(that.container[0], null, { + appendCueCanvasTo: that.container[0].parentNode + }); + + bindCaptionatorModel(that); + that.events.onReady.fire(that); + }; + +})(jQuery); diff --git a/js/VideoPlayer_media.js b/js/VideoPlayer_media.js index b911003..f167926 100644 --- a/js/VideoPlayer_media.js +++ b/js/VideoPlayer_media.js @@ -44,7 +44,8
https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt "video/ogg": "fluid.videoPlayer.media.createSourceMarkup.html5SourceTag", "video/ogv": "fluid.videoPlayer.media.createSourceMarkup.html5SourceTag", "youtube": "fluid.videoPlayer.media.createSourceMarkup.youTubePlayer" - } + }, + sources: [] }); fluid.videoPlayer.media.createSourceMarkup = { @@ -64,19 +65,19 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; var renderSources = function (that) { - $.each(that.model.video.sources, function (idx, source) { + $.each(that.options.sources, function (idx, source) { var renderer = that.options.sourceRenderers[source.type]; if ($.isFunction(renderer)) { renderer.apply(null, [that, source]); } else { - fluid.invokeGlobalFunction(renderer, [that, source]); + fluid.invokeGlobalFunction(renderer, [that, source]); } }); }; var bindMediaModel = function (that) { - that.applier.modelChanged.addListener("states.play", that.play); - that.applier.modelChanged.addListener("states.muted", that.mute); + that.applier.modelChanged.addListener("play", that.play); + that.applier.modelChanged.addListener("muted", that.mute); }; var getcanPlayData = function (data) { @@ -91,22 +92,22 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // FF doesn't implement startTime from the HTML 5 spec. var startTime = ev.data.obj.startTime || 0; that.applier.fireChangeRequest({ - path: "states.totalTime", + path: "totalTime", value: ev.data.obj.duration }); that.applier.fireChangeRequest({ - path: "states.currentTime", + path: "currentTime", value: ev.data.obj.currentTime }); that.applier.fireChangeRequest({ - path: "states.startTime", + path: "startTime", value: startTime }); }); video.bind("volumechange", {obj: video[0]}, function (ev) { that.applier.fireChangeRequest({ - path: "states.volume", + path: "volume", value: ev.data.obj.volume * 100 }); }); @@ -114,32 +115,32 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt //all browser don't support the canplay so we do all different states video.bind("canplay", {obj: video[0]}, function (ev) { that.applier.fireChangeRequest({ - path: "states.canPlay", + path: "canPlay", value: getcanPlayData(ev.data.obj) }); }); video.bind("canplaythrough", {obj: video[0]}, function (ev) { that.applier.fireChangeRequest({ - path: "states.canPlay", + path: "canPlay", value: getcanPlayData(ev.data.obj) }); }); video.bind("loadeddata", {obj: video[0]}, function (ev) { that.applier.fireChangeRequest({ - path: "states.canPlay", + path: "canPlay", value: getcanPlayData(ev.data.obj) }); }); video.bind("ended", function () { that.applier.fireChangeRequest({ - path: "states.play", + path: "play", value: false }); that.applier.fireChangeRequest({ - path: "states.currentTime", + path: "currentTime", value: 0 }); }); @@ -148,7 +149,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt fluid.videoPlayer.media.preInit = function (that) { that.updateCurrentTime = function (currentTime) { that.applier.fireChangeRequest({ - path: "states.currentTime", + path: "currentTime", value: currentTime }); }; @@ -162,7 +163,7 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; that.play = function () { - if (that.model.states.play === true) { + if (that.model.play === true) { that.container[0].play(); } else { that.container[0].pause(); @@ -170,11 +171,11 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt }; that.mute = function () { - 
that.container[0].muted = that.model.states.muted; + that.container[0].muted = that.model.muted; }; that.refresh = function () { - that.setVolume(that.model.states.volume / 100); + that.setVolume(that.model.volume / 100); that.play(); }; }; diff --git a/js/VideoPlayer_transcript.js b/js/VideoPlayer_transcript.js index 32cc61f..01c3d27 100644 --- a/js/VideoPlayer_transcript.js +++ b/js/VideoPlayer_transcript.js @@ -28,20 +28,15 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt rendererOptions: { autoBind: true }, + preInitFunction: "fluid.videoPlayer.transcript.preInit", finalInitFunction: "fluid.videoPlayer.transcript.finalInit", + produceTree: "fluid.videoPlayer.transcript.produceTree", components: { transriptInterval: { type: "fluid.videoPlayer.intervalEventsConductor", createOnEvent: "onReady" } }, - protoTree: { - langaugeDropdown: { - selection: "${transcripts.selection}", - optionlist: "${transcripts.choices}", - optionnames: "${transcripts.names}" - } - }, events: { onTranscriptAreaShow: null, onTranscriptAreaHide: null, @@ -51,15 +46,22 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt onReady: null }, model: { - transcripts: { - selection: "none", - choices: [], - names: [], - show: false, - sources: null, - track: undefined - } + selection: "none", + choices: [], + labels: [] }, + transcripts: [], + transcriptElementIdPrefix: "flc-videoPlayer-transcript-element", // ToDo: Is this the right place to save this info? +// model: { +// transcripts: { +// selection: "none", +// choices: [], +// names: [], +// show: false, +// sources: null, +// track: undefined +// } +// }, invokers: { convertToMilli: { funcName: "fluid.videoPlayer.transcript.convertToMilli", @@ -71,7 +73,13 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt closeButton: ".flc-videoPlayer-transcripts-close-button", transcriptText: ".flc-videoPlayer-transcript-text" }, - selectorsToIgnore: ["closeButton", "transcriptText"] + selectorsToIgnore: ["closeButton", "transcriptText"], + strings: { + transcriptsOff: "Turn Transcripts Off" + }, + styles: { + highlight: ".fl-videoPlayer-transcript-element-highlight" + } }); /** Functions to show/hide the transcript area **/ @@ -89,7 +97,11 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt // Show/Hide the transcript area based on the flag "states.displayTranscripts" fluid.videoPlayer.transcript.switchTranscriptArea = function (that) { - that.model.states.displayTranscripts ? 
fluid.videoPlayer.transcript.showTranscriptArea(that) : fluid.videoPlayer.transcript.hideTranscriptArea(that); + if (that.model.displayTranscripts) { + fluid.videoPlayer.transcript.showTranscriptArea(that); + } else { + fluid.videoPlayer.transcript.hideTranscriptArea(that); + } }; /** Functions to load and parse the transcript file **/ @@ -107,19 +119,21 @@ return null; } + var hourStr, minStr, secWithMilliSecStr; + var splitTime = time.split(":"); // Handle the optional "hh:" in the input if (splitTime.length === 2) { // "hh:" part is NOT given - var hourStr = "0"; - var minStr = splitTime[0]; - var secWithMilliSecStr = splitTime[1]; + hourStr = "0"; + minStr = splitTime[0]; + secWithMilliSecStr = splitTime[1]; } else { // "hh:" part is given - var hourStr = splitTime[0]; - var minStr = splitTime[1]; - var secWithMilliSecStr = splitTime[2]; + hourStr = splitTime[0]; + minStr = splitTime[1]; + secWithMilliSecStr = splitTime[2]; } var splitSec = secWithMilliSecStr.split("."); @@ -129,14 +143,44 @@ return Math.round(secs * 1000 + parseInt(splitSec[1], 10)); }; - fluid.videoPlayer.transcript.parseTranscriptFile = function (that, transcripts) { + fluid.videoPlayer.transcript.getTranscriptElementId = function (that, transcriptIndex) { + return that.options.transcriptElementIdPrefix + "-" + that.id + "-" + transcriptIndex; + }; + + fluid.videoPlayer.transcript.getTranscriptElement = function (transcriptElementContent, idName) { + return "<span id='" + idName + "'>" + transcriptElementContent + "</span>"; + }; + + fluid.videoPlayer.transcript.displayTranscript = function (that, transcriptText) { + that.locate("transcriptText").html(transcriptText); + }; + + fluid.videoPlayer.transcript.highlightTranscriptElement = function (that, currentTrackId, previousTrackId) { + // Display the current transcript + if (currentTrackId !== null) { + var nextTranscript = that.model.track[currentTrackId]; + if (nextTranscript) { + that.locate("transcriptText").text(nextTranscript.transcript); + } + } + }; + + fluid.videoPlayer.transcript.parseTranscriptFile = function (that, transcripts, currentIndex) { transcripts = (typeof (transcripts) === "string") ?
JSON.parse(transcripts) : transcripts; - //we get the actual transcripts and get rid of the rest if (transcripts.transcriptCollection) { transcripts = transcripts.transcriptCollection; } - that.applier.requestChange("transcripts.track", transcripts); + that.options.transcripts[currentIndex].tracks = transcripts; + + // Generate the transcript text + var transcriptText = ""; + for (var i = 0; i < transcripts.length; i++) { + transcriptText = transcriptText + fluid.videoPlayer.transcript.getTranscriptElement(transcripts[i].transcript, fluid.videoPlayer.transcript.getTranscriptElementId(that, i)) + " "; + } + + that.options.transcripts[currentIndex].transcriptText = transcriptText; + fluid.videoPlayer.transcript.displayTranscript(that, transcriptText); // Construct intervalList that's used by intervalEventsConductor to fire intervalChange event var intervalList = []; @@ -150,28 +194,22 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt that.events.onTranscriptsLoaded.fire(intervalList); }; - fluid.videoPlayer.transcript.loadTranscript = function (that) { - // Exit if transcript is turned off or the transcript sources are not provided - if (that.model.transcripts.selection === "none" || that.model.transcripts.choices.length === 0) { - return true; - } - - // The main process to load in the transcript file - var transcriptSource = that.model.transcripts.sources[that.model.transcripts.selection]; + fluid.videoPlayer.transcript.loadTranscript = function (that, currentIndex) { + var transcriptSource = that.options.transcripts[currentIndex]; if (transcriptSource) { var opts = { type: "GET", dataType: "text", success: function (data) { - fluid.videoPlayer.transcript.parseTranscriptFile(that, data); + fluid.videoPlayer.transcript.parseTranscriptFile(that, data, currentIndex); }, error: function () { fluid.log("Error loading transcript: " + transcriptSource.src + ". 
Are you sure this file exists?"); - that.events.onLoadTranscriptError.fire(); + that.events.onLoadTranscriptError.fire(transcriptSource); + } + }; if (transcriptSource.type !== "JSONcc") { - opts.url = that.model.transcripts.conversionServiceUrl; + opts.url = that.model.conversionServiceUrl; opts.data = { cc_result: 0, cc_url: transcriptSource.src, @@ -186,34 +224,36 @@ } }; - fluid.videoPlayer.transcript.displayTranscript = function (that, currentTrackId, previousTrackId) { - // Display the current transcript - if (currentTrackId !== null) { - var nextTranscript = that.model.transcripts.track[currentTrackId]; - if (nextTranscript) { - that.locate("transcriptText").text(nextTranscript.transcript); - } + fluid.videoPlayer.transcript.prepareTranscript = function (that) { + // Transcript display only supports one language at a time + if (that.model.currentTracks.transcripts[0] === "none") { + that.applier.requestChange("displayTranscripts", false); + return true; + } + + var currentTranscriptIndex = parseInt(that.model.currentTracks.transcripts[0], 10); + + // Reuse the cached transcript text if this transcript has been loaded before; otherwise load the file + if (that.options.transcripts[currentTranscriptIndex].transcriptText) { + fluid.videoPlayer.transcript.displayTranscript(that, that.options.transcripts[currentTranscriptIndex].transcriptText); + } else { + fluid.videoPlayer.transcript.loadTranscript(that, currentTranscriptIndex); } }; fluid.videoPlayer.transcript.bindTranscriptDOMEvents = function (that) { that.locate("closeButton").click(function () { - that.applier.fireChangeRequest({ - path: "transcripts.selection", - value: "none" - }); - - fluid.videoPlayer.transcript.hideTranscriptArea(that); + that.applier.requestChange("displayTranscripts", false); }); }; fluid.videoPlayer.transcript.bindTranscriptModel = function (that) { - that.applier.modelChanged.addListener("states.displayTranscripts", function () { + that.applier.modelChanged.addListener("displayTranscripts", function () { fluid.videoPlayer.transcript.switchTranscriptArea(that); }); - that.applier.modelChanged.addListener("transcripts.selection", function () { - fluid.videoPlayer.transcript.loadTranscript(that); + that.applier.modelChanged.addListener("currentTracks.transcripts", function () { + fluid.videoPlayer.transcript.prepareTranscript(that); }); that.events.onTranscriptsLoaded.addListener(function (intervalList) { @@ -221,15 +261,38 @@ }); that.events.onIntervalChange.addListener(function (currentInterval, previousInterval) { - fluid.videoPlayer.transcript.displayTranscript(that, currentInterval, previousInterval); + fluid.videoPlayer.transcript.highlightTranscriptElement(that, currentInterval, previousInterval); }); }; + fluid.videoPlayer.transcript.preInit = function (that) { + // build the 'choices' from the transcript list provided + fluid.each(that.options.transcripts, function (value, key) { + // ToDo: convert the integer to a string to avoid the "unrecognized text" error when rendering the dropdown list box + // The integer is converted back in the listener for currentTracks.transcripts.0. Needs a better solution for this.
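// As a worked example of what preInit builds here: for a hypothetical
// options.transcripts of
//     [{ label: "English", src: "english.json", type: "JSONcc" },
//      { label: "Français", src: "french.json", type: "JSONcc" }]
// the loop below, plus the trailing 'none' entry, produces
//     choices: ["0", "1", "none"]
//     labels:  ["English", "Français", "Turn Transcripts Off"]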
+ that.options.model.choices.push(key.toString()); + that.options.model.labels.push(value.label); + }); + // add the 'turn transcripts off' option + that.options.model.choices.push("none"); + that.options.model.labels.push(that.options.strings.transcriptsOff); + }; + + fluid.videoPlayer.transcript.produceTree = function (that) { + return { + langaugeDropdown: { + selection: "${currentTracks.transcripts.0}", + optionlist: "${choices}", + optionnames: "${labels}" + } + }; + }; + fluid.videoPlayer.transcript.finalInit = function (that) { fluid.videoPlayer.transcript.bindTranscriptDOMEvents(that); fluid.videoPlayer.transcript.bindTranscriptModel(that); - fluid.videoPlayer.transcript.loadTranscript(that); + fluid.videoPlayer.transcript.prepareTranscript(that); fluid.videoPlayer.transcript.switchTranscriptArea(that); that.events.onReady.fire(that); diff --git a/lib/captionator/LICENSE.txt b/lib/captionator/LICENSE.txt new file mode 100644 index 0000000..32624ac --- /dev/null +++ b/lib/captionator/LICENSE.txt @@ -0,0 +1,20 @@ +The MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/lib/captionator/README.txt b/lib/captionator/README.txt new file mode 100644 index 0000000..cfdffc9 --- /dev/null +++ b/lib/captionator/README.txt @@ -0,0 +1,9 @@ +Attribution: + * Captionator.js is a JavaScript polyfill designed to provide support for the <track> element and the TextTrack JavaScript API. It currently supports subtitles in SUB, SRT, YouTube's SBV, WebVTT, and WebVTT with Google's proposed timestamp syntax. It will soon support LRC, DFXP/TTML and the WebVTT v2 proposed features. More information can be found at http://captionatorjs.com/ + * Captionator is completely free, open-source software. More details on the license can be found at https://github.com/cgiffard/Captionator/tree/captionplanet + +Source: + * The original code is available on GitHub: https://github.com/cgiffard/Captionator.git + +Instructions: + * All necessary documentation for Captionator can be found on the official Captionator website: http://captionatorjs.com/ diff --git a/lib/captionator/css/captions.css b/lib/captionator/css/captions.css new file mode 100644 index 0000000..016b34b --- /dev/null +++ b/lib/captionator/css/captions.css @@ -0,0 +1,30 @@ +/* + This file is designed to show how CSS can be used to style elements in the captions themselves. + You don't have to include it in your own project.
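   As a concrete illustration (the cue text is hypothetical), a WebVTT cue such as

       00:00:01.000 --> 00:00:04.000
       <v Arduino><c.arduino>Hello!</c> I am an Arduino.

   is rendered by Captionator as markup these rules can style: the <v> voice token
   produces a q.voice element whose title attribute carries the speaker name (which
   the q.voice:before rule below prints), and the <c.arduino> class token produces
   an element carrying the "arduino" class.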
+*/ + +.arduino { + color: red; + text-transform: uppercase; + font-family: "Helvetica Neue"; + font-weight: lighter; +} + +.captionator-title a { + text-decoration: none; + font-weight: bold; + letter-spacing: 5pt; + font-family: "Helvetica Neue"; + font-weight: bolder; + color: red; +} + +q.voice:before { + content: attr(title); + color:red; + margin-right: 0.5em; +} + +q.voice:after { + content: ""; +} \ No newline at end of file diff --git a/lib/captionator/js/captionator-min.js b/lib/captionator/js/captionator-min.js new file mode 100644 index 0000000..7fb1cca --- /dev/null +++ b/lib/captionator/js/captionator-min.js @@ -0,0 +1,64 @@ +/* + Captionator 0.5.1 [CaptionCrunch] + Christopher Giffard, 2011 + Share and enjoy + + https://github.com/cgiffard/Captionator +*/ + +(function(){var A=10,B=16,D=4.5,E=1.5,I=[0,0,0,0.5],J=false,c={createDOMException:function(b,c,a){try{document.querySelectorAll("div/[]")}catch(d){var e=function(a,b,c){this.code=a;this.message=b;this.name=c};e.prototype=d;return new e(b,c,a)}},compareArray:function(b,c){if(!(b instanceof Array)||!(c instanceof Array))return false;if(b.length!==c.length)return false;for(var a in b)if(b.hasOwnProperty(a)&&b[a]!==c[a])return false;return true},generateID:function(b){for(var c="",b=b?b:10;c.length0&&a>c.TextTrack.OFF&&this.loadTrack(this.src,null),this.videoNode._captionator_dirtyBit=true,c.rebuildCaptions(this.videoNode),a===c.TextTrack.OFF))this.cues.length= +0,this.readyState=c.TextTrack.NONE}else throw Error("Illegal mode value for track: "+a);};this.getDefault=function(){return this.internalDefault};Object.prototype.__defineGetter__?(this.__defineGetter__("mode",this.getMode),this.__defineSetter__("mode",this.setMode),this.__defineGetter__("default",this.getDefault)):Object.defineProperty&&(Object.defineProperty(this,"mode",{get:this.getMode,set:this.setMode}),Object.defineProperty(this,"default",{get:this.getDefault}));this.loadTrack=function(a,b){var h, +f=new XMLHttpRequest;if(this.readyState===c.TextTrack.LOADED)b instanceof Function&&b(h);else{this.src=a;this.readyState=c.TextTrack.LOADING;var d=this;f.open("GET",a,true);f.onreadystatechange=function(){if(f.readyState===4)if(f.status===200){var a=d.videoNode._captionatorOptions||{};if(d.kind==="metadata")a.processCueHTML=false,a.sanitiseCueHTML=false;h=c.parseCaptions(f.responseText,a);d.readyState=c.TextTrack.LOADED;d.cues.loadCues(h);d.activeCues.refreshCues.apply(d.activeCues);d.videoNode._captionator_dirtyBit= +true;c.rebuildCaptions(d.videoNode);d.onload.call(this);b instanceof Function&&b.call(d,h)}else d.readyState=c.TextTrack.ERROR,d.onerror()};try{f.send(null)}catch(k){d.readyState=c.TextTrack.ERROR,d.onerror(k)}}};this.addCue=function(a){if(a&&a instanceof c.TextTrackCue)this.cues.addCue(a);else throw Error("The argument is null or not an instance of TextTrackCue.");};this.removeCue=function(){}};c.TextTrack.NONE=0;c.TextTrack.LOADING=1;c.TextTrack.LOADED=2;c.TextTrack.ERROR=3;c.TextTrack.OFF=0;c.TextTrack.HIDDEN= +1;c.TextTrack.SHOWING=2;c.TextTrackCueList=function(a){this.track=a instanceof c.TextTrack?a:null;this.getCueById=function(a){return this.filter(function(b){return b.id===a})[0]};this.loadCues=function(a){for(var b=0;b +0});e instanceof Array&&e.forEach(function(a){var b={D:"direction",L:"linePosition",T:"textPosition",A:"alignment",S:"size"},a=a.split(":");b[a[0]]&&(q[b[a[0]]]=a[1]);b[a[0]]in u&&(u[b[a[0]]]=a[1])})}if(this.linePosition.match(/\%/))this.snapToLines=false;this.getCueAsSource=function(){return 
String(this.text)};this.getCueAsHTML=function(){var a=document.createDocumentFragment(),b=document.createElement("div");b.innerHTML=String(this.text);Array.prototype.forEach.call(b.childNodes,function(b){a.appendChild(b.cloneNode(true))}); +return a};this.isActive=function(){var a=0;if(this.track instanceof c.TextTrack&&(this.track.mode===c.TextTrack.SHOWING||this.track.mode===c.TextTrack.HIDDEN)&&this.track.readyState===c.TextTrack.LOADED)try{if(a=this.track.videoNode.currentTime,this.startTime<=a&&this.endTime>=a){if(!this.wasActive)this.wasActive=true,this.onenter();return true}}catch(b){return false}if(this.wasActive)this.wasActive=false,this.onexit();return false};Object.prototype.__defineGetter__?this.__defineGetter__("active", +this.isActive):Object.defineProperty&&Object.defineProperty(this,"active",{get:this.isActive});this.toString=function(){return"TextTrackCue:"+this.id+"\n"+String(this.text)};this.onenter=function(){};this.onexit=function(){}};c.MediaTrack=function(a,b,f,d,e,o){var l=function(a){return a.filter(function(a){try{var b=document.createElement(a.getAttribute("type").split("/").shift());return!(!b.canPlayType||!b.canPlayType(a.getAttribute("type")).replace(/no/,""))}catch(c){return false}}).shift().getAttribute("src")}; +this.onload=function(){};this.onerror=function(){};this.id=a||"";this.internalMode=this.internalMode=c.TextTrack.OFF;this.mediaElement=null;this.kind=b||"audiodescription";this.label=f||"";this.language=d||"";this.readyState=c.TextTrack.NONE;this.type=o||"x/unknown";this.mediaType=null;this.src="";if(typeof e==="string")this.src=e;else if(e instanceof NodeList)this.src=l(e);if(this.type.match(/^video\//))this.mediaType="video";else if(this.type.match(/^audio\//))this.mediaType="audio";this.getMode= +function(){return this.internalMode};this.setMode=function(a){if([c.TextTrack.OFF,c.TextTrack.HIDDEN,c.TextTrack.SHOWING].indexOf(a)!==-1){if(a!==this.internalMode)this.internalMode=a,a===c.TextTrack.HIDDEN&&!this.mediaElement&&this.buildMediaElement(),a===c.TextTrack.SHOWING&&this.showMediaElement(),(a===c.TextTrack.OFF||a===c.TextTrack.HIDDEN)&&this.hideMediaElement()}else throw Error("Illegal mode value for track.");};Object.prototype.__defineGetter__?(this.__defineGetter__("mode",this.getMode), +this.__defineSetter__("mode",this.setMode)):Object.defineProperty&&Object.defineProperty(this,"mode",{get:this.getMode,set:this.setMode});this.hideMediaElement=function(){if(this.mediaElement&&(this.mediaElement.paused||this.mediaElement.pause(),this.mediaElement instanceof HTMLVideoElement))this.mediaElement.style.display="none"};this.showMediaElement=function(){if(this.mediaElement){if(this.mediaElement.parentNode||document.body.appendChild(this.mediaElement),this.mediaElement instanceof HTMLVideoElement)this.mediaElement.style.display= +"block"}else this.buildMediaElement(),document.body.appendChild(this.mediaElement)};this.buildMediaElement=function(){try{if(this.type.match(/^video\//))this.mediaElement=document.createElement("video"),this.mediaElement.className="captionator-mediaElement-"+this.kind,c.styleNode(this.mediaElement,this.kind,this.videoNode);else if(this.type.match(/^audio\//))this.mediaElement=new Audio;this.mediaElement.type=this.type;this.mediaElement.src=this.src;this.mediaElement.load();this.mediaElement.trackObject= +this;this.readyState=c.TextTrack.LOADING;var 
a=this.mediaElement;this.mediaElement.addEventListener("progress",function(){a.trackObject.readyState=c.TextTrack.LOADING},false);this.mediaElement.addEventListener("canplaythrough",function(){a.trackObject.readyState=c.TextTrack.LOADED;a.trackObject.onload.call(a.trackObject)},false);this.mediaElement.addEventListener("error",function(b){a.trackObject.readyState=c.TextTrack.ERROR;a.trackObject.mode=c.TextTrack.OFF;a.trackObject.mediaElement=null;a.trackObject.onerror.call(a.trackObject, +b)},false)}catch(b){this.readyState=c.TextTrack.ERROR,this.mode=c.TextTrack.OFF,this.mediaElement=null,this.onerror&&this.onerror.apply(this,b)}}};c.CaptionatorCueStructure=function(a,b){var c=this;this.isTimeDependent=false;this.cueSource=a;this.options=b;this.processedCue=null;this.toString=function(f){if(b.processCueHTML!==false){var d=function(a,b){if(c.processedCue===null){var e="",h,g;for(h in a)if(h.match(/^\d+$/)&&a.hasOwnProperty(h))if(g=a[h],g instanceof Object&&g.children&&g.children.length)if(g.token=== +"v")e+=''+d(g.children,b+1)+"";else if(g.token==="c")e+=""+d(g.children,b+1)+"";else if(g.timeIn>0){if(f===null||f===void 0||f>0&&f>=g.timeIn)e+=""+d(g.children,b+1)+ +""}else e+=g.rawToken+d(g.children,b+1)+"";else if(g instanceof String||typeof g==="string"||typeof g==="number")e+=g;if(!c.isTimeDependent&&b===0)c.processedCue=e;return e}else return c.processedCue};return d(this,0)}else return a}};c.CaptionatorCueStructure.prototype=[];if(a.exportObjects)window.TextTrack=c.TextTrack,window.TextTrackCueList=c.TextTrackCueList,window.ActiveTextTrackCueList=c.ActiveTextTrackCueList,window.TextTrackCue=c.TextTrackCue,window.MediaTrack=c.MediaTrack; +J=true}[].slice.call(document.getElementsByTagName("video"),0).forEach(function(a){a.addTextTrack=function(b,f,d,e,o,l,q){var u="subtitles,captions,descriptions,captions,metadata,chapters,karaoke,lyrics,tickertext,audiodescription,commentary,alternate,signlanguage".split(","),x=u.slice(0,7),b=typeof b==="string"?b:"",d=typeof d==="string"?d:"",e=typeof e==="string"?e:"",q=typeof q==="boolean"?q:false;if(!u.filter(function(a){return f===a?true:false}).length)throw c.createDOMException(12,"DOMException 12: SYNTAX_ERR: You must use a valid kind when creating a TimedTextTrack.", +"SYNTAX_ERR");if(x.filter(function(a){return f===a?true:false}).length)if(b=new c.TextTrack(b,f,d,e,o,null)){if(!(a.tracks instanceof Array))a.tracks=[];a.tracks.push(b);return b}else return false;else if(b=new c.MediaTrack(b,f,d,e,o,l,q)){if(!(a.mediaTracks instanceof Array))a.mediaTracks=[];a.mediaTracks.push(b);return b}else return false}});if(!b||b===false||b===void 0||b===null)d=[].slice.call(document.getElementsByTagName("video"),0);else if(b instanceof Array)for(e=0;e0?e.querySelectorAll("source"):e.getAttribute("src"),h=b.addTextTrack(e.getAttribute("id")||c.generateID(),e.getAttribute("kind"),e.getAttribute("label"),e.getAttribute("srclang").split("-")[0],h,e.getAttribute("type"),e.hasAttribute("default"));e.track=h;h.trackNode=e;h.videoNode= +b;d.push(h);var k=false;if((h.kind==="subtitles"||h.kind==="captions")&&f===h.language&&a.enableCaptionsByDefault)d.filter(function(a){return(a.kind==="captions"||a.kind==="subtitles")&&f===a.language&&a.mode===c.TextTrack.SHOWING?true:false}).length||(k=true);h.kind==="chapters"&&f===h.language&&(d.filter(function(a){return a.kind==="chapters"&&a.mode===c.TextTrack.SHOWING?true:false}).length||(k=true));h.kind==="descriptions"&&a.enableDescriptionsByDefault===true&&f===h.language&&(d.filter(function(a){return a.kind=== 
+"descriptions"&&a.mode===c.TextTrack.SHOWING?true:false}).length||(k=true));k===true&&d.forEach(function(a){if(a.trackNode.hasAttribute("default")&&a.mode===c.TextTrack.SHOWING)a.mode=c.TextTrack.HIDDEN});if(e.hasAttribute("default")&&!d.filter(function(a){return a.trackNode.hasAttribute("default")&&a.trackNode!==e?true:false}).length)k=true,h.internalDefault=true;if(k===true)h.mode=c.TextTrack.SHOWING});b.addEventListener("timeupdate",function(b){b=b.target;try{b.tracks.forEach(function(a){a.activeCues.refreshCues.apply(a.activeCues)})}catch(d){}a.renderer instanceof +Function?a.renderer.call(c,b):c.rebuildCaptions(b);c.synchroniseMediaElements(b)},false);window.addEventListener("resize",function(){b._captionator_dirtyBit=true;c.rebuildCaptions(b)},false);b.addEventListener("play",function(){c.synchroniseMediaElements(b)},false);b.addEventListener("pause",function(){c.synchroniseMediaElements(b)},false);a.enableHighResolution===true&&window.setInterval(function(){try{b.tracks.forEach(function(a){a.activeCues.refreshCues.apply(a.activeCues)})}catch(d){}a.renderer instanceof +Function?a.renderer.call(c,b):c.rebuildCaptions(b)},20)}return b},rebuildCaptions:function(b){var f=b.currentTime,a=[],d=false,e=[],g=[];(b.tracks||[]).forEach(function(b){b.mode===c.TextTrack.SHOWING&&b.readyState===c.TextTrack.LOADED&&(g=[].slice.call(b.activeCues,0),g=g.sort(function(a,b){return a.startTime>b.startTime?-1:1}),a=a.concat(g))});e=a.map(function(a){return a.track.id+"."+a.id+":"+a.text.toString(f).length});if((d=!c.compareArray(e,b._captionator_previousActiveCues))||b._captionator_dirtyBit)b._captionator_dirtyBit= +false,b._captionator_availableCueArea=null,b._captionator_previousActiveCues=e,c.styleCueCanvas(b),b._containerObject.innerHTML="",a.forEach(function(a){var d=document.createElement("div");d.id=String(a.id).length?a.id:c.generateID();d.className="captionator-cue";d.innerHTML=a.text.toString(f);b._containerObject.appendChild(d);c.styleCue(d,a,b)})},synchroniseMediaElements:function(b){var f=function(a,b){try{b.seeking&&a.pause();if(a.currentTimeb.currentTime+0.5)a.currentTime= +b.currentTime;a.paused&&!b.paused?a.play():!a.paused&&b.paused&&a.pause()}catch(c){}};(b.mediaTracks||[]).forEach(function(a){a.mode===c.TextTrack.SHOWING&&a.readyState>=c.TextTrack.LOADING&&f(a.mediaElement,b)});b.id&&[].slice.call(document.body.querySelectorAll("*[syncMaster="+b.id+"]"),0).forEach(function(a){(a.tagName.toLowerCase()==="video"||a.tagName.toLowerCase()==="audio")&&f(a,b)})},getNodeMetrics:function(b){for(var c=window.getComputedStyle(b,null),a=b,d=b.offsetTop,e=b.offsetLeft,g=b, +h=0,k=0,g=parseInt(c.getPropertyValue("width"),10),h=parseInt(c.getPropertyValue("height"),10);a=a.offsetParent;)d+=a.offsetTop,e+=a.offsetLeft;if(b.hasAttribute("controls"))b=navigator.userAgent.toLowerCase(),b.indexOf("chrome")!==-1?k=32:b.indexOf("opera")!==-1?k=25:b.indexOf("firefox")!==-1?k=28:b.indexOf("ie 9")!==-1||b.indexOf("ipad")!==-1?k=44:b.indexOf("safari")!==-1&&(k=25);else if(b._captionatorOptions)b=b._captionatorOptions,b.controlHeight&&(k=parseInt(b.controlHeight,10));return{left:e, +top:d,width:g,height:h,controlHeight:k}},applyStyles:function(b,c){for(var a in c)({}).hasOwnProperty.call(c,a)&&(b.style[a]=c[a])},checkDirection:function(b){var c=RegExp("^[^\u0591-\u07ff\ufb1d-\ufdfd\ufe70-\ufefc]*[A-Za-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02b8\u0300-\u0590\u0800-\u1fff\u2c00-\ufb1c\ufdfe-\ufe6f\ufefd-\uffff]");return 
RegExp("^[^A-Za-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02b8\u0300-\u0590\u0800-\u1fff\u2c00-\ufb1c\ufdfe-\ufe6f\ufefd-\uffff]*[\u0591-\u07ff\ufb1d-\ufdfd\ufe70-\ufefc]").test(b)? +"rtl":c.test(b)?"ltr":""},styleCue:function(b,f,a){var d=0,e=0,g=0,h=0,k,z,m=0,o=0,l,q,u,x,p,t,C=0,F=k=d=0,G=0,y=0,v=0,j,s,r=0,G=a._captionatorOptions||{},i,e=50,d=m=0,e=true,K=function(a){var b=function(a){return!!a.length},d,e,f,g,h=0,i=function(a){h++;c.applyStyles(a,{display:"block",lineHeight:"auto",height:l+"px",width:t+"px",textAlign:"center"})};for(d in a.childNodes)if(a.childNodes.hasOwnProperty(d))e=a.childNodes[d],e.nodeType===3?(g=document.createDocumentFragment(),f=e.nodeValue,g.appendChild(document.createElement("span")), +g.childNodes[0].innerHTML=""+f.split(/(.)/).filter(b).join("")+"",[].slice.call(g.querySelectorAll("span.captionator-cue-character"),0).forEach(i),e.parentNode.replaceChild(g,e)):a.childNodes[d].nodeType===1&&(h+=K(a.childNodes[d]));return h};i=c.getNodeMetrics(a);if(!a._captionator_availableCueArea)a._captionator_availableCueArea={bottom:i.height-i.controlHeight,right:i.width,top:0,left:0,height:i.height- +i.controlHeight,width:i.width};f.direction==="horizontal"&&(c.applyStyles(b,{width:"auto",position:"static",display:"inline-block",padding:"1em"}),m=parseInt(b.offsetWidth,10),d=Math.floor(m/a._captionator_availableCueArea.width*100),d=d<=100?d:100);m=i.height*(D/100)/96*72;m=m>=A?m:A;l=Math.floor(m/72*96);q=Math.floor(m*E);q=q>B?q:B;t=p=Math.ceil(q/72*96);p*Math.floor(i.height/p)d?k-=e:k=d),g=f.snapToLines===true?a._captionator_availableCueArea.width*(k/100):i.width*(k/100),f.textPosition==="auto"?d=(a._captionator_availableCueArea.right-g)/2+a._captionator_availableCueArea.left:(e=parseFloat(String(f.textPosition).replace(/[^\d\.]/ig, +"")),d=(a._captionator_availableCueArea.right-g)*(e/100)+a._captionator_availableCueArea.left),f.snapToLines===true?e=(u-1)*p+a._captionator_availableCueArea.top:(k=i.controlHeight+p+o*2,e=(i.height-k)*(f.linePosition/100))):(e=a._captionator_availableCueArea.top,d=a._captionator_availableCueArea.right-t,g=t,h=a._captionator_availableCueArea.height*(k/100),d=K(b),k=[].slice.call(b.querySelectorAll("span.captionator-cue-character"),0),C=Math.floor((h-o*2)/l),g=Math.ceil(d/C)*t,F=Math.ceil(d/C),y=(d- +C*(F-1))*l,f.snapToLines===true?d=f.direction==="vertical-lr"?a._captionator_availableCueArea.left:a._captionator_availableCueArea.right-g:(d=g+m*2,d=f.direction==="vertical-lr"?(i.width-d)*(f.linePosition/100):i.width-d-(i.width-d)*(f.linePosition/100)),f.textPosition==="auto"?e=(a._captionator_availableCueArea.bottom-h)/2+a._captionator_availableCueArea.top:(f.textPosition=parseFloat(String(f.textPosition).replace(/[^\d\.]/ig,"")),e=(a._captionator_availableCueArea.bottom-h)*(f.textPosition/100)+ +a._captionator_availableCueArea.top),s=j=r=v=0,k.forEach(function(a){j=f.direction==="vertical-lr"?t*v:g-t*(v+1);f.alignment==="start"||f.alignment!=="start"&&v=C-1?(r=0,v++):r++}));f.direction==="horizontal"&&(z=c.checkDirection(String(f.text))==="rtl"?{start:"right",middle:"center",end:"left"}[f.alignment]:{start:"left",middle:"center",end:"right"}[f.alignment]); +c.applyStyles(b,{position:"absolute",overflow:"hidden",width:g+"px",height:h+"px",top:e+"px",left:d+"px",padding:o+"px 
"+m+"px",textAlign:z,backgroundColor:"rgba("+I.join(",")+")",direction:c.checkDirection(String(f.text)),lineHeight:q+"pt",boxSizing:"border-box"});if(f.direction==="vertical"||f.direction==="vertical-lr")d-a._captionator_availableCueArea.left-a._captionator_availableCueArea.left>=a._captionator_availableCueArea.right-(d+g)?a._captionator_availableCueArea.right=d:a._captionator_availableCueArea.left= +d+g,a._captionator_availableCueArea.width=a._captionator_availableCueArea.right-a._captionator_availableCueArea.left;else{if(b.scrollHeight>b.offsetHeight*1.2){if(f.snapToLines){for(z=0;b.scrollHeight>b.offsetHeight*1.2;)h+=p,b.style.height=h+"px",z++;e-=z*p}else h=b.scrollHeight+o,k=i.controlHeight+h+o*2,e=(i.height-k)*(f.linePosition/100),b.style.height=h+"px";b.style.top=e+"px"}if(e-a._captionator_availableCueArea.top-a._captionator_availableCueArea.top>=a._captionator_availableCueArea.bottom- +(e+h)&&a._captionator_availableCueArea.bottom>e)a._captionator_availableCueArea.bottom=e;else if(a._captionator_availableCueArea.top=0;b--)n.moveTo(i.width-b*t-0.5,-0.5),n.lineTo(i.width-b*t-0.5,i.height);n.closePath();n.stroke();n.beginPath();n.strokeStyle="rgba(255,255,0,0.5)";for(b=0;b<=x;b++)n.moveTo(b*t+0.5,-0.5),n.lineTo(b*t+0.5,i.height);n.stroke();a.linesDrawn=true})()}},styleCueCanvas:function(b){var f, +a,d=b._captionatorOptions instanceof Object?b._captionatorOptions:{};if(!(b instanceof HTMLVideoElement))throw Error("Cannot style a cue canvas for a non-video node!");if(b._containerObject)a=b._containerObject,f=a.id;if(a)a.parentNode||document.body.appendChild(a);else{a=document.createElement("div");a.className="captionator-cue-canvas";f=c.generateID();a.id=f;if(d.appendCueCanvasTo){var e=null;if(d.appendCueCanvasTo instanceof HTMLElement)e=d.appendCueCanvasTo;else if(typeof d.appendCueCanvasTo=== +"string")try{var g=document.querySelectorAll(d.appendCueCanvasTo);if(g.length>0)e=g[0];else throw null;}catch(h){e=document.body,d.appendCueCanvasTo=false}else e=document.body,d.appendCueCanvasTo=false;e.appendChild(a)}else document.body.appendChild(a);b._containerObject=a;a.setAttribute("aria-live","polite");a.setAttribute("aria-atomic","true")}String(b.getAttribute("aria-describedby")).indexOf(f)===-1&&(e=b.hasAttribute("aria-describedby")?b.getAttribute("aria-describedby")+" ":"",b.setAttribute("aria-describedby", +e+f));e=c.getNodeMetrics(b);b=e.height*(D/100)/96*72;b=b>=A?b:A;f=Math.floor(b*E);f=f>B?f:B;c.applyStyles(a,{position:"absolute",overflow:"hidden",zIndex:100,height:e.height-e.controlHeight+"px",width:e.width+"px",top:(d.appendCueCanvasTo?0:e.top)+"px",left:(d.appendCueCanvasTo?0:e.left)+"px",color:"white",fontFamily:"Verdana, Helvetica, Arial, sans-serif",fontSize:b+"pt",lineHeight:f+"pt",boxSizing:"border-box"});if(window.navigator.userAgent.toLowerCase().indexOf("chrome/10")>-1)a.style.backgroundColor= +"rgba(0,0,0,0.01"+Math.random().toString().replace(".","")+")"},parseCaptions:function(b,f){var f=f instanceof Object?f:{},a="",d=[],e="",g=[],h=/^(\d{2})?:?(\d{2}):(\d{2})\.(\d+)\,(\d{2})?:?(\d{2}):(\d{2})\.(\d+)\s*(.*)/,k=/^(\d+)?:?(\d{2}):(\d{2})\.(\d+)\,(\d+)?:?(\d{2}):(\d{2})\.(\d+)\s*(.*)/,z=/^(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)\s+\-\-\>\s+(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)\s*(.*)/,m=/(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)/,o=/^([\d\.]+)\s+\+([\d\.]+)\s*(.*)/,l=/^\[(\d{2})?:?(\d{2})\:(\d{2})\.(\d{2})\]\s*(.*?)$/i, +q=/^(DEFAULTS|DEFAULT)\s+\-\-\>\s+(.*)/g,u=/^(STYLE|STYLES)\s+\-\-\>\s*\n([\s\S]*)/g,x=/^(COMMENT|COMMENTS)\s+\-\-\>\s+(.*)/g;if(b){var p=function(a){var b=new 
c.CaptionatorCueStructure(a,f),d=[],e,g,h,j=[];h=0;var k=function(a){return!!a.replace(/[^a-z0-9]+/ig,"").length},d=a.split(/(<\/?[^>]+>)/ig).filter(function(a){return!!a.replace(/\s*/ig,"")});h=b;for(e in d)if(d.hasOwnProperty(e))if(g=d[e],g.substr(0,1)==="<")if(g.substr(1,1)==="/"){if(a=g.substr(2).split(/[\s>]+/g)[0],j.length>0){g=0;for(h= +j.length-1;h>=0;h--){var o=j[h][j[h].length-1];g=h;if(o.token===a)break}h=j[g];j=j.slice(0,g)}}else{if(g.substr(1).match(m)||g.match(/^]+>/i)||g.match(/^/)||g.match(/^<(b|i|u|ruby|rt)>/)||f.sanitiseCueHTML!==false){a={token:g.replace(/[<\/>]+/ig,"").split(/[\s\.]+/)[0],rawToken:g,children:[]};if(a.token==="v")a.voice=g.match(/^]+)>/i)[1];else if(a.token==="c")a.classes=g.replace(/[<\/>\s]+/ig,"").split(/[\.]+/ig).slice(1).filter(k);else if(g=a.rawToken.match(m))b.isTimeDependent= +true,g=g.slice(1),a.timeIn=parseInt((g[0]||0)*3600,10)+parseInt((g[1]||0)*60,10)+parseInt(g[2]||0,10)+parseFloat("0."+(g[3]||0));h.push(a);j.push(h);h=a.children}}else f.sanitiseCueHTML!==false&&(g=g.replace(//g,">").replace(/\&/g,"&"),f.ignoreWhitespace||(g=g.replace(/\n+/g,"
"))),h.push(g);return b},d=b.replace(/\r\n/g,"\n").replace(/\r/g,"\n");l.exec(b)?(d=d.split(/\n+/g),a="LRC"):d=d.split(/\n\n+/g);return d=d.filter(function(b){return b.match(/^WEBVTT(\s*FILE)?/ig)? +(a="WebVTT",false):b.replace(/\s*/ig,"").length?true:false}).map(function(b,d){var l,m,y,v,j,s="",r;if(r=q.exec(b))return g=r.slice(2).join(""),g=g.split(/\s+/g).filter(function(a){return a&&!!a.length}),null;else if(r=u.exec(b))return e+=r[r.length-1],null;else if(r=x.exec(b))return null;for(l=a==="LRC"?[b.substr(0,b.indexOf("]")),b.substr(b.indexOf("]")+1)]:b.split(/\n/g);!l[0].replace(/\s+/ig,"").length&&l.length>0;)l.shift();for(r=l[0].match(/^\s*[a-z0-9]+\s*$/ig)?String(l.shift().replace(/\s*/ig, +"")):d;0 0 && value > captionator.TextTrack.OFF) { + this.loadTrack(this.src,null); + } + + // Refresh all captions on video + this.videoNode._captionator_dirtyBit = true; + captionator.rebuildCaptions(this.videoNode); + + if (value === captionator.TextTrack.OFF) { + // make sure the resource is reloaded next time (Is this correct behaviour?) + this.cues.length = 0; // Destroy existing cue data (bugfix) + this.readyState = captionator.TextTrack.NONE; + } + } + } else { + throw new Error("Illegal mode value for track: " + value); + } + }; + + // Create getter for default + this.getDefault = function() { + return this.internalDefault; + }; + + if (Object.prototype.__defineGetter__) { + this.__defineGetter__("mode", this.getMode); + this.__defineSetter__("mode", this.setMode); + this.__defineGetter__("default", this.getDefault); + } else if (Object.defineProperty) { + Object.defineProperty(this,"mode", + {get: this.getMode, set: this.setMode} + ); + Object.defineProperty(this,"default", + {get: this.getDefault} + ); + } + + this.loadTrack = function(source, callback) { + var captionData, ajaxObject = new XMLHttpRequest(); + if (this.readyState === captionator.TextTrack.LOADED) { + if (callback instanceof Function) { + callback(captionData); + } + } else { + this.src = source; + this.readyState = captionator.TextTrack.LOADING; + + var currentTrackElement = this; + ajaxObject.open('GET', source, true); + ajaxObject.onreadystatechange = function (eventData) { + if (ajaxObject.readyState === 4) { + if(ajaxObject.status === 200) { + var TrackProcessingOptions = currentTrackElement.videoNode._captionatorOptions || {}; + if (currentTrackElement.kind === "metadata") { + // People can load whatever data they please into metadata tracks. + // Don't process it. 
+ TrackProcessingOptions.processCueHTML = false; + TrackProcessingOptions.sanitiseCueHTML = false; + } + + captionData = captionator.parseCaptions(ajaxObject.responseText,TrackProcessingOptions); + currentTrackElement.readyState = captionator.TextTrack.LOADED; + currentTrackElement.cues.loadCues(captionData); + currentTrackElement.activeCues.refreshCues.apply(currentTrackElement.activeCues); + currentTrackElement.videoNode._captionator_dirtyBit = true; + captionator.rebuildCaptions(currentTrackElement.videoNode); + currentTrackElement.onload.call(this); + + if (callback instanceof Function) { + callback.call(currentTrackElement,captionData); + } + } else { + // Throw error handler, if defined + currentTrackElement.readyState = captionator.TextTrack.ERROR; + currentTrackElement.onerror(); + } + } + }; + try { + ajaxObject.send(null); + } catch(Error) { + // Throw error handler, if defined + currentTrackElement.readyState = captionator.TextTrack.ERROR; + currentTrackElement.onerror(Error); + } + } + }; + + // mutableTextTrack.addCue(cue) + // Adds the given cue to mutableTextTrack's text track list of cues. + // Raises an exception if the argument is null, associated with another text track, or already in the list of cues. + + this.addCue = function(cue) { + if (cue && cue instanceof captionator.TextTrackCue) { + this.cues.addCue(cue); + } else { + throw new Error("The argument is null or not an instance of TextTrackCue."); + } + }; + + // mutableTextTrack.removeCue(cue) + // Removes the given cue from mutableTextTrack's text track list of cues. + // Raises an exception if the argument is null, associated with another text track, or not in the list of cues. + + this.removeCue = function() { + + }; + }; + // Define constants for TextTrack.readyState + captionator.TextTrack.NONE = 0; + captionator.TextTrack.LOADING = 1; + captionator.TextTrack.LOADED = 2; + captionator.TextTrack.ERROR = 3; + // Define constants for TextTrack.mode + captionator.TextTrack.OFF = 0; + captionator.TextTrack.HIDDEN = 1; + captionator.TextTrack.SHOWING = 2; + + // Define read-only properties + /** + * @constructor + */ + captionator.TextTrackCueList = function TextTrackCueList(track) { + this.track = track instanceof captionator.TextTrack ? track : null; + + this.getCueById = function(cueID) { + return this.filter(function(currentCue) { + return currentCue.id === cueID; + })[0]; + }; + + this.loadCues = function(cueData) { + for (var cueIndex = 0; cueIndex < cueData.length; cueIndex ++) { + cueData[cueIndex].track = this.track; + Array.prototype.push.call(this,cueData[cueIndex]); + } + }; + + this.addCue = function(cue) { + if (cue && cue instanceof captionator.TextTrackCue) { + if (cue.track === this.track || !cue.track) { + // TODO: Check whether cue is already in list of cues. + // TODO: Sort cue list based on TextTrackCue.startTime. 
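// A sorted insert satisfying the two TODOs above could look like this (sketch):
//
//     var i = 0;
//     while (i < this.length && this[i].startTime <= cue.startTime) { i += 1; }
//     Array.prototype.splice.call(this, i, 0, cue);
//
// For now, the cue is simply appended: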
+ Array.prototype.push.call(this,cue); + } else { + throw new Error("This cue is associated with a different track!"); + } + } else { + throw new Error("The argument is null or not an instance of TextTrackCue."); + } + }; + + this.toString = function() { + return "[TextTrackCueList]"; + }; + }; + captionator.TextTrackCueList.prototype = []; + + /** + * @constructor + */ + captionator.ActiveTextTrackCueList = function ActiveTextTrackCueList(textTrackCueList,textTrack) { + // Among active cues: + + // The text track cues of a media element's text tracks are ordered relative to each + // other in the text track cue order, which is determined as follows: first group the + // cues by their text track, with the groups being sorted in the same order as their + // text tracks appear in the media element's list of text tracks; then, within each + // group, cues must be sorted by their start time, earliest first; then, any cues with + // the same start time must be sorted by their end time, earliest first; and finally, + // any cues with identical end times must be sorted in the order they were created (so + // e.g. for cues from a WebVTT file, that would be the order in which the cues were + // listed in the file). + + this.refreshCues = function() { + if (textTrackCueList.length) { + var cueList = this; + var cueListChanged = false; + var oldCueList = [].slice.call(this,0); + this.length = 0; + + textTrackCueList.forEach(function(cue) { + if (cue.active) { + cueList.push(cue); + + if (cueList[cueList.length-1] !== oldCueList[cueList.length-1]) { + cueListChanged = true; + } + } + }); + + if (cueListChanged) { + try { + textTrack.oncuechange(); + } catch(error){} + } + } + }; + + this.toString = function() { + return "[ActiveTextTrackCueList]"; + }; + + this.refreshCues(); + }; + captionator.ActiveTextTrackCueList.prototype = new captionator.TextTrackCueList(null); + + /** + * @constructor + */ + captionator.TextTrackCue = function TextTrackCue(id, startTime, endTime, text, settings, pauseOnExit, track) { + // Set up internal data store + this.id = id; + this.track = track instanceof captionator.TextTrack ? track : null; + this.startTime = parseFloat(startTime); + this.endTime = parseFloat(endTime); + this.text = typeof(text) === "string" || text instanceof captionator.CaptionatorCueStructure ? text : ""; + this.settings = typeof(settings) === "string" ? settings : ""; + this.intSettings = {}; + this.pauseOnExit = !!pauseOnExit; + this.wasActive = false; + + // Parse settings & set up cue defaults + + // A writing direction, either horizontal (a line extends horizontally and is positioned vertically, + // with consecutive lines displayed below each other), vertical growing left (a line extends vertically + // and is positioned horizontally, with consecutive lines displayed to the left of each other), or + // vertical growing right (a line extends vertically and is positioned horizontally, with consecutive + // lines displayed to the right of each other). + // Values: + // horizontal, vertical, vertical-lr + this.direction = "horizontal"; + + // A boolean indicating whether the line's position is a line position (positioned to a multiple of the + // line dimensions of the first line of the cue), or whether it is a percentage of the dimension of the video. 
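// For example, a hypothetical WebVTT settings string "L:10% A:start T:5%" is
// decoded by the settings loop further down (via the D/L/T/A/S map) into
// linePosition = "10%" (a percentage, so snapToLines is also cleared),
// alignment = "start", and textPosition = "5%".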
+ this.snapToLines = true; + + // Either a number giving the position of the lines of the cue, to be interpreted as defined by the + // writing direction and snap-to-lines flag of the cue, or the special value auto, which means the + // position is to depend on the other active tracks. + this.linePosition = "auto"; + + // A number giving the position of the text of the cue within each line, to be interpreted as a percentage + // of the video, as defined by the writing direction. + this.textPosition = 50; + + // A number giving the size of the box within which the text of each line of the cue is to be aligned, to + // be interpreted as a percentage of the video, as defined by the writing direction. + this.size = 0; + + // An alignment for the text of each line of the cue, either start alignment (the text is aligned towards its + // start side), middle alignment (the text is aligned centered between its start and end sides), end alignment + // (the text is aligned towards its end side). Which sides are the start and end sides depends on the + // Unicode bidirectional algorithm and the writing direction. [BIDI] + // Values: + // start, middle, end + this.alignment = "middle"; + + // Parse VTT Settings... + if (this.settings.length) { + var intSettings = this.intSettings; + var currentCue = this; + settings = settings.split(/\s+/).filter(function(settingItem) { return settingItem.length > 0;}); + if (settings instanceof Array) { + settings.forEach(function(cueItem) { + var settingMap = {"D":"direction","L":"linePosition","T":"textPosition","A":"alignment","S":"size"}; + cueItem = cueItem.split(":"); + if (settingMap[cueItem[0]]) { + intSettings[settingMap[cueItem[0]]] = cueItem[1]; + } + + if (settingMap[cueItem[0]] in currentCue) { + currentCue[settingMap[cueItem[0]]] = cueItem[1]; + } + }); + } + } + + if (this.linePosition.match(/\%/)) { + this.snapToLines = false; + } + + // Functions defined by spec (getters, kindof) + this.getCueAsSource = function getCueAsSource() { + // Choosing the below line instead will mean that the raw, unprocessed source will be returned instead. + // Not really sure which is the correct behaviour. + // return this.text instanceof captionator.CaptionatorCueStructure? 
this.text.cueSource : this.text; + return String(this.text); + }; + + this.getCueAsHTML = function getCueAsHTML() { + var DOMFragment = document.createDocumentFragment(); + var DOMNode = document.createElement("div"); + DOMNode.innerHTML = String(this.text); + + Array.prototype.forEach.call(DOMNode.childNodes,function(child) { + DOMFragment.appendChild(child.cloneNode(true)); + }); + + return DOMFragment; + }; + + this.isActive = function() { + var currentTime = 0; + if (this.track instanceof captionator.TextTrack) { + if ((this.track.mode === captionator.TextTrack.SHOWING || this.track.mode === captionator.TextTrack.HIDDEN) && this.track.readyState === captionator.TextTrack.LOADED) { + try { + currentTime = this.track.videoNode.currentTime; + + if (this.startTime <= currentTime && this.endTime >= currentTime) { + // Fire enter event if we were not active and now are + if (!this.wasActive) { + this.wasActive = true; + this.onenter(); + } + + return true; + } + } catch(Error) { + return false; + } + } + } + + // Fire exit event if we were active and now are not + if (this.wasActive) { + this.wasActive = false; + this.onexit(); + } + + return false; + }; + + if (Object.prototype.__defineGetter__) { + this.__defineGetter__("active", this.isActive); + } else if (Object.defineProperty) { + Object.defineProperty(this,"active", + {get: this.isActive} + ); + } + + this.toString = function toString() { + return "TextTrackCue:" + this.id + "\n" + String(this.text); + }; + + // Events defined by spec + this.onenter = function() {}; + this.onexit = function() {}; + }; + + // Captionator media extensions + /** + * @constructor + */ + captionator.MediaTrack = function MediaTrack(id,kind,label,language,src,type,isDefault) { + // This function is under construction! + // Eventually, the idea is that captionator will support timed video and audio tracks in addition to text subtitles + + var getSupportedMediaSource = function(sources) { + // Thanks Mr Pilgrim! :) + var supportedSource = sources + .filter(function(source,index) { + try { + var mediaElement = document.createElement(source.getAttribute("type").split("/").shift()); + return !!(mediaElement.canPlayType && mediaElement.canPlayType(source.getAttribute("type")).replace(/no/, '')); + } catch(Error) { + // (The type fragment before the / probably didn't match to 'video' or 'audio'. So... we don't support it.) 
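// To illustrate the selection above: for hypothetical alternatives such as
//     <source src="description.mp3" type="audio/mpeg" />
//     <source src="description.ogg" type="audio/ogg" />
// only the sources whose full MIME type is accepted by canPlayType() survive the
// filter, and the src attribute of the first survivor is returned.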
+ return false; + } + }) + .shift() + .getAttribute("src"); + + return supportedSource; + }; + + this.onload = function () {}; + this.onerror = function() {}; + + this.id = id || ""; + this.internalMode = captionator.TextTrack.OFF; + this.internalMode = captionator.TextTrack.OFF; + this.mediaElement = null; + this.kind = kind || "audiodescription"; + this.label = label || ""; + this.language = language || ""; + this.readyState = captionator.TextTrack.NONE; + this.type = type || "x/unknown"; // MIME type + this.mediaType = null; + this.src = ""; + + if (typeof(src) === "string") { + this.src = src; + } else if (src instanceof NodeList) { + this.src = getSupportedMediaSource(src); + } + + if (this.type.match(/^video\//)) { + this.mediaType = "video"; + } else if (this.type.match(/^audio\//)) { + this.mediaType = "audio"; + } + + // Create getters and setters for mode + this.getMode = function() { + return this.internalMode; + }; + + this.setMode = function(value) { + var allowedModes = [captionator.TextTrack.OFF,captionator.TextTrack.HIDDEN,captionator.TextTrack.SHOWING], containerID, container; + if (allowedModes.indexOf(value) !== -1) { + if (value !== this.internalMode) { + this.internalMode = value; + if (value === captionator.TextTrack.HIDDEN && !this.mediaElement) { + this.buildMediaElement(); + } + + if (value === captionator.TextTrack.SHOWING) { + this.showMediaElement(); + } + + if (value === captionator.TextTrack.OFF || value === captionator.TextTrack.HIDDEN) { + this.hideMediaElement(); + } + } + } else { + throw new Error("Illegal mode value for track."); + } + }; + + if (Object.prototype.__defineGetter__) { + this.__defineGetter__("mode", this.getMode); + this.__defineSetter__("mode", this.setMode); + } else if (Object.defineProperty) { + Object.defineProperty(this,"mode", + {get: this.getMode, set: this.setMode} + ); + } + + this.hideMediaElement = function() { + if (this.mediaElement) { + if (!this.mediaElement.paused) { + this.mediaElement.pause(); + } + + if (this.mediaElement instanceof HTMLVideoElement) { + this.mediaElement.style.display = "none"; + } + } + }; + + this.showMediaElement = function() { + if (!this.mediaElement) { + this.buildMediaElement(); + document.body.appendChild(this.mediaElement); + } else { + if (!this.mediaElement.parentNode) { + document.body.appendChild(this.mediaElement); + } + + if (this.mediaElement instanceof HTMLVideoElement) { + this.mediaElement.style.display = "block"; + } + } + }; + + this.buildMediaElement = function() { + try { + if (this.type.match(/^video\//)) { + this.mediaElement = document.createElement("video"); + this.mediaElement.className = "captionator-mediaElement-" + this.kind; + captionator.styleNode(this.mediaElement,this.kind,this.videoNode); + + } else if (this.type.match(/^audio\//)) { + this.mediaElement = new Audio(); + } + + this.mediaElement.type = this.type; + this.mediaElement.src = this.src; + this.mediaElement.load(); + this.mediaElement.trackObject = this; + this.readyState = captionator.TextTrack.LOADING; + var mediaElement = this.mediaElement; + + this.mediaElement.addEventListener("progress",function(eventData) { + mediaElement.trackObject.readyState = captionator.TextTrack.LOADING; + },false); + + this.mediaElement.addEventListener("canplaythrough",function(eventData) { + mediaElement.trackObject.readyState = captionator.TextTrack.LOADED; + mediaElement.trackObject.onload.call(mediaElement.trackObject); + },false); + + this.mediaElement.addEventListener("error",function(eventData) { + 
mediaElement.trackObject.readyState = captionator.TextTrack.ERROR; + mediaElement.trackObject.mode = captionator.TextTrack.OFF; + mediaElement.trackObject.mediaElement = null; + mediaElement.trackObject.onerror.call(mediaElement.trackObject,eventData); + },false); + + } catch(Error) { + this.readyState = captionator.TextTrack.ERROR; + this.mode = captionator.TextTrack.OFF; + this.mediaElement = null; + + if (this.onerror) { + this.onerror.apply(this,Error); + } + } + }; + }; + + // Captionator internal cue structure object + /** + * @constructor + */ + captionator.CaptionatorCueStructure = function CaptionatorCueStructure(cueSource,options) { + var cueStructureObject = this; + this.isTimeDependent = false; + this.cueSource = cueSource; + this.options = options; + this.processedCue = null; + this.toString = function toString(currentTimestamp) { + if (options.processCueHTML !== false) { + var processLayer = function(layerObject,depth) { + if (cueStructureObject.processedCue === null) { + var compositeHTML = "", itemIndex, cueChunk; + for (itemIndex in layerObject) { + if (itemIndex.match(/^\d+$/) && layerObject.hasOwnProperty(itemIndex)) { + // We're not a prototype function or local property, and we're in range + cueChunk = layerObject[itemIndex]; + // Don't generate text from the token if it has no contents + if (cueChunk instanceof Object && cueChunk.children && cueChunk.children.length) { + if (cueChunk.token === "v") { + compositeHTML +="" + + processLayer(cueChunk.children,depth+1) + + ""; + } else if(cueChunk.token === "c") { + compositeHTML +="" + + processLayer(cueChunk.children,depth+1) + + ""; + } else if(cueChunk.timeIn > 0) { + // If a timestamp is unspecified, or the timestamp suggests this token is valid to display, return it + if ((currentTimestamp === null || currentTimestamp === undefined) || + (currentTimestamp > 0 && currentTimestamp >= cueChunk.timeIn)) { + + compositeHTML +="" + + processLayer(cueChunk.children,depth+1) + + ""; + } + } else { + compositeHTML +=cueChunk.rawToken + + processLayer(cueChunk.children,depth+1) + + ""; + } + } else if (cueChunk instanceof String || typeof(cueChunk) === "string" || typeof(cueChunk) === "number") { + compositeHTML += cueChunk; + } else { + // Didn't match - file a bug! + } + } + } + + if (!cueStructureObject.isTimeDependent && depth === 0) { + cueStructureObject.processedCue = compositeHTML; + } + + return compositeHTML; + } else { + return cueStructureObject.processedCue; + } + }; + return processLayer(this,0); + } else { + return cueSource; + } + }; + }; + captionator.CaptionatorCueStructure.prototype = []; + + // if requested by options, export the object types + if (options.exportObjects) { + window.TextTrack = captionator.TextTrack; + window.TextTrackCueList = captionator.TextTrackCueList; + window.ActiveTextTrackCueList = captionator.ActiveTextTrackCueList; + window.TextTrackCue = captionator.TextTrackCue; + window.MediaTrack = captionator.MediaTrack; + } + + // Next time captionator.captionify() is called, the objects are already available to us. 
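// Usage sketch for the exportObjects option handled above (the cue values are
// illustrative):
//
//     captionator.captionify(null, null, { exportObjects: true });
//     var cue = new TextTrackCue("c1", 0, 5, "Hello there!"); // now a global constructor
//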
+
+	// Next time captionator.captionify() is called, the objects are already available to us.
+	objectsCreated = true;
+	}
+
+	[].slice.call(document.getElementsByTagName("video"),0).forEach(function(videoElement) {
+		videoElement.addTextTrack = function(id,kind,label,language,src,type,isDefault) {
+			var allowedKinds = ["subtitles","captions","descriptions","metadata","chapters",	// WHATWG SPEC
+								"karaoke","lyrics","tickertext",								// CAPTIONATOR TEXT EXTENSIONS
+								"audiodescription","commentary",								// CAPTIONATOR AUDIO EXTENSIONS
+								"alternate","signlanguage"];									// CAPTIONATOR VIDEO EXTENSIONS
+
+			// The first eight kinds are text based; everything after them becomes a MediaTrack
+			var textKinds = allowedKinds.slice(0,8);
+			var newTrack;
+			id = typeof(id) === "string" ? id : "";
+			label = typeof(label) === "string" ? label : "";
+			language = typeof(language) === "string" ? language : "";
+			isDefault = typeof(isDefault) === "boolean" ? isDefault : false; // Is this track set as the default?
+
+			// If the kind isn't known, throw a DOM syntax error exception
+			if (!allowedKinds.filter(function (currentKind){
+				return kind === currentKind;
+			}).length) {
+				throw captionator.createDOMException(12,"DOMException 12: SYNTAX_ERR: You must use a valid kind when creating a TimedTextTrack.","SYNTAX_ERR");
+			}
+
+			if (textKinds.filter(function (currentKind){
+				return kind === currentKind;
+			}).length) {
+				newTrack = new captionator.TextTrack(id,kind,label,language,src,null);
+				if (newTrack) {
+					if (!(videoElement.tracks instanceof Array)) {
+						videoElement.tracks = [];
+					}
+
+					videoElement.tracks.push(newTrack);
+					return newTrack;
+				} else {
+					return false;
+				}
+			} else {
+				newTrack = new captionator.MediaTrack(id,kind,label,language,src,type,isDefault);
+				if (newTrack) {
+					if (!(videoElement.mediaTracks instanceof Array)) {
+						videoElement.mediaTracks = [];
+					}
+
+					videoElement.mediaTracks.push(newTrack);
+					return newTrack;
+				} else {
+					return false;
+				}
+			}
+		};
+	});
+
+	if (!element || element === false || element === undefined || element === null) {
+		videoElements = [].slice.call(document.getElementsByTagName("video"),0); // select and convert to array
+	} else {
+		if (element instanceof Array) {
+			for (elementIndex = 0; elementIndex < element.length; elementIndex ++) {
+				if (typeof(element[elementIndex]) === "string") {
+					videoElements = videoElements.concat([].slice.call(document.querySelectorAll(element[elementIndex]),0)); // select and convert to array
+				} else if (element[elementIndex].constructor === HTMLVideoElement) {
+					videoElements.push(element[elementIndex]);
+				}
+			}
+		} else if (typeof(element) === "string") {
+			videoElements = [].slice.call(document.querySelectorAll(element),0); // select and convert to array
+		} else if (element.constructor === HTMLVideoElement) {
+			videoElements.push(element);
+		}
+	}
+
+	if (videoElements.length) {
+		for (elementIndex = 0; elementIndex < videoElements.length; elementIndex ++) {
+			captionator.processVideoElement(videoElements[elementIndex],defaultLanguage,options);
+		}
+		return true;
+	} else {
+		return false;
+	}
+},
+/*
+	captionator.processVideoElement(videoElement,
+							[defaultLanguage - string in BCP47],
+							[options - JS Object])
+
+	Processes track items within an HTMLVideoElement. The second and third parameters are both optional.
+
+	First parameter: Mandatory HTMLVideoElement object.
+
+	Second parameter: BCP-47 string for default language. If this parameter is omitted, the User Agent's language
+	will be used to choose a track.
+
+	Third parameter: as yet unused - will implement animation settings and some other global options with this
+	parameter later.
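+
+	Example (illustrative only): processing a single element directly, with English as
+	the default track language:
+
+		captionator.processVideoElement(document.querySelector("video"), "en", {});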
+
+	RETURNS:
+
+	Reference to the HTMLVideoElement.
+
+*/
+"processVideoElement": function(videoElement,defaultLanguage,options) {
+	var trackList = [];
+	var language = navigator.language || navigator.userLanguage;
+	var globalLanguage = defaultLanguage || language.split("-")[0];
+	options = options instanceof Object ? options : {};
+
+	if (!videoElement.captioned) {
+		videoElement._captionatorOptions = options;
+		videoElement.className += (videoElement.className.length ? " " : "") + "captioned";
+		videoElement.captioned = true;
+
+		// Check whether video element has an ID. If not, create one
+		if (videoElement.id.length === 0) {
+			videoElement.id = captionator.generateID();
+		}
+
+		var enabledDefaultTrack = false;
+		[].slice.call(videoElement.querySelectorAll("track"),0).forEach(function(trackElement) {
+			var sources = null;
+			if (trackElement.querySelectorAll("source").length > 0) {
+				sources = trackElement.querySelectorAll("source");
+			} else {
+				sources = trackElement.getAttribute("src");
+			}
+
+			var trackObject = videoElement.addTextTrack(
+									(trackElement.getAttribute("id")||captionator.generateID()),
+									trackElement.getAttribute("kind"),
+									trackElement.getAttribute("label"),
+									(trackElement.getAttribute("srclang")||"").split("-")[0], // guard against a missing srclang attribute
+									sources,
+									trackElement.getAttribute("type"),
+									trackElement.hasAttribute("default")); // (Christopher) I think we can get away with this given it's a boolean attribute anyway
+
+			trackElement.track = trackObject;
+			trackObject.trackNode = trackElement;
+			trackObject.videoNode = videoElement;
+			trackList.push(trackObject);
+
+			// Now determine whether the track is visible by default.
+			// The comments in this section come straight from the spec...
+			var trackEnabled = false;
+
+			// If the text track kind is subtitles or captions and the user has indicated an interest in having a track
+			// with this text track kind, text track language, and text track label enabled, and there is no other text track
+			// in the media element's list of text tracks with a text track kind of either subtitles or captions whose text track mode is showing
+			// ---> Let the text track mode be showing.
+
+			if ((trackObject.kind === "subtitles" || trackObject.kind === "captions") &&
+				(defaultLanguage === trackObject.language && options.enableCaptionsByDefault)) {
+				if (!trackList.filter(function(trackObject) {
+						if ((trackObject.kind === "captions" || trackObject.kind === "subtitles") && defaultLanguage === trackObject.language && trackObject.mode === captionator.TextTrack.SHOWING) {
+							return true;
+						} else {
+							return false;
+						}
+					}).length) {
+					trackEnabled = true;
+				}
+			}
+
+			// If the text track kind is chapters and the text track language is one that the user agent has reason to believe is
+			// appropriate for the user, and there is no other text track in the media element's list of text tracks with a text track
+			// kind of chapters whose text track mode is showing
+			// ---> Let the text track mode be showing.
+ + if (trackObject.kind === "chapters" && (defaultLanguage === trackObject.language)) { + if (!trackList.filter(function(trackObject) { + if (trackObject.kind === "chapters" && trackObject.mode === captionator.TextTrack.SHOWING) { + return true; + } else { + return false; + } + }).length) { + trackEnabled = true; + } + } + + // If the text track kind is descriptions and the user has indicated an interest in having text descriptions + // with this text track language and text track label enabled, and there is no other text track in the media element's + // list of text tracks with a text track kind of descriptions whose text track mode is showing + + if (trackObject.kind === "descriptions" && (options.enableDescriptionsByDefault === true) && (defaultLanguage === trackObject.language)) { + if (!trackList.filter(function(trackObject) { + if (trackObject.kind === "descriptions" && trackObject.mode === captionator.TextTrack.SHOWING) { + return true; + } else { + return false; + } + }).length) { + trackEnabled = true; + } + } + + // If there is a text track in the media element's list of text tracks whose text track mode is showing by default, + // the user agent must furthermore change that text track's text track mode to hidden. + + if (trackEnabled === true) { + trackList.forEach(function(trackObject) { + if(trackObject.trackNode.hasAttribute("default") && trackObject.mode === captionator.TextTrack.SHOWING) { + trackObject.mode = captionator.TextTrack.HIDDEN; + } + }); + } + + // If the track element has a default attribute specified, and there is no other text track in the media element's + // list of text tracks whose text track mode is showing or showing by default + // Let the text track mode be showing by default. + + if (trackElement.hasAttribute("default")) { + if (!trackList.filter(function(trackObject) { + if (trackObject.trackNode.hasAttribute("default") && trackObject.trackNode !== trackElement) { + return true; + } else { + return false; + } + }).length) { + trackEnabled = true; + trackObject.internalDefault = true; + } + } + + // Otherwise + // Let the text track mode be disabled. + + if (trackEnabled === true) { + trackObject.mode = captionator.TextTrack.SHOWING; + } + }); + + videoElement.addEventListener("timeupdate", function(eventData){ + var videoElement = eventData.target; + // update active cues + try { + videoElement.tracks.forEach(function(track) { + track.activeCues.refreshCues.apply(track.activeCues); + }); + } catch(error) {} + + // External renderer? + if (options.renderer instanceof Function) { + options.renderer.call(captionator,videoElement); + } else { + captionator.rebuildCaptions(videoElement); + } + + captionator.synchroniseMediaElements(videoElement); + }, false); + + window.addEventListener("resize", function(eventData) { + videoElement._captionator_dirtyBit = true; // mark video as dirty, force captionator to rerender captions + captionator.rebuildCaptions(videoElement); + },false); + + videoElement.addEventListener("play", function(eventData){ + captionator.synchroniseMediaElements(videoElement); + },false); + + videoElement.addEventListener("pause", function(eventData){ + captionator.synchroniseMediaElements(videoElement); + },false); + + // Hires mode + if (options.enableHighResolution === true) { + window.setInterval(function captionatorHighResProcessor() { + try { + videoElement.tracks.forEach(function(track) { + track.activeCues.refreshCues.apply(track.activeCues); + }); + } catch(error) {} + + // External renderer? 
+ if (options.renderer instanceof Function) { + options.renderer.call(captionator,videoElement); + } else { + captionator.rebuildCaptions(videoElement); + } + },20); + } + } + + return videoElement; + }, + /* + captionator.rebuildCaptions(HTMLVideoElement videoElement) + + Loops through all the TextTracks for a given element and manages their display (including generation of container elements.) + + First parameter: HTMLVideoElement object with associated TextTracks + + RETURNS: + + Nothing. + + */ + "rebuildCaptions": function(videoElement) { + var trackList = videoElement.tracks || []; + var options = videoElement._captionatorOptions instanceof Object ? videoElement._captionatorOptions : {}; + var currentTime = videoElement.currentTime; + var compositeActiveCues = []; + var cuesChanged = false; + var activeCueIDs = []; + var cueSortArray = []; + + // Work out what cues are showing... + trackList.forEach(function(track,trackIndex) { + if (track.mode === captionator.TextTrack.SHOWING && track.readyState === captionator.TextTrack.LOADED) { + cueSortArray = [].slice.call(track.activeCues,0); + + // Do a reverse sort + // Since the available cue render area is a square which decreases in size + // (away from each side of the video) with each successive cue added, + // and we want cues which are older to be displayed above cues which are newer, + // we sort active cues within each track so that older ones are rendered first. + + cueSortArray = cueSortArray.sort(function(cueA, cueB) { + if (cueA.startTime > cueB.startTime) { + return -1; + } else { + return 1; + } + }); + + compositeActiveCues = compositeActiveCues.concat(cueSortArray); + } + }); + + // Determine whether cues have changed - we generate an ID based on track ID, cue ID, and text length + activeCueIDs = compositeActiveCues.map(function(cue) {return cue.track.id + "." + cue.id + ":" + cue.text.toString(currentTime).length;}); + cuesChanged = !captionator.compareArray(activeCueIDs,videoElement._captionator_previousActiveCues); + + // If they've changed, we re-render our cue canvas. + if (cuesChanged || videoElement._captionator_dirtyBit) { + // If dirty bit was set, it certainly isn't now. + videoElement._captionator_dirtyBit = false; + + // Destroy internal tracking variable (which is used for caption rendering) + videoElement._captionator_availableCueArea = null; + + // Internal tracking variable to determine whether our composite active cue list for the video has changed + videoElement._captionator_previousActiveCues = activeCueIDs; + + // Get the canvas ready if it isn't already + captionator.styleCueCanvas(videoElement); + videoElement._containerObject.innerHTML = ""; + + // Now we render the cues + compositeActiveCues.forEach(function(cue) { + var cueNode = document.createElement("div"); + cueNode.id = String(cue.id).length ? cue.id : captionator.generateID(); + cueNode.className = "captionator-cue"; + cueNode.innerHTML = cue.text.toString(currentTime); + videoElement._containerObject.appendChild(cueNode); + captionator.styleCue(cueNode,cue,videoElement); + }); + } + }, + /* + captionator.synchroniseMediaElements(HTMLVideoElement videoElement) + + Loops through all the MediaTracks for a given element and manages their display/audibility, synchronising them to the playback of the + master video element. + + This function also synchronises regular HTML5 media elements with a property of syncMaster with a value equal to the ID of the current video + element. 
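+
+	For example (an illustrative sketch, not markup from this patch), an audio
+	description can be slaved to a video by giving it a syncMaster attribute:
+
+		<video id="mainVideo" src="video.webm" controls></video>
+		<audio syncMaster="mainVideo" src="description.ogg"></audio>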
+
+	First parameter: HTMLVideoElement object with associated MediaTracks
+
+	RETURNS:
+
+	Nothing.
+
+*/
+"synchroniseMediaElements": function(videoElement) {
+	var trackList = videoElement.mediaTracks || [];
+	var options = videoElement._captionatorOptions instanceof Object ? videoElement._captionatorOptions : {};
+	var currentTime = videoElement.currentTime;
+	var synchronisationThreshold = 0.5; // How many seconds of drift will be tolerated before resynchronisation?
+
+	var synchroniseElement = function(slave,master) {
+		try {
+			if (master.seeking) {
+				slave.pause();
+			}
+
+			if (slave.currentTime < master.currentTime - synchronisationThreshold || slave.currentTime > master.currentTime + synchronisationThreshold) {
+				slave.currentTime = master.currentTime;
+			}
+
+			if (slave.paused && !master.paused) {
+				slave.play();
+			} else if (!slave.paused && master.paused) {
+				slave.pause();
+			}
+		} catch(error) {
+			// Probably tried to seek to an unavailable chunk of video
+		}
+	};
+
+	// Work out which media tracks are showing, and keep their elements in sync...
+	trackList.forEach(function(track,trackIndex) {
+		if (track.mode === captionator.TextTrack.SHOWING && track.readyState >= captionator.TextTrack.LOADING) {
+			synchroniseElement(track.mediaElement,videoElement);
+		}
+	});
+
+	if (videoElement.id) {
+		[].slice.call(document.body.querySelectorAll("*[syncMaster=" + videoElement.id + "]"),0).forEach(function(mediaElement,index) {
+			if (mediaElement.tagName.toLowerCase() === "video" || mediaElement.tagName.toLowerCase() === "audio") {
+				synchroniseElement(mediaElement,videoElement);
+			}
+		});
+	}
+},
+/*
+	captionator.getNodeMetrics(DOMNode)
+
+	Calculates and returns a number of sizing and position metrics from a DOMNode of any variety (though this function is intended
+	to be used with HTMLVideoElements.) Returns the height of the default controls on a video based on user agent detection
+	(As far as I know, there's no way to dynamically calculate the height of browser UI controls on a video.)
+
+	First parameter: DOMNode from which to calculate sizing metrics. This parameter is mandatory.
+
+	RETURNS:
+
+	An object with the following properties:
+		left: The calculated left offset of the node
+		top: The calculated top offset of the node
+		height: The calculated height of the node
+		width: The calculated width of the node
+		controlHeight: If the node is a video and has the `controls` attribute present, the height of the UI controls for the video. Otherwise, zero.
+
+*/
+"getNodeMetrics": function(DOMNode) {
+	var nodeComputedStyle = window.getComputedStyle(DOMNode,null);
+	var offsetObject = DOMNode;
+	var offsetTop = DOMNode.offsetTop, offsetLeft = DOMNode.offsetLeft;
+	var width = 0, height = 0;
+	var controlHeight = 0;
+
+	width = parseInt(nodeComputedStyle.getPropertyValue("width"),10);
+	height = parseInt(nodeComputedStyle.getPropertyValue("height"),10);
+
+	// Slightly verbose expression in order to pass JSHint
+	while (!!(offsetObject = offsetObject.offsetParent)) {
+		offsetTop += offsetObject.offsetTop;
+		offsetLeft += offsetObject.offsetLeft;
+	}
+
+	if (DOMNode.hasAttribute("controls")) {
+		// Get heights of default control strip in various browsers
+		// There could be a way to measure this live but I haven't thought/heard of it yet...
+ var UA = navigator.userAgent.toLowerCase(); + if (UA.indexOf("chrome") !== -1) { + controlHeight = 32; + } else if (UA.indexOf("opera") !== -1) { + controlHeight = 25; + } else if (UA.indexOf("firefox") !== -1) { + controlHeight = 28; + } else if (UA.indexOf("ie 9") !== -1 || UA.indexOf("ipad") !== -1) { + controlHeight = 44; + } else if (UA.indexOf("safari") !== -1) { + controlHeight = 25; + } + } else if (DOMNode._captionatorOptions) { + var tmpCaptionatorOptions = DOMNode._captionatorOptions; + if (tmpCaptionatorOptions.controlHeight) { + controlHeight = parseInt(tmpCaptionatorOptions.controlHeight,10); + } + } + + return { + left: offsetLeft, + top: offsetTop, + width: width, + height: height, + controlHeight: controlHeight + }; + }, + /* + captionator.applyStyles(DOMNode, Style Object) + + A fast way to apply multiple CSS styles to a DOMNode. + + First parameter: DOMNode to style. This parameter is mandatory. + + Second parameter: A key/value object where the keys are camel-cased variants of CSS property names to apply, + and the object values are CSS property values as per the spec. This parameter is mandatory. + + RETURNS: + + Nothing. + + */ + "applyStyles": function(StyleNode, styleObject) { + for (var styleName in styleObject) { + if ({}.hasOwnProperty.call(styleObject, styleName)) { + StyleNode.style[styleName] = styleObject[styleName]; + } + } + }, + /* + captionator.checkDirection(text) + + Determines whether the text string passed into the function is an RTL (right to left) or LTR (left to right) string. + + First parameter: Text string to check. This parameter is mandatory. + + RETURNS: + + The text string 'rtl' if the text is a right to left string, 'ltr' if the text is a left to right string, or an empty string + if the direction could not be determined. + + */ + "checkDirection": function(text) { + // Inspired by http://www.frequency-decoder.com/2008/12/12/automatically-detect-rtl-text + // Thanks guys! + var ltrChars = 'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02B8\u0300-\u0590\u0800-\u1FFF'+'\u2C00-\uFB1C\uFDFE-\uFE6F\uFEFD-\uFFFF', + rtlChars = '\u0591-\u07FF\uFB1D-\uFDFD\uFE70-\uFEFC', + ltrDirCheckRe = new RegExp('^[^'+rtlChars+']*['+ltrChars+']'), + rtlDirCheckRe = new RegExp('^[^'+ltrChars+']*['+rtlChars+']'); + + return !!rtlDirCheckRe.test(text) ? 'rtl' : (!!ltrDirCheckRe.test(text) ? 'ltr' : ''); + }, + /* + captionator.styleCue(DOMNode, cueObject, videoNode) + + Styles and positions cue nodes according to the WebVTT specification. + + First parameter: The DOMNode representing the cue to style. This parameter is mandatory. + + Second parameter: The TextTrackCue itself. + + Third Parameter: The HTMLVideoElement with which the cue is associated. This parameter is mandatory. + + RETURNS: + + Nothing. 
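+
+	As an illustration (sample settings, not from this patch), a WebVTT cue line of
+
+		00:00:01.000 --> 00:00:04.000 line:90% align:middle size:60%
+
+	carries the line, alignment and size values that this function turns into
+	absolute positioning on the cue node.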
+
+*/
+"styleCue": function(DOMNode, cueObject, videoElement) {
+	// Variables for maintaining render calculations
+	var cueX = 0, cueY = 0, cueWidth = 0, cueHeight = 0, cueSize, cueAlignment, cuePaddingLR = 0, cuePaddingTB = 0;
+	var baseFontSize, basePixelFontSize, baseLineHeight, tmpHeightExclusions;
+	var videoHeightInLines, videoWidthInLines, pixelLineHeight, verticalPixelLineHeight, charactersPerLine = 0, characterCount = 0;
+	var characters = 0, lineCount = 0, finalLineCharacterCount = 0, finalLineCharacterHeight = 0, currentLine = 0;
+	var characterX, characterY, characterPosition = 0;
+	var options = videoElement._captionatorOptions || {};
+	var videoMetrics;
+	var maxCueSize = 100, internalTextPosition = 50, textBoundingBoxWidth = 0, textBoundingBoxPercentage = 0, autoSize = true;
+
+	// Function to facilitate vertical text alignments in browsers which do not support writing-mode
+	// (sadly, all the good ones!)
+	var spanify = function(DOMNode) {
+		var stringHasLength = function(textString) { return !!textString.length; };
+		var spanCode = "<span class=\"captionator-cue-character\">";
+		var nodeIndex, currentNode, currentNodeValue, replacementFragment, characterCount = 0;
+		var styleSpan = function(span) {
+			characterCount ++;
+			captionator.applyStyles(span,{
+				"display": "block",
+				"lineHeight": "auto",
+				"height": basePixelFontSize + "px",
+				"width": verticalPixelLineHeight + "px",
+				"textAlign": "center"
+			});
+		};
+
+		for (nodeIndex in DOMNode.childNodes) {
+			if (DOMNode.childNodes.hasOwnProperty(nodeIndex)) {
+				currentNode = DOMNode.childNodes[nodeIndex];
+				if (currentNode.nodeType === 3) {
+					replacementFragment = document.createDocumentFragment();
+					currentNodeValue = currentNode.nodeValue;
+
+					replacementFragment.appendChild(document.createElement("span"));
+
+					// Wrap every character in its own .captionator-cue-character span
+					replacementFragment.childNodes[0].innerHTML =
+							spanCode +
+							currentNodeValue
+								.split(/(.)/)
+								.filter(stringHasLength)
+								.join("</span>" + spanCode) +
+							"</span>";
+
+					[].slice.call(replacementFragment.querySelectorAll("span.captionator-cue-character"),0).forEach(styleSpan);
+
+					currentNode.parentNode.replaceChild(replacementFragment,currentNode);
+				} else if (DOMNode.childNodes[nodeIndex].nodeType === 1) {
+					characterCount += spanify(DOMNode.childNodes[nodeIndex]);
+				}
+			}
+		}
+
+		return characterCount;
+	};
+
+	// Set up the cue canvas
+	videoMetrics = captionator.getNodeMetrics(videoElement);
+
+	// Define storage for the available cue area, diminished as further cues are added
+	// Cues occupy the largest possible area they can, either by width or height
+	// (depending on whether the `direction` of the cue is vertical or horizontal)
+	// Cues which have an explicit position set do not detract from this area.
+	// It is the subtitle author's responsibility to ensure they don't overlap if
+	// they decide to override default positioning!
+
+	if (!videoElement._captionator_availableCueArea) {
+		videoElement._captionator_availableCueArea = {
+			"bottom": (videoMetrics.height-videoMetrics.controlHeight),
+			"right": videoMetrics.width,
+			"top": 0,
+			"left": 0,
+			"height": (videoMetrics.height-videoMetrics.controlHeight),
+			"width": videoMetrics.width
+		};
+	}
+
+	if (cueObject.direction === "horizontal") {
+		// Calculate text bounding box
+		// (isn't useful for vertical cues, because we're doing all glyph positioning ourselves.)
+ captionator.applyStyles(DOMNode,{ + "width": "auto", + "position": "static", + "display": "inline-block", + "padding": "1em" + }); + + textBoundingBoxWidth = parseInt(DOMNode.offsetWidth,10); + textBoundingBoxPercentage = Math.floor((textBoundingBoxWidth / videoElement._captionator_availableCueArea.width) * 100); + textBoundingBoxPercentage = textBoundingBoxPercentage <= 100 ? textBoundingBoxPercentage : 100; + } + + // Calculate font metrics + baseFontSize = ((videoMetrics.height * (fontSizeVerticalPercentage/100))/96)*72; + baseFontSize = baseFontSize >= minimumFontSize ? baseFontSize : minimumFontSize; + basePixelFontSize = Math.floor((baseFontSize/72)*96); + baseLineHeight = Math.floor(baseFontSize * lineHeightRatio); + baseLineHeight = baseLineHeight > minimumLineHeight ? baseLineHeight : minimumLineHeight; + pixelLineHeight = Math.ceil((baseLineHeight/72)*96); + verticalPixelLineHeight = pixelLineHeight; + + if (pixelLineHeight * Math.floor(videoMetrics.height / pixelLineHeight) < videoMetrics.height) { + pixelLineHeight = Math.floor(videoMetrics.height / Math.floor(videoMetrics.height / pixelLineHeight)); + baseLineHeight = Math.ceil((pixelLineHeight/96)*72); + } + + if (pixelLineHeight * Math.floor(videoMetrics.width / pixelLineHeight) < videoMetrics.width) { + verticalPixelLineHeight = Math.ceil(videoMetrics.width / Math.floor(videoMetrics.width / pixelLineHeight)); + } + + // Calculate render area height & width in lines + videoHeightInLines = Math.floor(videoElement._captionator_availableCueArea.height / pixelLineHeight); + videoWidthInLines = Math.floor(videoElement._captionator_availableCueArea.width / verticalPixelLineHeight); + + // Calculate cue size and padding + if (parseFloat(String(cueObject.size).replace(/[^\d\.]/ig,"")) === 0) { + // We assume (given a size of 0) that no explicit size was set. + // Depending on settings, we either use the WebVTT default size of 100% (the Captionator.js default behaviour), + // or the proportion of the video the text bounding box takes up (widthwise) as a percentage (proposed behaviour, LeanBack's default) + if (options.sizeCuesByTextBoundingBox === true) { + cueSize = textBoundingBoxPercentage; + } else { + cueSize = 100; + autoSize = false; + } + } else { + autoSize = false; + cueSize = parseFloat(String(cueObject.size).replace(/[^\d\.]/ig,"")); + cueSize = cueSize <= 100 ? cueSize : 100; + } + + cuePaddingLR = cueObject.direction === "horizontal" ? Math.floor(videoMetrics.width * 0.01) : 0; + cuePaddingTB = cueObject.direction === "horizontal" ? 0 : Math.floor(videoMetrics.height * 0.01); + + if (cueObject.linePosition === "auto") { + cueObject.linePosition = cueObject.direction === "horizontal" ? 
videoHeightInLines : videoWidthInLines; + } else if (String(cueObject.linePosition).match(/\%/)) { + cueObject.snapToLines = false; + cueObject.linePosition = parseFloat(String(cueObject.linePosition).replace(/\%/ig,"")); + } + + if (cueObject.direction === "horizontal") { + cueHeight = pixelLineHeight; + + if (cueObject.textPosition !== "auto" && autoSize) { + internalTextPosition = parseFloat(String(cueObject.textPosition).replace(/[^\d\.]/ig,"")); + + // Don't squish the text + if (cueSize - internalTextPosition > textBoundingBoxPercentage) { + cueSize -= internalTextPosition; + } else { + cueSize = textBoundingBoxPercentage; + } + } + + if (cueObject.snapToLines === true) { + cueWidth = videoElement._captionator_availableCueArea.width * (cueSize/100); + } else { + cueWidth = videoMetrics.width * (cueSize/100); + } + + if (cueObject.textPosition === "auto") { + cueX = ((videoElement._captionator_availableCueArea.right - cueWidth) / 2) + videoElement._captionator_availableCueArea.left; + } else { + internalTextPosition = parseFloat(String(cueObject.textPosition).replace(/[^\d\.]/ig,"")); + cueX = ((videoElement._captionator_availableCueArea.right - cueWidth) * (internalTextPosition/100)) + videoElement._captionator_availableCueArea.left; + } + + if (cueObject.snapToLines === true) { + cueY = ((videoHeightInLines-1) * pixelLineHeight) + videoElement._captionator_availableCueArea.top; + } else { + tmpHeightExclusions = videoMetrics.controlHeight + pixelLineHeight + (cuePaddingTB*2); + cueY = (videoMetrics.height - tmpHeightExclusions) * (cueObject.linePosition/100); + } + + } else { + // Basic positioning + cueY = videoElement._captionator_availableCueArea.top; + cueX = videoElement._captionator_availableCueArea.right - verticalPixelLineHeight; + cueWidth = verticalPixelLineHeight; + cueHeight = videoElement._captionator_availableCueArea.height * (cueSize/100); + + // Split into characters, and continue calculating width & positioning with new info + characterCount = spanify(DOMNode); + characters = [].slice.call(DOMNode.querySelectorAll("span.captionator-cue-character"),0); + charactersPerLine = Math.floor((cueHeight-cuePaddingTB*2)/basePixelFontSize); + cueWidth = Math.ceil(characterCount/charactersPerLine) * verticalPixelLineHeight; + lineCount = Math.ceil(characterCount/charactersPerLine); + finalLineCharacterCount = characterCount - (charactersPerLine * (lineCount - 1)); + finalLineCharacterHeight = finalLineCharacterCount * basePixelFontSize; + + // Work out CueX taking into account linePosition... + if (cueObject.snapToLines === true) { + cueX = cueObject.direction === "vertical-lr" ? videoElement._captionator_availableCueArea.left : videoElement._captionator_availableCueArea.right - cueWidth; + } else { + var temporaryWidthExclusions = cueWidth + (cuePaddingLR * 2); + if (cueObject.direction === "vertical-lr") { + cueX = (videoMetrics.width - temporaryWidthExclusions) * (cueObject.linePosition/100); + } else { + cueX = (videoMetrics.width-temporaryWidthExclusions) - ((videoMetrics.width - temporaryWidthExclusions) * (cueObject.linePosition/100)); + } + } + + // Work out CueY taking into account textPosition... 
+ if (cueObject.textPosition === "auto") { + cueY = ((videoElement._captionator_availableCueArea.bottom - cueHeight) / 2) + videoElement._captionator_availableCueArea.top; + } else { + cueObject.textPosition = parseFloat(String(cueObject.textPosition).replace(/[^\d\.]/ig,"")); + cueY = ((videoElement._captionator_availableCueArea.bottom - cueHeight) * (cueObject.textPosition/100)) + + videoElement._captionator_availableCueArea.top; + } + + + // Iterate through the characters and position them accordingly... + currentLine = 0; + characterPosition = 0; + characterX = 0; + characterY = 0; + + characters.forEach(function(characterSpan,characterCount) { + if (cueObject.direction === "vertical-lr") { + characterX = verticalPixelLineHeight * currentLine; + } else { + characterX = cueWidth - (verticalPixelLineHeight * (currentLine+1)); + } + + if (cueObject.alignment === "start" || (cueObject.alignment !== "start" && currentLine < lineCount-1)) { + characterY = (characterPosition * basePixelFontSize) + cuePaddingTB; + } else if (cueObject.alignment === "end") { + characterY = ((characterPosition * basePixelFontSize)-basePixelFontSize) + ((cueHeight+(cuePaddingTB*2))-finalLineCharacterHeight); + } else if (cueObject.alignment === "middle") { + characterY = (((cueHeight - (cuePaddingTB*2))-finalLineCharacterHeight)/2) + (characterPosition * basePixelFontSize); + } + + captionator.applyStyles(characterSpan,{ + "position": "absolute", + "top": characterY + "px", + "left": characterX + "px" + }); + + if (characterPosition >= charactersPerLine-1) { + characterPosition = 0; + currentLine ++; + } else { + characterPosition ++; + } + }); + } + + if (cueObject.direction === "horizontal") { + if (captionator.checkDirection(String(cueObject.text)) === "rtl") { + cueAlignment = {"start":"right","middle":"center","end":"left"}[cueObject.alignment]; + } else { + cueAlignment = {"start":"left","middle":"center","end":"right"}[cueObject.alignment]; + } + } + + captionator.applyStyles(DOMNode,{ + "position": "absolute", + "overflow": "hidden", + "width": cueWidth + "px", + "height": cueHeight + "px", + "top": cueY + "px", + "left": cueX + "px", + "padding": cuePaddingTB + "px " + cuePaddingLR + "px", + "textAlign": cueAlignment, + "backgroundColor": "rgba(" + cueBackgroundColour.join(",") + ")", + "direction": captionator.checkDirection(String(cueObject.text)), + "lineHeight": baseLineHeight + "pt", + "boxSizing": "border-box" + }); + + if (cueObject.direction === "vertical" || cueObject.direction === "vertical-lr") { + // Work out how to shrink the available render area + // If subtracting from the right works out to a larger area, subtract from the right. + // Otherwise, subtract from the left. 
+		if (((cueX - videoElement._captionator_availableCueArea.left) - videoElement._captionator_availableCueArea.left) >=
+			(videoElement._captionator_availableCueArea.right - (cueX + cueWidth))) {
+
+			videoElement._captionator_availableCueArea.right = cueX;
+		} else {
+			videoElement._captionator_availableCueArea.left = cueX + cueWidth;
+		}
+
+		videoElement._captionator_availableCueArea.width =
+			videoElement._captionator_availableCueArea.right -
+			videoElement._captionator_availableCueArea.left;
+
+	} else {
+		// Now shift cue up if required to ensure it's all visible
+		if (DOMNode.scrollHeight > DOMNode.offsetHeight * 1.2) {
+			if (cueObject.snapToLines) {
+				var upwardAdjustmentInLines = 0;
+				while (DOMNode.scrollHeight > DOMNode.offsetHeight * 1.2) {
+					cueHeight += pixelLineHeight;
+					DOMNode.style.height = cueHeight + "px";
+					upwardAdjustmentInLines ++;
+				}
+
+				cueY = cueY - (upwardAdjustmentInLines*pixelLineHeight);
+				DOMNode.style.top = cueY + "px";
+			} else {
+				// Not working by lines, so instead of shifting up, simply throw out old cueY calculation
+				// and completely recalculate its value
+				cueHeight = (DOMNode.scrollHeight + cuePaddingTB);
+				tmpHeightExclusions = videoMetrics.controlHeight + cueHeight + (cuePaddingTB*2);
+				cueY = (videoMetrics.height - tmpHeightExclusions) * (cueObject.linePosition/100);
+
+				DOMNode.style.height = cueHeight + "px";
+				DOMNode.style.top = cueY + "px";
+			}
+		}
+
+		// Work out how to shrink the available render area
+		// If subtracting from the bottom works out to a larger area, subtract from the bottom.
+		// Otherwise, subtract from the top.
+		if (((cueY - videoElement._captionator_availableCueArea.top) - videoElement._captionator_availableCueArea.top) >=
+			(videoElement._captionator_availableCueArea.bottom - (cueY + cueHeight)) &&
+			videoElement._captionator_availableCueArea.bottom > cueY) {
+
+			videoElement._captionator_availableCueArea.bottom = cueY;
+		} else {
+			if (videoElement._captionator_availableCueArea.top < cueY + cueHeight) {
+				videoElement._captionator_availableCueArea.top = cueY + cueHeight;
+			}
+		}
+
+		videoElement._captionator_availableCueArea.height =
+			videoElement._captionator_availableCueArea.bottom -
+			videoElement._captionator_availableCueArea.top;
+	}
+
+	// DEBUG FUNCTIONS
+	// This function can be used for debugging WebVTT captions. It will not be
+	// included in production versions of Captionator.
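+	// Debug rendering is opt-in (illustrative sketch):
+	//
+	//     captionator.captionify(videoElement, null, { debugMode: true });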
+	// -----------------------------------------------------------------------
+	if (options.debugMode) {
+		var debugCanvas, debugContext;
+		var generateDebugCanvas = function() {
+			if (!debugCanvas) {
+				if (videoElement._captionatorDebugCanvas) {
+					debugCanvas = videoElement._captionatorDebugCanvas;
+					debugContext = videoElement._captionatorDebugContext;
+				} else {
+					debugCanvas = document.createElement("canvas");
+					debugCanvas.setAttribute("width",videoMetrics.width);
+					debugCanvas.setAttribute("height",videoMetrics.height - videoMetrics.controlHeight);
+					document.body.appendChild(debugCanvas);
+					captionator.applyStyles(debugCanvas,{
+						"position": "absolute",
+						"top": videoMetrics.top + "px",
+						"left": videoMetrics.left + "px",
+						"width": videoMetrics.width + "px",
+						"height": (videoMetrics.height - videoMetrics.controlHeight) + "px",
+						"zIndex": 3000
+					});
+
+					debugContext = debugCanvas.getContext("2d");
+					videoElement._captionatorDebugCanvas = debugCanvas;
+					videoElement._captionatorDebugContext = debugContext;
+				}
+			}
+		};
+
+		var clearDebugCanvas = function() {
+			generateDebugCanvas();
+			// Resetting the width attribute clears the canvas
+			debugCanvas.setAttribute("width",videoMetrics.width);
+		};
+
+		var drawLines = function() {
+			var lineIndex;
+
+			// Set up canvas for drawing debug information
+			generateDebugCanvas();
+
+			debugContext.strokeStyle = "rgba(255,0,0,0.5)";
+			debugContext.lineWidth = 1;
+
+			// Draw horizontal line dividers
+			debugContext.beginPath();
+			for (lineIndex = 0; lineIndex < videoHeightInLines; lineIndex ++) {
+				debugContext.moveTo(0.5,(lineIndex*pixelLineHeight)+0.5);
+				debugContext.lineTo(videoMetrics.width,(lineIndex*pixelLineHeight)+0.5);
+			}
+
+			debugContext.closePath();
+			debugContext.stroke();
+			debugContext.beginPath();
+			debugContext.strokeStyle = "rgba(0,255,0,0.5)";
+
+			// Draw vertical line dividers
+			// Right to left, vertical
+			for (lineIndex = videoWidthInLines; lineIndex >= 0; lineIndex --) {
+				debugContext.moveTo((videoMetrics.width-(lineIndex*verticalPixelLineHeight))-0.5,-0.5);
+				debugContext.lineTo((videoMetrics.width-(lineIndex*verticalPixelLineHeight))-0.5,videoMetrics.height);
+			}
+
+			debugContext.closePath();
+			debugContext.stroke();
+			debugContext.beginPath();
+			debugContext.strokeStyle = "rgba(255,255,0,0.5)";
+
+			// Draw vertical line dividers
+			// Left to right, vertical
+			for (lineIndex = 0; lineIndex <= videoWidthInLines; lineIndex ++) {
+				debugContext.moveTo((lineIndex*verticalPixelLineHeight)+0.5,-0.5);
+				debugContext.lineTo((lineIndex*verticalPixelLineHeight)+0.5,videoMetrics.height);
+			}
+
+			debugContext.stroke();
+
+			videoElement.linesDrawn = true;
+		};
+
+		var drawAvailableArea = function() {
+			generateDebugCanvas();
+
+			debugContext.fillStyle = "rgba(100,100,255,0.5)";
+
+			// fillRect expects x, y, width and height, so convert the cue area's
+			// right/bottom edges into extents
+			debugContext.fillRect(
+						videoElement._captionator_availableCueArea.left,
+						videoElement._captionator_availableCueArea.top,
+						videoElement._captionator_availableCueArea.width,
+						videoElement._captionator_availableCueArea.height);
+			debugContext.stroke();
+
+		};
+
+		clearDebugCanvas();
+		drawAvailableArea();
+		drawLines();
+	}
+	// END DEBUG FUNCTIONS
+},
+/*
+	captionator.styleCueCanvas(VideoNode)
+
+	Styles and positions a canvas (not a <canvas> element - just a div) for displaying cues on a video.
+	If the HTMLVideoElement in question does not have a canvas, one is created for it.
+
+	First parameter: The HTMLVideoElement for which the cue canvas will be styled/created. This parameter is mandatory.
+
+	RETURNS:
+
+	Nothing.
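+
+	Example (illustrative only): this is called internally by rebuildCaptions, but
+	can be invoked directly to pre-build the overlay for a video:
+
+		captionator.styleCueCanvas(document.getElementById("myVideo"));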
+ + */ + "styleCueCanvas": function(videoElement) { + var baseFontSize, baseLineHeight; + var containerObject; + var containerID; + var options = videoElement._captionatorOptions instanceof Object ? videoElement._captionatorOptions : {}; + + if (!(videoElement instanceof HTMLVideoElement)) { + throw new Error("Cannot style a cue canvas for a non-video node!"); + } + + if (videoElement._containerObject) { + containerObject = videoElement._containerObject; + containerID = containerObject.id; + } + + if (!containerObject) { + // visually display captions + containerObject = document.createElement("div"); + containerObject.className = "captionator-cue-canvas"; + containerID = captionator.generateID(); + containerObject.id = containerID; + + // We can choose to append the canvas to an element other than the body. + // If this option is specified, we no longer use the offsetTop/offsetLeft of the video + // to define the position, we just inherit it. + // + // options.appendCueCanvasTo can be an HTMLElement, or a DOM query. + // If the query fails, the canvas will be appended to the body as normal. + // If the query is successful, the canvas will be appended to the first matched element. + + if (options.appendCueCanvasTo) { + var canvasParentNode = null; + + if (options.appendCueCanvasTo instanceof HTMLElement) { + canvasParentNode = options.appendCueCanvasTo; + } else if (typeof(options.appendCueCanvasTo) === "string") { + try { + var canvasSearchResult = document.querySelectorAll(options.appendCueCanvasTo); + if (canvasSearchResult.length > 0) { + canvasParentNode = canvasSearchResult[0]; + } else { throw null; /* Bounce to catch */ } + } catch(error) { + canvasParentNode = document.body; + options.appendCueCanvasTo = false; + } + } else { + canvasParentNode = document.body; + options.appendCueCanvasTo = false; + } + + canvasParentNode.appendChild(containerObject); + } else { + document.body.appendChild(containerObject); + } + + videoElement._containerObject = containerObject; + // TODO(silvia): we should only do aria-live on descriptions and that doesn't need visual display + containerObject.setAttribute("aria-live","polite"); + containerObject.setAttribute("aria-atomic","true"); + } else if (!containerObject.parentNode) { + document.body.appendChild(containerObject); + } + + // TODO(silvia): we should not really muck with the aria-describedby attribute of the video + if (String(videoElement.getAttribute("aria-describedby")).indexOf(containerID) === -1) { + var existingValue = videoElement.hasAttribute("aria-describedby") ? videoElement.getAttribute("aria-describedby") + " " : ""; + videoElement.setAttribute("aria-describedby",existingValue + containerID); + } + + // Set up the cue canvas + var videoMetrics = captionator.getNodeMetrics(videoElement); + + // Set up font metrics + baseFontSize = ((videoMetrics.height * (fontSizeVerticalPercentage/100))/96)*72; + baseFontSize = baseFontSize >= minimumFontSize ? baseFontSize : minimumFontSize; + baseLineHeight = Math.floor(baseFontSize * lineHeightRatio); + baseLineHeight = baseLineHeight > minimumLineHeight ? baseLineHeight : minimumLineHeight; + + // Style node! + captionator.applyStyles(containerObject,{ + "position": "absolute", + "overflow": "hidden", + "zIndex": 100, + "height": (videoMetrics.height - videoMetrics.controlHeight) + "px", + "width": videoMetrics.width + "px", + "top": (options.appendCueCanvasTo ? 0 : videoMetrics.top) + "px", + "left": (options.appendCueCanvasTo ? 
0 : videoMetrics.left) + "px",
+		"color": "white",
+		"fontFamily": "Verdana, Helvetica, Arial, sans-serif",
+		"fontSize": baseFontSize + "pt",
+		"lineHeight": baseLineHeight + "pt",
+		"boxSizing": "border-box"
+	});
+
+	// Defeat a horrid Chrome 10 video bug
+	// http://stackoverflow.com/questions/5289854/chrome-10-custom-video-interface-problem/5400438#5400438
+	if (window.navigator.userAgent.toLowerCase().indexOf("chrome/10") > -1) {
+		containerObject.style.backgroundColor = "rgba(0,0,0,0.01" + Math.random().toString().replace(".","") + ")";
+	}
+},
+/*
+	captionator.parseCaptions(string captionData, object options)
+
+	Accepts and parses SRT caption/subtitle data. Will extend for WebVTT shortly. Perhaps non-JSON WebVTT will work already?
+	This function has been intended from the start to (hopefully) loosely parse both. I'll patch it as required.
+
+	First parameter: Entire text data (UTF-8) of the retrieved SRT/WebVTT file. This parameter is mandatory. (really - what did
+	you expect it was going to do without it!)
+
+	Second parameter: Captionator internal options object. See the documentation for allowed values.
+
+	RETURNS:
+
+	An array of TextTrackCue Objects in initial state.
+
+*/
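+/*
+	For instance (sample data, not from this patch), an SRT chunk such as
+
+		1
+		00:00:01,000 --> 00:00:04,000
+		Eeny, meeny, miny, moe,
+
+	parses to a single cue object, as does the equivalent WebVTT chunk with dots
+	instead of commas in the timestamps.
+*/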
+"parseCaptions": function(captionData, options) {
+	// Be liberal in what you accept from others...
+	options = options instanceof Object ? options : {};
+	var fileType = "", subtitles = [];
+	var cueStyles = "";
+	var cueDefaults = [];
+
+	// Set up timestamp parsers - SRT does WebVTT timestamps as well.
+	var SUBTimestampParser = /^(\d{2})?:?(\d{2}):(\d{2})\.(\d+)\,(\d{2})?:?(\d{2}):(\d{2})\.(\d+)\s*(.*)/;
+	var SBVTimestampParser = /^(\d+)?:?(\d{2}):(\d{2})\.(\d+)\,(\d+)?:?(\d{2}):(\d{2})\.(\d+)\s*(.*)/;
+	var SRTTimestampParser = /^(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)\s+\-\-\>\s+(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)\s*(.*)/;
+	var SRTChunkTimestampParser = /(\d{2})?:?(\d{2}):(\d{2})[\.\,](\d+)/;
+	var GoogleTimestampParser = /^([\d\.]+)\s+\+([\d\.]+)\s*(.*)/;
+	var LRCTimestampParser = /^\[(\d{2})?:?(\d{2})\:(\d{2})\.(\d{2})\]\s*(.*?)$/i;
+	var WebVTTDEFAULTSCueParser = /^(DEFAULTS|DEFAULT)\s+\-\-\>\s+(.*)/g;
+	var WebVTTSTYLECueParser = /^(STYLE|STYLES)\s+\-\-\>\s*\n([\s\S]*)/g;
+	var WebVTTCOMMENTCueParser = /^(COMMENT|COMMENTS)\s+\-\-\>\s+(.*)/g;
+
+	if (captionData) {
+		// This function parses and validates cue HTML/VTT tokens, and converts them into something understandable to the renderer.
+		var processCaptionHTML = function processCaptionHTML(inputHTML) {
+			var cueStructure = new captionator.CaptionatorCueStructure(inputHTML,options),
+				cueSplit = [],
+				splitIndex,
+				currentToken,
+				currentContext,
+				stack = [],
+				stackIndex = 0,
+				chunkTimestamp,
+				timeData;
+
+			var hasRealTextContent = function(textInput) {
+				return !!textInput.replace(/[^a-z0-9]+/ig,"").length;
+			};
+
+			// Process out special cue spans
+			cueSplit = inputHTML
+						.split(/(<\/?[^>]+>)/ig)
+						.filter(function(cuePortionText) {
+							return !!cuePortionText.replace(/\s*/ig,"");
+						});
+
+			currentContext = cueStructure;
+			for (splitIndex in cueSplit) {
+				if (cueSplit.hasOwnProperty(splitIndex)) {
+					currentToken = cueSplit[splitIndex];
+
+					if (currentToken.substr(0,1) === "<") {
+						if (currentToken.substr(1,1) === "/") {
+							// Closing tag
+							var TagName = currentToken.substr(2).split(/[\s>]+/g)[0];
+							if (stack.length > 0) {
+								// Scan backwards through the stack to determine whether we've got an open tag somewhere to close.
+								var stackScanDepth = 0;
+								for (stackIndex = stack.length-1; stackIndex >= 0; stackIndex --) {
+									var parentContext = stack[stackIndex][stack[stackIndex].length-1];
+									stackScanDepth = stackIndex;
+									if (parentContext.token === TagName) { break; }
+								}
+
+								currentContext = stack[stackScanDepth];
+								stack = stack.slice(0,stackScanDepth);
+							} else {
+								// Tag mismatch!
+							}
+						} else {
+							// Opening Tag
+							// Check whether the tag is valid according to the WebVTT specification
+							// If not, don't allow it (unless the sanitiseCueHTML option is explicitly set to false)
+
+							if ((	currentToken.substr(1).match(SRTChunkTimestampParser) ||
+									currentToken.match(/^<v\s+[^>]+>/i) ||
+									currentToken.match(/^<c[a-z0-9\-\_\.]+>/) ||
+									currentToken.match(/^<(b|i|u|ruby|rt)>/)) ||
+								options.sanitiseCueHTML !== false) {
+
+								var tmpObject = {
+									"token": currentToken.replace(/[<\/>]+/ig,"").split(/[\s\.]+/)[0],
+									"rawToken": currentToken,
+									"children": []
+								};
+
+								if (tmpObject.token === "v") {
+									tmpObject.voice = currentToken.match(/^<v\s*([^>]+)>/i)[1];
+								} else if (tmpObject.token === "c") {
+									tmpObject.classes = currentToken
+										.replace(/[<\/>\s]+/ig,"")
+										.split(/[\.]+/ig)
+										.slice(1)
+										.filter(hasRealTextContent);
+								} else if (!!(chunkTimestamp = tmpObject.rawToken.match(SRTChunkTimestampParser))) {
+									cueStructure.isTimeDependent = true;
+									timeData = chunkTimestamp.slice(1);
+									tmpObject.timeIn =	parseInt((timeData[0]||0) * 60 * 60,10) +	// Hours
+														parseInt((timeData[1]||0) * 60,10) +		// Minutes
+														parseInt((timeData[2]||0),10) +				// Seconds
+														parseFloat("0." + (timeData[3]||0));		// MS
+								}
+
+								currentContext.push(tmpObject);
+								stack.push(currentContext);
+								currentContext = tmpObject.children;
+							}
+						}
+					} else {
+						// Text string
+						if (options.sanitiseCueHTML !== false) {
+							// Escape ampersands first so the entities generated for < and > survive intact
+							currentToken = currentToken
+								.replace(/&/g,"&amp;")
+								.replace(/</g,"&lt;")
+								.replace(/>/g,"&gt;");
+
+							if (!options.ignoreWhitespace) {
+								currentToken = currentToken.replace(/\n+/g,"<br />");
"); + } + } + + currentContext.push(currentToken); + } + } + } + + return cueStructure; + }; + + // This function takes chunks of text representing cues, and converts them into cue objects. + var parseCaptionChunk = function parseCaptionChunk(subtitleElement,objectCount) { + var subtitleParts, timeIn, timeOut, html, timeData, subtitlePartIndex, cueSettings = "", id, specialCueData; + var timestampMatch, tmpCue; + + // WebVTT Special Cue Logic + if ((specialCueData = WebVTTDEFAULTSCueParser.exec(subtitleElement))) { + cueDefaults = specialCueData.slice(2).join(""); + cueDefaults = cueDefaults.split(/\s+/g).filter(function(def) { return def && !!def.length; }); + return null; + } else if ((specialCueData = WebVTTSTYLECueParser.exec(subtitleElement))) { + cueStyles += specialCueData[specialCueData.length-1]; + return null; + } else if ((specialCueData = WebVTTCOMMENTCueParser.exec(subtitleElement))) { + return null; // At this stage, we don't want to do anything with these. + } + + if (fileType === "LRC") { + subtitleParts = [ + subtitleElement.substr(0,subtitleElement.indexOf("]")), + subtitleElement.substr(subtitleElement.indexOf("]")+1) + ]; + } else { + subtitleParts = subtitleElement.split(/\n/g); + } + + // Trim off any blank lines (logically, should only be max. one, but loop to be sure) + while (!subtitleParts[0].replace(/\s+/ig,"").length && subtitleParts.length > 0) { + subtitleParts.shift(); + } + + if (subtitleParts[0].match(/^\s*[a-z0-9]+\s*$/ig)) { + // The identifier becomes the cue ID (when *we* load the cues from file. Programatically created cues can have an ID of whatever.) + id = String(subtitleParts.shift().replace(/\s*/ig,"")); + } else { + // We're not parsing a format with an ID prior to each caption like SRT or WebVTT + id = objectCount; + } + + for (subtitlePartIndex = 0; subtitlePartIndex < subtitleParts.length; subtitlePartIndex ++) { + var timestamp = subtitleParts[subtitlePartIndex]; + + if ((timestampMatch = SRTTimestampParser.exec(timestamp)) || + (timestampMatch = SUBTimestampParser.exec(timestamp)) || + (timestampMatch = SBVTimestampParser.exec(timestamp))) { + + // WebVTT / SRT / SUB (VOBSub) / YouTube SBV style timestamp + + timeData = timestampMatch.slice(1); + + timeIn = parseInt((timeData[0]||0) * 60 * 60,10) + // Hours + parseInt((timeData[1]||0) * 60,10) + // Minutes + parseInt((timeData[2]||0),10) + // Seconds + parseFloat("0." + (timeData[3]||0)); // MS + + timeOut = parseInt((timeData[4]||0) * 60 * 60,10) + // Hours + parseInt((timeData[5]||0) * 60,10) + // Minutes + parseInt((timeData[6]||0),10) + // Seconds + parseFloat("0." + (timeData[7]||0)); // MS + + if (timeData[8]) { + cueSettings = timeData[8]; + } + + } else if (!!(timestampMatch = GoogleTimestampParser.exec(timestamp))) { + + // Google's proposed WebVTT timestamp style + timeData = timestampMatch.slice(1); + + timeIn = parseFloat(timeData[0]); + timeOut = timeIn + parseFloat(timeData[1]); + + if (timeData[2]) { + cueSettings = timeData[2]; + } + } + + // We've got the timestamp - return all the other unmatched lines as the raw subtitle data + subtitleParts = subtitleParts.slice(0,subtitlePartIndex).concat(subtitleParts.slice(subtitlePartIndex+1)); + break; + } + + if (!timeIn && !timeOut) { + // We didn't extract any time information. Assume the cue is invalid! 
+ return null; + } + + // Consolidate cue settings, convert defaults to object + var compositeCueSettings = + cueDefaults + .reduce(function(previous,current,index,array){ + previous[current.split(":")[0]] = current.split(":")[1]; + return previous; + },{}); + + // Loop through cue settings, replace defaults with cue specific settings if they exist + compositeCueSettings = + cueSettings + .split(/\s+/g) + .filter(function(set) { return set && !!set.length; }) + // Convert array to a key/val object + .reduce(function(previous,current,index,array){ + previous[current.split(":")[0]] = current.split(":")[1]; + return previous; + },compositeCueSettings); + + // Turn back into string like the TextTrackCue constructor expects + cueSettings = ""; + for (var key in compositeCueSettings) { + if (compositeCueSettings.hasOwnProperty(key)) { + cueSettings += !!cueSettings.length ? " " : ""; + cueSettings += key + ":" + compositeCueSettings[key]; + } + } + + // The remaining lines are the subtitle payload itself (after removing an ID if present, and the time); + html = options.processCueHTML === false ? subtitleParts.join("\n") : processCaptionHTML(subtitleParts.join("\n")); + tmpCue = new captionator.TextTrackCue(id, timeIn, timeOut, html, cueSettings, false, null); + tmpCue.styleData = cueStyles; + return tmpCue; + }; + + // Begin parsing -------------------- + subtitles = captionData + .replace(/\r\n/g,"\n") + .replace(/\r/g,"\n"); + + if (LRCTimestampParser.exec(captionData)) { + // LRC file... split by single line + subtitles = subtitles.split(/\n+/g); + fileType = "LRC"; + } else { + subtitles = subtitles.split(/\n\n+/g); + } + + subtitles = subtitles.filter(function(lineGroup) { + if (lineGroup.match(/^WEBVTT(\s*FILE)?/ig)) { + fileType = "WebVTT"; + return false; + } else { + if (lineGroup.replace(/\s*/ig,"").length) { + return true; + } + return false; + } + }) + .map(parseCaptionChunk) + .filter(function(cue) { + // In the parseCaptionChunk function, we return null for special and malformed cues, + // and cues we want to ignore, rather than expose to JS. Filter these out now. + if (cue !== null) { + return true; + } + + return false; + }); + + return subtitles; + } else { + throw new Error("Required parameter captionData not supplied."); + } + } + }; + + window.captionator = captionator; +})(); diff --git a/tests/html/TestCaptions.en.vtt b/tests/html/TestCaptions.en.vtt new file mode 100644 index 0000000..46db043 --- /dev/null +++ b/tests/html/TestCaptions.en.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Catch a tiger by the toe + +3 +00:00:05.99 --> 00:00:08.05 +If he hollers let him go + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny moe. + +5 +00:00:12.70 --> 00:00:14.64 +I'm Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +and I've come to wonder + +7 +00:00:16.04 --> 00:00:18.38 +whether we have a chance to reorganize our future. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley says that the future of design is human centred. + +9 +00:00:23.23 --> 00:00:25.51 +Most experts agree. + +10 +00:00:25.51 --> 00:00:29.02 +That leaves the question - which human? + +11 +00:00:29.02 --> 00:00:32.46 +An inevitable human condition is diversity. + +12 +00:00:32.46 --> 00:00:34.70 +There's no typical human, + +13 +00:00:34.70 --> 00:00:37.33 +even clones and identical twins are not the same. 
+ +14 +00:00:37.33 --> 00:00:40.42 +We differ from one to the next, + +15 +00:00:40.42 --> 00:00:43.54 +but also from one moment to the next, + +16 +00:00:43.54 --> 00:00:46.84 +from one context to the next. + +17 +00:00:46.84 --> 00:00:49.91 +But diversity and difference become overwhelming + +18 +00:00:49.91 --> 00:00:52.94 +and we develop strategies to deal with this diversity. + +19 +00:00:52.94 --> 00:00:55.56 +We try to make things simpler, + +20 +00:00:55.60 --> 00:00:57.50 +less complex, less chaotic + +21 +00:00:57.50 --> 00:01:00.00 +Another part of the human condition is that + +22 +00:01:00.00 --> 00:01:03.18 +we try to find commonality and connections. + +23 +00:01:03.20 --> 00:01:09.53 +We form groups informal and formal with implicit and explicit criteria. + +24 +00:01:09.55 --> 00:01:13.97 +We organize, we create categories, we filter, we label. + +25 +00:01:13.97 --> 00:01:19.67 +At our most insecure and overwhelmed we divide in two, we create binaries: + +26 +00:01:19.67 --> 00:01:21.86 +male, female + +27 +00:01:21.86 --> 00:01:24.21 +disabled, normal + +28 +00:01:24.21 --> 00:01:27.03 +left, right + +29 +00:01:27.03 --> 00:01:29.57 +us, them. + +30 +00:01:29.57 --> 00:01:34.98 +This all results in issues of who belongs and who is excluded. + +31 +00:01:34.98 --> 00:01:37.53 +Membership in groups can be self assigned, + +32 +00:01:37.53 --> 00:01:40.46 +may be imposed, may even be policed. + +33 +00:01:40.46 --> 00:01:44.17 +Groups are used to assert or assign privileges and powers. + +34 +00:01:44.17 --> 00:01:46.92 +We use groups to judge + +35 +00:01:46.92 --> 00:01:49.22 +values get assigned to groups + +36 +00:01:49.22 --> 00:01:51.88 +often characteristics that have nothing to do with + +37 +00:01:51.88 --> 00:01:53.35 +the original founding properties of groups + +38 +00:01:53.35 --> 00:01:55.98 +are generalized to all individuals in the group. + +39 +00:01:55.98 --> 00:02:00.07 +Sometimes, people who are in an imposed group + +40 +00:02:00.07 --> 00:02:02.00 +take ownership of the group and reform + +41 +00:02:02.00 --> 00:02:04.42 +the classifications and values from within. + +42 +00:02:04.42 --> 00:02:08.44 +Occasionally, someone has the audacity + +43 +00:02:08.44 --> 00:02:11.00 +to break out of the category we have put her in + +44 +00:02:11.00 --> 00:02:16.90 +but to preserve our category, we may dismiss her as an anomaly. + +45 +00:02:16.90 --> 00:02:20.03 +Some groups are more fluid while others are more fixed. + +46 +00:02:20.03 --> 00:02:23.56 +We not only form groups, but groups of groups + +47 +00:02:23.56 --> 00:02:25.66 +and groups, of groups, of groups. + +48 +00:02:25.66 --> 00:02:29.09 +Membership in one group can grant us membership in other groups. + +49 +00:02:29.09 --> 00:02:33.03 +But despite all this, we are diverse, + +50 +00:02:33.03 --> 00:02:34.85 +we are complex, + +51 +00:02:34.85 --> 00:02:36.45 +we are chaotic. + +52 +00:02:36.45 --> 00:02:38.81 +Individually we're different + +53 +00:02:38.81 --> 00:02:40.44 +over time, in different contexts, + +54 +00:02:40.44 --> 00:02:42.35 +in different roles, in different groups. + +55 +00:02:42.35 --> 00:02:45.42 +We need to assert our uniqueness, + +56 +00:02:45.42 --> 00:02:47.86 +we need to form and refine our identity. + +57 +00:02:47.86 --> 00:02:50.91 +We struggle with the identity imposed on us. + +58 +00:02:50.91 --> 00:02:56.36 +Generally, people do not fit easily into assigned categories + +59 +00:02:56.36 --> 00:02:58.98 +and yet we persist in assigning them. 
+ +60 +00:02:58.98 --> 00:03:02.63 +And then, something new comes along + +61 +00:03:02.63 --> 00:03:05.41 +and shakes up our groups, our categories and our rules, + +62 +00:03:05.41 --> 00:03:08.26 +and we need to adjust, rebuild and rethink. + +63 +00:03:08.26 --> 00:03:12.53 +Something like, networks and digital stuff. + +64 +00:03:12.53 --> 00:03:15.47 +This new digital and connected world + +65 +00:03:15.47 --> 00:03:17.87 +puts into question how we group things + +66 +00:03:17.87 --> 00:03:20.75 +and challenges our excuses for leaving people out. + +67 +00:03:20.75 --> 00:03:25.46 +The digital changes our view of time, space and distance + +68 +00:03:25.46 --> 00:03:31.08 +and by extension our view of design, what is possible and what things cost. + +69 +00:03:31.08 --> 00:03:36.04 +Digital things are plastic, mutable, malleable and adaptable. + +70 +00:03:36.04 --> 00:03:39.50 +Before, not everyone could fit, + +71 +00:03:39.50 --> 00:03:42.16 +allowing someone in meant someone else was left out. + +72 +00:03:42.16 --> 00:03:46.06 +In the digital, room is very stretchy. + +73 +00:03:46.06 --> 00:03:49.76 +Before, what we created could not fit everyone + +74 +00:03:49.76 --> 00:03:51.77 +so we made it fit the largest group. + +75 +00:03:51.77 --> 00:03:54.53 +We made it for the group called average or typical + +76 +00:03:54.53 --> 00:03:58.26 +this left out everyone not average or typical. + +77 +00:03:58.26 --> 00:04:03.39 +In the digital reality the things we make can reconfigure, adapt + +78 +00:04:03.39 --> 00:04:06.27 +and take a form that is best for each individual. + +79 +00:04:06.27 --> 00:04:11.90 +In the solid world, each copy cost almost the same as the original. + +80 +00:04:11.90 --> 00:04:14.35 +Consumption actually consumed. + +81 +00:04:14.35 --> 00:04:18.56 +In the digital world, we can copy almost without cost. + +82 +00:04:18.56 --> 00:04:21.00 +Consumption no longer consumes. + +83 +00:04:21.00 --> 00:04:24.52 +Before, it took a great deal of time and effort + +84 +00:04:24.52 --> 00:04:27.23 +to deliver things, especially to people far away. + +85 +00:04:27.23 --> 00:04:30.93 +Now it is as easy to deliver things around the world + +86 +00:04:30.93 --> 00:04:33.13 +as it is to deliver things next door. + +87 +00:04:33.13 --> 00:04:36.85 +Before, if we didn't place things in a fixed spot + +88 +00:04:36.85 --> 00:04:39.53 +we would have a hard time finding them again. + +89 +00:04:39.53 --> 00:04:43.63 +Now we can place them anywhere on the network and + +90 +00:04:43.63 --> 00:04:46.26 +retrieve them anywhere on the network. + +91 +00:04:46.26 --> 00:04:50.13 +Before, we needed to label things unambiguously and simply + +92 +00:04:50.13 --> 00:04:52.80 +so we could recognize them and know what to do with them. + +93 +00:04:52.80 --> 00:04:56.44 +Now we can see a description of each person or thing + +94 +00:04:56.44 --> 00:04:59.02 +that is useful and relevant to our purpose. + +95 +00:04:59.02 --> 00:05:03.01 +And by the way, we have learned that + +96 +00:05:03.01 --> 00:05:06.36 +inclusion and equality are good for all of us. + +97 +00:05:06.36 --> 00:05:09.35 +We are all healthier, wealthier and wiser + +98 +00:05:09.35 --> 00:05:12.19 +when our society is inclusive and equal. + +99 +00:05:12.19 --> 00:05:15.36 +We've also discovered that diverse groups + +100 +00:05:15.36 --> 00:05:18.93 +are more innovative and creative, and better at planning and predicting. 
+ +101 +00:05:18.93 --> 00:05:23.73 +We've experimented with new organization like + +102 +00:05:23.73 --> 00:05:26.33 +most popular, to be ignored + +103 +00:05:26.33 --> 00:05:28.69 +friend, not friend. + +104 +00:05:28.69 --> 00:05:31.03 +But we can do better. + +105 +00:05:31.03 --> 00:05:33.26 +We can afford to be generous in our design, + +106 +00:05:33.26 --> 00:05:35.39 +we have fewer excuses to exclude. + +107 +00:05:35.39 --> 00:05:37.56 +We can be true to our diversity. + +108 +00:05:37.56 --> 00:05:43.06 +Perhaps now, we can find a way to make room for us all. + diff --git a/tests/html/TestCaptions.fr.vtt b/tests/html/TestCaptions.fr.vtt new file mode 100644 index 0000000..e647de8 --- /dev/null +++ b/tests/html/TestCaptions.fr.vtt @@ -0,0 +1,434 @@ +WEBVTT + +1 +00:00:01.77 --> 00:00:04.03 +Eeny, meeny, miny, moe, + +2 +00:00:04.03 --> 00:00:05.99 +Attrape un tigre par le gros orteil + +3 +00:00:05.99 --> 00:00:08.05 +S'il crie, laisse-le aller + +4 +00:00:08.05 --> 00:00:10.96 +Eeny, meeny, miny, moe. + +5 +00:00:12.70 --> 00:00:14.64 +Je suis Jutta Treviranus + +6 +00:00:14.64 --> 00:00:16.04 +et j'en suis venue à me demander + +7 +00:00:16.04 --> 00:00:18.38 +si nous avons une chance de réorganiser notre avenir. + +8 +00:00:18.38 --> 00:00:23.23 +David Kelley affirme que l'avenir du design est centré sur l'humain. + +9 +00:00:23.23 --> 00:00:25.51 +La plupart des experts sont d'accord. + +10 +00:00:25.51 --> 00:00:29.02 +Reste la question : quel humain ? + +11 +00:00:29.02 --> 00:00:32.46 +Une condition humaine est inévitable : la diversité. + +12 +00:00:32.46 --> 00:00:34.70 +Il n'y a aucun humain typique. + +13 +00:00:34.70 --> 00:00:37.33 +Même les clones et les jumeaux identiques ne sont pas les mêmes. + +14 +00:00:37.33 --> 00:00:40.42 +Nous différons de l'un à l'autre, + +15 +00:00:40.42 --> 00:00:43.54 +mais aussi d'un moment à l'autre, + +16 +00:00:43.54 --> 00:00:46.84 +d'un contexte à l'autre. + +17 +00:00:46.84 --> 00:00:49.91 +Mais la diversité et la différence deviennent écrasantes + +18 +00:00:49.91 --> 00:00:52.94 +et nous développons des stratégies pour faire face à cette diversité. + +19 +00:00:52.94 --> 00:00:55.56 +Nous essayons de rendre les choses plus simples, + +20 +00:00:55.60 --> 00:00:57.50 +moins complexes, moins chaotiques. + +21 +00:00:57.50 --> 00:01:00.00 +Une autre partie de la condition humaine est que nous + +22 +00:01:00.00 --> 00:01:03.18 +essayons de trouver des points communs et des connexions. + +23 +00:01:03.20 --> 00:01:09.53 +Nous formons des groupes formels et informels, avec des critères explicites et implicites. + +24 +00:01:09.55 --> 00:01:13.97 +Nous organisons, nous créons des catégories, nous filtrons, nous étiquetons. + +25 +00:01:13.97 --> 00:01:19.67 +Au plus fort de notre insécurité et de notre accablement, nous divisons en deux, nous créons des binaires : + +26 +00:01:19.67 --> 00:01:21.86 +masculin, féminin + +27 +00:01:21.86 --> 00:01:24.21 +handicapé, normal + +28 +00:01:24.21 --> 00:01:27.03 +gauche, droite + +29 +00:01:27.03 --> 00:01:29.57 +nous, eux. + +30 +00:01:29.57 --> 00:01:34.98 +Tout cela soulève la question de qui appartient et de qui est exclu. + +31 +00:01:34.98 --> 00:01:37.53 +L'adhésion à des groupes peut être auto-assignée, + +32 +00:01:37.53 --> 00:01:40.46 +peut être imposée, peut même être policée. + +33 +00:01:40.46 --> 00:01:44.17 +Les groupes servent à affirmer ou à attribuer des privilèges et des pouvoirs.
+ +34 +00:01:44.17 --> 00:01:46.92 +Nous utilisons les groupes pour juger : + +35 +00:01:46.92 --> 00:01:49.22 +des valeurs sont attribuées aux groupes ; + +36 +00:01:49.22 --> 00:01:51.88 +souvent, des caractéristiques qui n'ont rien à voir avec + +37 +00:01:51.88 --> 00:01:53.35 +les propriétés fondatrices originales des groupes + +38 +00:01:53.35 --> 00:01:55.98 +sont généralisées à tous les individus du groupe. + +39 +00:01:55.98 --> 00:02:00.07 +Parfois, les gens qui sont dans un groupe imposé + +40 +00:02:00.07 --> 00:02:02.00 +s'approprient le groupe et réforment + +41 +00:02:02.00 --> 00:02:04.42 +les classifications et les valeurs de l'intérieur. + +42 +00:02:04.42 --> 00:02:08.44 +De temps en temps, quelqu'un a l'audace + +43 +00:02:08.44 --> 00:02:11.00 +de sortir de la catégorie dans laquelle nous l'avons mise, + +44 +00:02:11.00 --> 00:02:16.90 +mais pour préserver notre catégorie, nous pouvons la rejeter comme une anomalie. + +45 +00:02:16.90 --> 00:02:20.03 +Certains groupes sont plus fluides tandis que d'autres sont plus fixes. + +46 +00:02:20.03 --> 00:02:23.56 +Nous ne formons pas seulement des groupes, mais des groupes de groupes + +47 +00:02:23.56 --> 00:02:25.66 +et des groupes, de groupes, de groupes. + +48 +00:02:25.66 --> 00:02:29.09 +L'adhésion à un groupe peut nous accorder l'adhésion à d'autres groupes. + +49 +00:02:29.09 --> 00:02:33.03 +Mais malgré tout cela, nous sommes divers, + +50 +00:02:33.03 --> 00:02:34.85 +nous sommes complexes, + +51 +00:02:34.85 --> 00:02:36.45 +nous sommes chaotiques. + +52 +00:02:36.45 --> 00:02:38.81 +Individuellement, nous sommes différents + +53 +00:02:38.81 --> 00:02:40.44 +au fil du temps, dans des contextes différents, + +54 +00:02:40.44 --> 00:02:42.35 +dans des rôles différents, dans des groupes différents. + +55 +00:02:42.35 --> 00:02:45.42 +Nous avons besoin d'affirmer notre unicité, + +56 +00:02:45.42 --> 00:02:47.86 +nous avons besoin de former et d'affiner notre identité. + +57 +00:02:47.86 --> 00:02:50.91 +Nous luttons avec l'identité qui nous est imposée. + +58 +00:02:50.91 --> 00:02:56.36 +Généralement, les gens ne s'intègrent pas facilement dans les catégories assignées + +59 +00:02:56.36 --> 00:02:58.98 +et pourtant nous persistons à les leur assigner. + +60 +00:02:58.98 --> 00:03:02.63 +Et puis, quelque chose de nouveau arrive + +61 +00:03:02.63 --> 00:03:05.41 +et secoue nos groupes, nos catégories et nos règles, + +62 +00:03:05.41 --> 00:03:08.26 +et nous devons ajuster, reconstruire et repenser. + +63 +00:03:08.26 --> 00:03:12.53 +Quelque chose comme les réseaux et les trucs numériques. + +64 +00:03:12.53 --> 00:03:15.47 +Ce nouveau monde numérique et connecté + +65 +00:03:15.47 --> 00:03:17.87 +remet en question la façon dont nous groupons les choses + +66 +00:03:17.87 --> 00:03:20.75 +et conteste nos excuses pour laisser des gens de côté. + +67 +00:03:20.75 --> 00:03:25.46 +Le numérique change notre vision du temps, de l'espace et de la distance + +68 +00:03:25.46 --> 00:03:31.08 +et, par extension, notre vision du design, de ce qui est possible et de ce que coûtent les choses. + +69 +00:03:31.08 --> 00:03:36.04 +Les choses numériques sont plastiques, mutables, malléables et adaptables. + +70 +00:03:36.04 --> 00:03:39.50 +Avant, tout le monde ne pouvait pas trouver place : + +71 +00:03:39.50 --> 00:03:42.16 +faire entrer quelqu'un signifiait laisser quelqu'un d'autre dehors. + +72 +00:03:42.16 --> 00:03:46.06 +Dans le numérique, la place est très extensible.
 + +73 +00:03:46.06 --> 00:03:49.76 +Avant, ce que nous créions ne pouvait pas convenir à tous, + +74 +00:03:49.76 --> 00:03:51.77 +alors nous l'adaptions au groupe le plus grand. + +75 +00:03:51.77 --> 00:03:54.53 +Nous le faisions pour le groupe appelé moyen ou typique ; + +76 +00:03:54.53 --> 00:03:58.26 +cela laissait de côté tous ceux qui n'étaient ni moyens ni typiques. + +77 +00:03:58.26 --> 00:04:03.39 +Dans la réalité numérique, les choses que nous faisons peuvent se reconfigurer, s'adapter + +78 +00:04:03.39 --> 00:04:06.27 +et prendre la forme qui convient le mieux à chaque individu. + +79 +00:04:06.27 --> 00:04:11.90 +Dans le monde solide, chaque copie coûtait presque autant que l'original. + +80 +00:04:11.90 --> 00:04:14.35 +La consommation consommait réellement. + +81 +00:04:14.35 --> 00:04:18.56 +Dans le monde numérique, nous pouvons copier presque sans coût. + +82 +00:04:18.56 --> 00:04:21.00 +La consommation ne consomme plus. + +83 +00:04:21.00 --> 00:04:24.52 +Avant, il fallait beaucoup de temps et d'efforts + +84 +00:04:24.52 --> 00:04:27.23 +pour livrer des choses, surtout aux gens très loin. + +85 +00:04:27.23 --> 00:04:30.93 +Maintenant, il est aussi facile de livrer des choses à travers le monde + +86 +00:04:30.93 --> 00:04:33.13 +que de les livrer à côté. + +87 +00:04:33.13 --> 00:04:36.85 +Avant, si on ne plaçait pas les choses à un endroit fixe, + +88 +00:04:36.85 --> 00:04:39.53 +nous avions du mal à les retrouver. + +89 +00:04:39.53 --> 00:04:43.63 +Maintenant, nous pouvons les placer n'importe où sur le réseau et + +90 +00:04:43.63 --> 00:04:46.26 +les récupérer n'importe où sur le réseau. + +91 +00:04:46.26 --> 00:04:50.13 +Avant, nous avions besoin d'étiqueter les choses sans ambiguïté et simplement + +92 +00:04:50.13 --> 00:04:52.80 +afin de pouvoir les reconnaître et savoir quoi en faire. + +93 +00:04:52.80 --> 00:04:56.44 +Maintenant, nous pouvons voir une description de chaque personne ou chose + +94 +00:04:56.44 --> 00:04:59.02 +qui est utile et pertinente pour notre objectif. + +95 +00:04:59.02 --> 00:05:03.01 +Et en passant, nous avons appris que + +96 +00:05:03.01 --> 00:05:06.36 +l'inclusion et l'égalité sont bonnes pour nous tous. + +97 +00:05:06.36 --> 00:05:09.35 +Nous sommes tous plus sains, plus riches et plus sages + +98 +00:05:09.35 --> 00:05:12.19 +quand notre société est inclusive et égalitaire. + +99 +00:05:12.19 --> 00:05:15.36 +Nous avons aussi découvert que les groupes divers + +100 +00:05:15.36 --> 00:05:18.93 +sont plus innovants et créatifs, et meilleurs pour planifier et prévoir. + +101 +00:05:18.93 --> 00:05:23.73 +Nous avons expérimenté de nouveaux modes d'organisation comme + +102 +00:05:23.73 --> 00:05:26.33 +le plus populaire, à ignorer + +103 +00:05:26.33 --> 00:05:28.69 +ami, pas ami. + +104 +00:05:28.69 --> 00:05:31.03 +Mais nous pouvons faire mieux. + +105 +00:05:31.03 --> 00:05:33.26 +Nous pouvons nous permettre d'être généreux dans notre design, + +106 +00:05:33.26 --> 00:05:35.39 +nous avons moins d'excuses pour exclure. + +107 +00:05:35.39 --> 00:05:37.56 +Nous pouvons être fidèles à notre diversité. + +108 +00:05:37.56 --> 00:05:43.06 +Peut-être pouvons-nous maintenant trouver un moyen de faire de la place pour nous tous.
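The .vtt fixtures above are consumed through the player's video.captions option. A minimal usage sketch follows; the option shape is taken from the VideoPlayerHTML5CaptionatorTests.js hunk later in this diff, and wiring TestCaptions.fr.vtt this way is an illustration only, not part of the patch:

// Sketch only: how a test page might attach the French caption fixture.
// The captions entry shape mirrors testOptionsNoCurrentTrack below.
fluid.videoPlayer(".videoPlayer0", {
    video: {
        sources: [{ src: "TestVideo.mp4", type: "video/mp4" }],
        captions: [{
            src: "TestCaptions.fr.vtt",  // fixture added above
            type: "text/vtt",
            srclang: "fr",
            label: "French Subtitles",
            kind: "subtitles"
        }]
    },
    templates: {
        videoPlayer: { href: "../../html/videoPlayer_template.html" }
    }
});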
+ diff --git a/tests/html/TestVideo.mp4 b/tests/html/TestVideo.mp4 new file mode 100644 index 0000000..4605399 Binary files /dev/null and b/tests/html/TestVideo.mp4 differ diff --git a/tests/html/VideoPlayerCaptionLoader-test.html b/tests/html/VideoPlayerCaptionLoader-test.html deleted file mode 100644 index 00ddefd..0000000 --- a/tests/html/VideoPlayerCaptionLoader-test.html +++ /dev/null @@ -1,35 +0,0 @@ - - - - - Video Player Caption Loader Test Suite - - - - - - - - - - - - - - - - - -

Video Player Caption Loader Test Suite

-

-
-

-
    -
    - -
    - -
    - - - diff --git a/tests/html/VideoPlayerControls-test.html b/tests/html/VideoPlayerControls-test.html index b749c0f..b895a9a 100644 --- a/tests/html/VideoPlayerControls-test.html +++ b/tests/html/VideoPlayerControls-test.html @@ -7,7 +7,7 @@ - + @@ -43,21 +43,23 @@

    -
    - -
      -
    • - - -
    • -
    -
    +
      +
    • +
    • +
    - +
    + +
      +
    • +
    • +
    +
    + diff --git a/tests/html/VideoPlayerHTML5Captionator-test.html b/tests/html/VideoPlayerHTML5Captionator-test.html new file mode 100644 index 0000000..60b4d67 --- /dev/null +++ b/tests/html/VideoPlayerHTML5Captionator-test.html @@ -0,0 +1,61 @@ + + + + + Video Player HTML5 Captionator Test Suite + + + + + + + + + + + + + + + + + + + + + + + + + +

    Video Player HTML5 Captionator Test Suite

    +

    +
    +

    +
      +
      + +
      + +
      + +
      + +
      + +
      + +
      + +
      + +
      + +
      + +
      + +
      + + + diff --git a/tests/js/VideoPlayerCaptionLoaderTests.js b/tests/js/VideoPlayerCaptionLoaderTests.js index 1f76105..6df251e 100644 --- a/tests/js/VideoPlayerCaptionLoaderTests.js +++ b/tests/js/VideoPlayerCaptionLoaderTests.js @@ -25,14 +25,12 @@ https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt var options = { model: { captions: { - sources: { - english: { - src: "TestCaption.en.json", - type: "JSONcc" - } - }, - choices: ["english"], - selection: "english", + list: [{ + label: "English", + src: "TestCaption.en.json", + type: "JSONcc" + }], + currentTrack: 0, conversionServiceUrl: "/videoPlayer/conversion_service/index.php", maxNumber: 3, track: undefined diff --git a/tests/js/VideoPlayerControlsTests.js b/tests/js/VideoPlayerControlsTests.js index 15d18fe..a870058 100644 --- a/tests/js/VideoPlayerControlsTests.js +++ b/tests/js/VideoPlayerControlsTests.js @@ -21,13 +21,19 @@ fluid.registerNamespace("fluid.tests"); (function ($) { $(document).ready(function () { + // TODO: The various "fluid.tests.initXXX" functions could probably be refactored to reduce duplication + + var videoPlayerControlsTests = new jqUnit.TestCase("Video Player Controls Tests"); + fluid.tests.toggleButtonDefaults = fluid.defaults("fluid.videoPlayer.controllers.toggleButton"); - fluid.tests.pressEventHandler = function () { + fluid.tests.onPressEventHandler = function () { + expect(1); jqUnit.assertTrue("The onPress event should fire", true); }; fluid.tests.getTooltipCheckString = function (jEl, expectedText) { + expect(1); jEl.mouseover(); var tooltip = $("#" + jEl.attr("aria-describedby")); jqUnit.assertEquals("Tooltip should contain " + expectedText + " initially", expectedText, tooltip.text()); @@ -35,15 +41,13 @@ fluid.registerNamespace("fluid.tests"); }; var baseVideoPlayerOpts = { - model: { - video: { - sources: [ - { - src: "http://royalgiz.fr/videoplayer/video/Richard.Stallman.mp4", - type: "video/mp4" - } - ] - } + video: { + sources: [ + { + src: "http://royalgiz.fr/videoplayer/video/Richard.Stallman.mp4", + type: "video/mp4" + } + ] }, templates: { videoPlayer: { @@ -60,8 +64,6 @@ fluid.registerNamespace("fluid.tests"); return fluid.videoPlayer("#videoPlayer", opts); }; - var videoPlayerControlsTests = new jqUnit.TestCase("Video Player Controls Tests"); - var baseToggleButtonOpts = { selectors: { button: ".test-toggle-button" @@ -74,7 +76,7 @@ fluid.registerNamespace("fluid.tests"); }; function verifyBasicButtonFunctions(buttonEl, name, clickToggles, tooltipReleased, tooltipPressed, stylePressed) { - // 7 assertions + expect(6); jqUnit.assertEquals("There should be exactly one " + name + " button", 1, buttonEl.length); jqUnit.assertEquals(name + " button should have role of 'button'", "button", buttonEl.attr("role")); jqUnit.assertEquals(name + " button should have aria-pressed of 'false' initially", "false", buttonEl.attr("aria-pressed")); @@ -88,7 +90,7 @@ fluid.registerNamespace("fluid.tests"); // TODO: When captions controls are refactored (FLUID-4589), this 'if' might go away // (since toggle button might always toggle) if (clickToggles) { - // 6 assertions + expect(6); buttonEl.click(); jqUnit.assertEquals("After click, " + name + " button should have aria-pressed of 'true'", "true", buttonEl.attr("aria-pressed")); jqUnit.assertTrue("While pressed, " + name + " button should have the 'pressed' style", buttonEl.hasClass(stylePressed)); @@ -104,11 +106,10 @@ fluid.registerNamespace("fluid.tests"); } videoPlayerControlsTests.asyncTest("Toggle button, default 
functionality", function () { - expect(18); - + expect(3); var testComponent = fluid.tests.initToggleButton({ listeners: { - onPress: fluid.tests.pressEventHandler, + onPress: fluid.tests.onPressEventHandler, onReady: function (that) { var toggleButton = that.locate("button"); @@ -156,7 +157,7 @@ fluid.registerNamespace("fluid.tests"); }); videoPlayerControlsTests.asyncTest("Toggle button, prevent the toggle", function () { - expect(4); + expect(3); var testComponent = fluid.tests.initToggleButton({ listeners: { onPress: function () { @@ -180,7 +181,7 @@ fluid.registerNamespace("fluid.tests"); }); videoPlayerControlsTests.asyncTest("Toggle button, overriding strings", function () { - expect(2); + expect(1); var testStrings = { press: "press me", release: "release me" @@ -202,8 +203,197 @@ fluid.registerNamespace("fluid.tests"); }); }); + var baseMenuOpts = { + model: { + languages: [{ + srclang: "klingon", + label: "Klingo√±" + }, { + srclang: "esperanto", + label: "Esp√©ranto" + }, { + srclang: "lolspeak", + label: "LOLspeak" + }, { + srclang: "elvish", + label: "Elv√Æsh" + }], + activeLanguages: [0], + showLanguage: false + } + }; + + fluid.tests.initMenu = function (testOpts) { + var opts = fluid.copy(baseMenuOpts); + $.extend(true, opts, testOpts); + return fluid.videoPlayer.controllers.languageMenu("#basic-menu-test", opts); + }; + + var verifyActivation = function (actionString, that, activatedIndex) { + expect(5); + var menuItems = that.locate("menuItem"); + jqUnit.assertEquals(actionString + " updates the active language", activatedIndex, that.model.activeLanguages[0]); + jqUnit.assertTrue(actionString + " adds the 'active' style to the item", $(menuItems[activatedIndex]).hasClass(that.options.styles.active)); + jqUnit.assertEquals("Only one item is active at a time", 1, $(that.options.selectors.menuItem + "." + that.options.styles.active).length); + jqUnit.assertFalse(actionString + " removes 'selected' style from all items", menuItems.hasClass(that.options.styles.selected)); + jqUnit.notVisible(actionString + " hides the menu", that.container); + }; + + var verifySelection = function (actionString, that, selectedIndex, activeIndex) { + expect(4); + var langList = that.locate("menuItem"); + jqUnit.isVisible(actionString + " shows menu", that.container); + jqUnit.assertTrue(actionString + " adds 'selected' style to the language", $(langList[selectedIndex]).hasClass(that.options.styles.selected)); + jqUnit.assertEquals("Only one item is selected at a time", 1, $(that.options.selectors.menuItem + "." 
+ that.options.styles.selected).length); + jqUnit.assertTrue(actionString + " leaves 'active' style on the active language", $(langList[activeIndex]).hasClass(that.options.styles.active)); + }; + + videoPlayerControlsTests.asyncTest("Language Menu: Default configuration", function () { + var numLangs = baseMenuOpts.model.languages.length; + expect(9); + var testMenu = fluid.tests.initMenu({ + listeners: { + onReady: function (that) { + var langList = that.locate("language"); + jqUnit.assertEquals("Menu should have correct number of languages listed", numLangs, langList.length); + jqUnit.exists("Menu should also have the 'show/hide' option", that.locate("showHide")); + jqUnit.assertFalse("Initially, nothing should have 'selected' style", langList.hasClass(that.options.styles.selected)); + jqUnit.assertTrue("Initially, the 'active language' should have the 'active' style", $(langList[that.model.activeLanguages[0]]).hasClass(that.options.styles.active)); + jqUnit.assertEquals("Initially, 'show/hide' option should have the correct text", that.options.strings.showLanguage, that.locate("showHide").text()); + + jqUnit.notVisible("The menu should be hidden by default", that.container); + that.show(); + jqUnit.isVisible("show() shows the menu", that.container); + that.hide(); + jqUnit.notVisible("hide() hides the menu", that.container); + + that.container.fluid("selectable.select", that.locate("showHide")); + verifySelection("Selecting the 'show/hide' option", that, numLangs, 0); + + that.container.fluid("selectable.select", langList[numLangs - 1]); + verifySelection("Selecting a language", that, numLangs - 1, 0); + + that.applier.modelChanged.addListener("showLanguage", function () { + jqUnit.assertEquals("Activating a new language changes the 'show/hide' option text", that.options.strings.hideLanguage, that.locate("showHide").text()); + that.applier.modelChanged.removeListener("showLanguageChecker"); + }, "showLanguageChecker"); + that.activate(1); + verifyActivation("Activating a new language", that, 1); + + that.show(); + $(that.locate("language")[2]).click(); + verifyActivation("Clicking a language", that, 2); + + // TODO: double-check notes on interaction between keyboard selection and hover, and add tests + start(); + } + } + }); + }); + + videoPlayerControlsTests.asyncTest("Language Menu: Custom 'show/hide' option strings", function () { + var numLangs = baseMenuOpts.model.languages.length; + expect(2); + var testStrings = { + showLanguage: "No one is talking", + hideLanguage: "Please stop all the talking!"
+ }; + var testMenu = fluid.tests.initMenu({ + strings: testStrings, + listeners: { + onReady: function (that) { + var langList = that.locate("language"); + jqUnit.assertEquals("Initially, 'show/hide' option should have the correct custom text", testStrings.showLanguage, that.locate("showHide").text()); + that.activate(1); + jqUnit.assertEquals("Activating an item changes the 'show/hide' option text to the custom text", testStrings.hideLanguage, that.locate("showHide").text()); + + start(); + } + } + }); + }); + + var baseLanguageControlsOpts = { + languages: [{ + srclang: "klingon", + label: "Klingoñ" + }, { + srclang: "esperanto", + label: "Espéranto" + }, { + srclang: "lolspeak", + label: "LOLspeak" + }, { + srclang: "elvish", + label: "Elvîsh" + }], + model: { + currentTracks: { + captions: [0] + }, + displayCaptions: false + }, + currentLanguagePath: "currentTracks.captions", + showHidePath: "displayCaptions" + }; + + fluid.tests.initLangControls = function (testOpts) { + var opts = fluid.copy(baseLanguageControlsOpts); + $.extend(true, opts, testOpts); + return fluid.videoPlayer.controllers.languageControls("#basic-languageControls-test", opts); + }; + + // TODO: These tests could possibly be refactored to reduce duplication + videoPlayerControlsTests.asyncTest("Language Controls: default functionality", function () { + var numLangs = baseLanguageControlsOpts.languages.length; + var testControls = fluid.tests.initLangControls({ + listeners: { + onReady: { + listener: function (that) { + var langList = that.menu.locate("language"); + var showHideOption = that.menu.locate("showHide"); + jqUnit.assertEquals("Menu should have correct number of languages listed", numLangs, langList.length); + jqUnit.notVisible("Menu should not be visible initially", that.menu.container); + jqUnit.assertFalse("'show language' model flag should be false", fluid.get(that.model, that.options.showHidePath)); + jqUnit.assertEquals("'show language' text should be correct", that.options.strings.showLanguage, showHideOption.text()); + jqUnit.assertFalse("Button state should be released", that.button.model.pressed); + + var button = that.locate("button"); + button[0].click(); + jqUnit.isVisible("Clicking the button should show menu", that.menu.container); + jqUnit.assertFalse("Button state should still be released", that.button.model.pressed); + button[0].click(); + jqUnit.notVisible("Clicking the button again should hide menu again", that.menu.container); + + button[0].click(); + langList[1].click(); + jqUnit.notVisible("Show the menu, click a language, menu should hide", that.menu.container); + jqUnit.assertEquals("'current language' should be updated", 1, fluid.get(that.model, that.options.currentLanguagePath)[0]); + jqUnit.assertTrue("'show language' model flag should be true", fluid.get(that.model, that.options.showHidePath)); + jqUnit.assertEquals("'show language' text should be updated", that.options.strings.hideLanguage, showHideOption.text()); + jqUnit.assertTrue("Button state should be pressed", fluid.get(that.button.model, baseLanguageControlsOpts.showHidePath)); + + button[0].click(); + showHideOption[0].click(); + jqUnit.assertFalse("Show the menu, click the show/hide option, 'show language' model flag should be false", fluid.get(that.model, that.options.showHidePath)); + jqUnit.assertEquals("'show language' text should be updated", that.options.strings.showLanguage, showHideOption.text()); + jqUnit.assertFalse("Button state should be released", fluid.get(that.button.model,
baseLanguageControlsOpts.showHidePath)); + jqUnit.assertEquals("'current language' should not be changed", 1, fluid.get(that.model, that.options.currentLanguagePath)[0]); + + button[0].click(); + showHideOption[0].click(); + jqUnit.assertTrue("Click the show/hide option, 'show language' model flag should be true again", fluid.get(that.model, that.options.showHidePath)); + jqUnit.assertEquals("'show language' text should be updated", that.options.strings.hideLanguage, showHideOption.text()); + jqUnit.assertTrue("Button state should be pressed", fluid.get(that.button.model, baseLanguageControlsOpts.showHidePath)); + + start(); + } + } + } + }); + }); + videoPlayerControlsTests.asyncTest("Play button", function () { - expect(13); var testPlayer = fluid.tests.initVideoPlayer({ listeners: { onControllersReady: function (that) { @@ -225,7 +415,7 @@ }; videoPlayerControlsTests.asyncTest("Volume controls", function () { - expect(17); + expect(4); var testVolumeControls = fluid.tests.initVolumeControls({ listeners: { onReady: function (that) { @@ -269,131 +459,8 @@ }); }); - var baseCaptionOpts = { - model: { - captions: { - sources: { - esperanto: { - src: "Test.esp.json", - type: "JSONcc" - }, - klingon: { - src: "Test.kling.json", - type: "JSONcc" - }, - lolspeak: { - src: "Test.lol.json", - type: "JSONcc" - } - }, - choices: ["esperanto", "klingon", "lolspeak", "none"], - names: ["esperanto", "klingon", "lolspeak", "Captions OFF"], - selection: "none" - } - } - }; - - fluid.tests.initCaptionControls = function (testOpts) { - var opts = fluid.copy(baseCaptionOpts); - $.extend(true, opts, testOpts); - return fluid.videoPlayer.controllers.captionControls("#basic-caption-controls-test", opts); - }; - - videoPlayerControlsTests.asyncTest("Caption controls", function () { - expect(31); - var numLangs = Object.keys(baseCaptionOpts.model.captions.sources).length + 1; - var testCaptionControls = fluid.tests.initCaptionControls({ - listeners: { - onReady: function (that) { - var captionsButton = that.locate("button"); - var languageRadioButtons = that.locate("languageButton"); - var languageLabels = that.locate("languageLabel"); - var languageList = that.locate("languageList"); - - verifyBasicButtonFunctions(captionsButton, "Captions", false, "Captions", "Captions", that.captionButton.options.styles.pressed); - - jqUnit.assertEquals("'none' option should say '" + that.options.strings.captionsOff + "' initially", that.options.strings.captionsOff, languageLabels[numLangs - 1].textContent); - jqUnit.assertTrue("'none' option should have the 'selected' style", $(languageLabels[numLangs - 1]).hasClass(that.options.styles.selected)); - jqUnit.assertEquals("Only one label should have selected style", 1, $("."
+ that.options.styles.selected).length); - - jqUnit.assertEquals("There should be " + numLangs + " languages", numLangs, languageRadioButtons.length); - jqUnit.notVisible("The list of languages should not be visible initially", languageList); - - captionsButton.click(); - jqUnit.isVisible("When caption button clicked, the list of languages should show", languageList); - jqUnit.assertEquals("While no caption selected, Captions button should still have aria-pressed of 'false'", "false", captionsButton.attr("aria-pressed")); - captionsButton.click(); - jqUnit.notVisible("When caption button clicked again, the list of languages should hide", languageList); - jqUnit.assertEquals("While no caption selected, Captions button should still have aria-pressed of 'false'", "false", captionsButton.attr("aria-pressed")); - - captionsButton.click(); - jqUnit.assertEquals("Initially, 'none' should be selected", "none", that.model.captions.selection); - languageRadioButtons[1].click(); - jqUnit.assertEquals("After clicking a radio button, another language should be selected", "klingon", that.model.captions.selection); - jqUnit.assertTrue("After selecting a language, captions button should have active style", captionsButton.hasClass(that.captionButton.options.styles.pressed)); - jqUnit.assertEquals("After selecting a language, Captions button should have aria-pressed of 'true'", "true", captionsButton.attr("aria-pressed")); - jqUnit.assertEquals("After selecting a language, 'none' option should say '" + that.options.strings.turnCaptionsOff + "'", that.options.strings.turnCaptionsOff, languageLabels[numLangs - 1].textContent); - jqUnit.assertTrue("Selected option should have the 'selected' style", $(languageLabels[1]).hasClass(that.options.styles.selected)); - jqUnit.assertEquals("Only one label should have selected style", 1, $("." + that.options.styles.selected).length); - - languageRadioButtons[1].click(); - jqUnit.assertTrue("Selected the same option should not change the 'selected' style", $(languageLabels[1]).hasClass(that.options.styles.selected)); - jqUnit.assertEquals("Only one label should have selected style", 1, $("." + that.options.styles.selected).length); - - languageRadioButtons[numLangs-1].click(); - jqUnit.assertEquals("After clicking last radio button (i.e. captions off), 'none' should be selected", "none", that.model.captions.selection); - jqUnit.assertFalse("After turning captions off, captions button should not have active style", captionsButton.hasClass(that.captionButton.options.styles.pressed)); - jqUnit.assertEquals("After turning captions off, Captions button should have aria-pressed of 'false'", "false", captionsButton.attr("aria-pressed")); - jqUnit.assertEquals("After turning captions off, 'none' option should say '" + that.options.strings.captionsOff + "'", that.options.strings.captionsOff, languageLabels[numLangs - 1].textContent); - jqUnit.assertTrue("After turning captions off, 'none' option should have the 'selected' style", $(languageLabels[numLangs - 1]).hasClass(that.options.styles.selected)); - jqUnit.assertEquals("Only one label should have selected style", 1, $("." 
+ that.options.styles.selected).length); - - start(); - } - } - }); - }); - - videoPlayerControlsTests.asyncTest("Caption controls integration (some tests fail: will be addressed with FLUID-4589)", function () { -// TODO: this is a workaround for FLUID-4592 -// expect(8); - expect(6); - var captionOpts = fluid.copy(baseCaptionOpts); - var numLangs = Object.keys(baseCaptionOpts.model.captions.sources).length + 1; - $.extend(true, captionOpts, { - listeners: { - onReady: function (that) { - var captionButton = that.controllers.captionControls.locate("button"); - var languageRadioButtons = that.controllers.captionControls.locate("languageButton"); - var languageList = that.controllers.captionControls.locate("languageList"); - var captionArea = that.controllers.captionControls.locate("captionArea"); - -// TODO: this is a workaround for FLUID-4592: a default caption *must* be loaded -// for the intervalEventsConductor to be created -// jqUnit.assertEquals("Initially, captions should not be showing", "none", that.model.captions.selection); -// jqUnit.notVisible("The caption area should be hidden initially", captionArea); - jqUnit.notVisible("The list of languages should not be visible initially", languageList); - - captionButton.click(); - jqUnit.isVisible("When caption button clicked, the list of languages should show", languageList); - - $(languageRadioButtons[0]).click(); - jqUnit.assertEquals("After clicking a radio button, a caption should be selected", baseCaptionOpts.model.captions.choices[0], that.model.captions.selection); - jqUnit.isVisible("The caption area should show", captionArea); - - $(languageRadioButtons[numLangs-1]).click(); - jqUnit.assertEquals("After clicking the 'none' radio button, no caption should be selected", baseCaptionOpts.model.captions.choices[numLangs-1], that.model.captions.selection); - jqUnit.notVisible("The caption area should hide", captionArea); - - start(); - } - } - }); - var testPlayer = fluid.tests.initVideoPlayer(captionOpts); - }); - videoPlayerControlsTests.asyncTest("Fullscreen button", function () { - expect(16); + expect(3); var testPlayer = fluid.tests.initVideoPlayer({ listeners: { onControllersReady: function (that) { @@ -413,5 +480,6 @@ }); }); + }); })(jQuery); diff --git a/tests/js/VideoPlayerHTML5CaptionatorTests.js b/tests/js/VideoPlayerHTML5CaptionatorTests.js new file mode 100644 index 0000000..2a63466 --- /dev/null +++ b/tests/js/VideoPlayerHTML5CaptionatorTests.js @@ -0,0 +1,219 @@ +/* +Copyright 2012 OCAD University + +Licensed under the Educational Community License (ECL), Version 2.0 or the New +BSD license. You may not use this file except in compliance with one of these +Licenses. + +You may obtain a copy of the ECL 2.0 License and BSD License at +https://github.com/fluid-project/infusion/raw/master/Infusion-LICENSE.txt + + */ + +// Declare dependencies +/*global fluid, jqUnit, expect, jQuery, start*/ + +// JSLint options +/*jslint white: true, funcinvoke: true, undef: true, newcap: true, nomen: true, regexp: true, bitwise: true, browser: true, forin: true, maxerr: 100, indent: 4 */ + +(function ($) { + $(document).ready(function () { + fluid.setLogging(false); // disable logging so it does not clutter Firebug in Firefox + + // Containers: a separate one per test
+ var container = [".videoPlayer0", ".videoPlayer1", ".videoPlayer2", ".videoPlayer3", ".videoPlayer4"]; + + // selector to find if the captionator div is present on the webpage + var captionatorSelector = ".captionator-cue-canvas"; + + var videoPlayerCaptionatorTests = new jqUnit.TestCase("Video Player HTML5 Captionator Test Suite"); + + var testOptionsNoCaptions = { + video: { + sources: [ + { + src: "TestVideo.mp4", + type: "video/mp4" + } + ] + }, + templates: { + videoPlayer: { + href: "../../html/videoPlayer_template.html" + } + } + }; + + var testOptionsNoCurrentTrack = {}; + fluid.merge(null, testOptionsNoCurrentTrack, testOptionsNoCaptions); + fluid.merge(null, testOptionsNoCurrentTrack, { + video: { + captions: [ + { + src: "ReorganizeFuture.en.vtt", + type: "text/vtt", + srclang: "en", + label: "English Subtitles", + kind: "subtitles" + }, + { + src: "ReorganizeFuture.fr.vtt", + type: "text/vtt", + srclang: "fr", + label: "French Subtitles", + kind: "subtitles" + } + ] + } + }); + + var testOptionsFull = {}; + fluid.merge(null, testOptionsFull, testOptionsNoCurrentTrack); + fluid.merge(null, testOptionsFull, { + model: { + currentTracks: { + captions: [0] + }, + displayCaptions: true + } + }); + + // videoPlayer creation + var initVideoPlayer = function (container, options, callback) { + options = options || {}; + + fluid.merge(null, options, { + listeners: { + onReady: function (that) { + callback(that); + } + } + }); + + return fluid.videoPlayer(container, options); + }; + + // Function to set or unset the HTML5 test environment + var setupEnvironment = function (withHtml5) { + if (withHtml5) { + fluid.staticEnvironment.browserHtml5 = fluid.typeTag("fluid.browser.html5"); + } else { + fluid.staticEnvironment.browserHtml5 = undefined; + } + }; + + // A template function which checks captionator initialization depending on the provided options and config + var testInit = function (config) { + expect(2); + + setupEnvironment(config.isHTML5); + + config.testComponentFunc = config.hasComponent ? jqUnit.assertNotUndefined : jqUnit.assertUndefined; + config.componentStr = config.hasComponent ? "html5Captionator has been instantiated" + : "html5Captionator has NOT been instantiated"; + config.domStr = config.hasDOMElement ? "Captionator DIV is present in the DOM"
+ : "Captionator DIV is NOT present in the DOM"; + + initVideoPlayer(container[config.testIndex], config.options, function (videoPlayer) { + config.testComponentFunc(config.componentStr, videoPlayer.html5Captionator); + jqUnit.assertEquals(config.domStr, config.hasDOMElement ? 1 : 0, $(captionatorSelector).length); + start(); + }); + }; + + videoPlayerCaptionatorTests.asyncTest("NO HTML5: html5Captionator was not initialized", function () { + testInit({ + testIndex: 0, + options: testOptionsFull, + isHTML5: false, + hasComponent: false, + hasDOMElement: false + }); + }); + + videoPlayerCaptionatorTests.asyncTest("HTML5: html5Captionator was initialized but without tracks", function () { + testInit({ + testIndex: 1, + options: testOptionsNoCaptions, + isHTML5: true, + hasComponent: true, + hasDOMElement: false + }); + }); + + videoPlayerCaptionatorTests.asyncTest("HTML5: html5Captionator was initialized", function () { + testInit({ + testIndex: 2, + options: testOptionsFull, + isHTML5: true, + hasComponent: true, + hasDOMElement: true + }); + }); + + videoPlayerCaptionatorTests.asyncTest("html5Captionator changing tracks and more", function () { + var testIndex = 3; + + expect(7); + + setupEnvironment(true); + + initVideoPlayer(container[testIndex], testOptionsFull, function (videoPlayer) { + + jqUnit.assertNotUndefined("html5Captionator has been instantiated", videoPlayer.html5Captionator); + + var tracks = videoPlayer.html5Captionator.container[0].tracks; + + jqUnit.assertEquals("English subtitles are showing", captionator.TextTrack.SHOWING, tracks[0].mode); + jqUnit.assertEquals("French subtitles are NOT showing", captionator.TextTrack.OFF, tracks[1].mode); + + fluid.videoPlayer.html5Captionator.showCurrentTrack([1], tracks, videoPlayer.html5Captionator.options.captions); + + jqUnit.assertEquals("English subtitles are NOT showing", captionator.TextTrack.OFF, tracks[0].mode); + jqUnit.assertEquals("French subtitles are showing", captionator.TextTrack.SHOWING, tracks[1].mode); + + fluid.videoPlayer.html5Captionator.hideAllTracks(tracks); + + jqUnit.assertEquals("English subtitles are NOT showing", captionator.TextTrack.OFF, tracks[0].mode); + jqUnit.assertEquals("French subtitles are NOT showing", captionator.TextTrack.OFF, tracks[1].mode); + + start(); + }); + }); + + videoPlayerCaptionatorTests.asyncTest("html5Captionator without currentTrack", function () { + var testIndex = 4; + + expect(6); + + setupEnvironment(true); + + initVideoPlayer(container[testIndex], testOptionsNoCurrentTrack, function (videoPlayer) { + + jqUnit.assertUndefined("currentTracks is not defined in the test options", testOptionsNoCurrentTrack.currentTracks); + + jqUnit.assertNotUndefined("html5Captionator has been instantiated", videoPlayer.html5Captionator); + + var tracks = videoPlayer.html5Captionator.container[0].tracks; + + jqUnit.assertEquals("Current track is NOT empty in the html5Captionator model; it has only one element in it", + 1, videoPlayer.html5Captionator.model.currentTracks.captions.length); + + jqUnit.assertEquals("And this element is the index of the first element in the array of captions", + 0, videoPlayer.html5Captionator.model.currentTracks.captions[0]); + + jqUnit.assertEquals("English subtitles should default to showing", captionator.TextTrack.SHOWING, tracks[0].mode); + jqUnit.assertEquals("French subtitles are NOT showing", captionator.TextTrack.OFF, tracks[1].mode); + + start(); + }); + }); + + + }); +})(jQuery);