# Form / Audio Annotator
# Use when you need to annotate audio.
# #form #annotator #audio
# ---
import os

from h2o_wave import main, app, Q, ui


@app('/demo')
async def serve(q: Q):
    # Upload the sample audio file to the Wave server once, on first visit.
    if not q.app.initialized:
        example_dir = os.path.dirname(os.path.realpath(__file__))
        q.app.uploaded_mp3, = await q.site.upload([os.path.join(example_dir, 'audio_annotator_sample.mp3')])
        q.app.initialized = True

    if q.args.annotator is not None:
        # Form was submitted - echo the captured annotations back to the user.
        q.page['example'].items = [
            ui.text(f'annotator={q.args.annotator}'),
            ui.button(name='back', label='Back', primary=True),
        ]
    else:
        # Initial render (or "Back" clicked) - show the annotator form.
        q.page['example'] = ui.form_card(box='1 1 7 -1', items=[
            ui.audio_annotator(
                name='annotator',
                title='Drag to annotate',
                path=q.app.uploaded_mp3,
                tags=[
                    ui.audio_annotator_tag(name='f', label='Flute', color='$blue'),
                    ui.audio_annotator_tag(name='d', label='Drum', color='$brown'),
                ],
            ),
            ui.button(name='submit', label='Submit', primary=True)
        ])
    await q.page.save()
class AudioAnnotatorTag:
    """Create a unique tag type for use in an audio annotator.
    """
    def __init__(
            self,
            name: str,
            label: str,
            color: str,
    ):
        _guard_scalar('AudioAnnotatorTag.name', name, (str,), True, False, False)
        _guard_scalar('AudioAnnotatorTag.label', label, (str,), False, False, False)
        _guard_scalar('AudioAnnotatorTag.color', color, (str,), False, False, False)
        self.name = name
        """An identifying name for this tag."""
        self.label = label
        """Text to be displayed for the annotation."""
        self.color = color
        """Hex or RGB color string to be used as the background color."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotatorTag.name', self.name, (str,), True, False, False)
        _guard_scalar('AudioAnnotatorTag.label', self.label, (str,), False, False, False)
        _guard_scalar('AudioAnnotatorTag.color', self.color, (str,), False, False, False)
        return _dump(
            name=self.name,
            label=self.label,
            color=self.color,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotatorTag':
        """Creates an instance of this class using the contents of a dict."""
        __d_name: Any = __d.get('name')
        _guard_scalar('AudioAnnotatorTag.name', __d_name, (str,), True, False, False)
        __d_label: Any = __d.get('label')
        _guard_scalar('AudioAnnotatorTag.label', __d_label, (str,), False, False, False)
        __d_color: Any = __d.get('color')
        _guard_scalar('AudioAnnotatorTag.color', __d_color, (str,), False, False, False)
        name: str = __d_name
        label: str = __d_label
        color: str = __d_color
        return AudioAnnotatorTag(
            name,
            label,
            color,
        )


class AudioAnnotatorItem:
    """Create an annotator item with initial selected tags or no tags.
    """
    def __init__(
            self,
            start: float,
            end: float,
            tag: str,
    ):
        _guard_scalar('AudioAnnotatorItem.start', start, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.end', end, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.tag', tag, (str,), False, False, False)
        self.start = start
        """The start of the audio annotation in seconds."""
        self.end = end
        """The end of the audio annotation in seconds."""
        self.tag = tag
        """The `name` of the audio annotator tag to refer to for the `label` and `color` of this item."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotatorItem.start', self.start, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.end', self.end, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.tag', self.tag, (str,), False, False, False)
        return _dump(
            start=self.start,
            end=self.end,
            tag=self.tag,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotatorItem':
        """Creates an instance of this class using the contents of a dict."""
        __d_start: Any = __d.get('start')
        _guard_scalar('AudioAnnotatorItem.start', __d_start, (float, int,), False, False, False)
        __d_end: Any = __d.get('end')
        _guard_scalar('AudioAnnotatorItem.end', __d_end, (float, int,), False, False, False)
        __d_tag: Any = __d.get('tag')
        _guard_scalar('AudioAnnotatorItem.tag', __d_tag, (str,), False, False, False)
        start: float = __d_start
        end: float = __d_end
        tag: str = __d_tag
        return AudioAnnotatorItem(
            start,
            end,
            tag,
        )


class AudioAnnotator:
    """Create an audio annotator component.

    This component allows annotating and labeling parts of an audio file.
    """
    def __init__(
            self,
            name: str,
            title: str,
            path: str,
            tags: List[AudioAnnotatorTag],
            items: Optional[List[AudioAnnotatorItem]] = None,
            trigger: Optional[bool] = None,
    ):
        _guard_scalar('AudioAnnotator.name', name, (str,), True, False, False)
        _guard_scalar('AudioAnnotator.title', title, (str,), False, False, False)
        _guard_scalar('AudioAnnotator.path', path, (str,), False, False, False)
        _guard_vector('AudioAnnotator.tags', tags, (AudioAnnotatorTag,), False, False, False)
        _guard_vector('AudioAnnotator.items', items, (AudioAnnotatorItem,), False, True, False)
        _guard_scalar('AudioAnnotator.trigger', trigger, (bool,), False, True, False)
        self.name = name
        """An identifying name for this component."""
        self.title = title
        """The audio annotator's title."""
        self.path = path
        """The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats."""
        self.tags = tags
        """The master list of tags that can be used for annotations."""
        self.items = items
        """Annotations to display on the audio, if any."""
        self.trigger = trigger
        """True if the form should be submitted as soon as an annotation is made."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotator.name', self.name, (str,), True, False, False)
        _guard_scalar('AudioAnnotator.title', self.title, (str,), False, False, False)
        _guard_scalar('AudioAnnotator.path', self.path, (str,), False, False, False)
        _guard_vector('AudioAnnotator.tags', self.tags, (AudioAnnotatorTag,), False, False, False)
        _guard_vector('AudioAnnotator.items', self.items, (AudioAnnotatorItem,), False, True, False)
        _guard_scalar('AudioAnnotator.trigger', self.trigger, (bool,), False, True, False)
        return _dump(
            name=self.name,
            title=self.title,
            path=self.path,
            tags=[__e.dump() for __e in self.tags],
            items=None if self.items is None else [__e.dump() for __e in self.items],
            trigger=self.trigger,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotator':
        """Creates an instance of this class using the contents of a dict."""
        __d_name: Any = __d.get('name')
        _guard_scalar('AudioAnnotator.name', __d_name, (str,), True, False, False)
        __d_title: Any = __d.get('title')
        _guard_scalar('AudioAnnotator.title', __d_title, (str,), False, False, False)
        __d_path: Any = __d.get('path')
        _guard_scalar('AudioAnnotator.path', __d_path, (str,), False, False, False)
        __d_tags: Any = __d.get('tags')
        _guard_vector('AudioAnnotator.tags', __d_tags, (dict,), False, False, False)
        __d_items: Any = __d.get('items')
        _guard_vector('AudioAnnotator.items', __d_items, (dict,), False, True, False)
        __d_trigger: Any = __d.get('trigger')
        _guard_scalar('AudioAnnotator.trigger', __d_trigger, (bool,), False, True, False)
        name: str = __d_name
        title: str = __d_title
        path: str = __d_path
        tags: List[AudioAnnotatorTag] = [AudioAnnotatorTag.load(__e) for __e in __d_tags]
        items: Optional[List[AudioAnnotatorItem]] = None if __d_items is None else [AudioAnnotatorItem.load(__e) for __e in __d_items]
        trigger: Optional[bool] = __d_trigger
        return AudioAnnotator(
            name,
            title,
            path,
            tags,
            items,
            trigger,
        )
_guard_scalar('Component.text_annotator', self.text_annotator, (TextAnnotator,), False, True, False) _guard_scalar('Component.image_annotator', self.image_annotator, (ImageAnnotator,), False, True, False) + _guard_scalar('Component.audio_annotator', self.audio_annotator, (AudioAnnotator,), False, True, False) _guard_scalar('Component.facepile', self.facepile, (Facepile,), False, True, False) _guard_scalar('Component.copyable_text', self.copyable_text, (CopyableText,), False, True, False) _guard_scalar('Component.menu', self.menu, (Menu,), False, True, False) @@ -7488,6 +7672,7 @@ def dump(self) -> Dict: persona=None if self.persona is None else self.persona.dump(), text_annotator=None if self.text_annotator is None else self.text_annotator.dump(), image_annotator=None if self.image_annotator is None else self.image_annotator.dump(), + audio_annotator=None if self.audio_annotator is None else self.audio_annotator.dump(), facepile=None if self.facepile is None else self.facepile.dump(), copyable_text=None if self.copyable_text is None else self.copyable_text.dump(), menu=None if self.menu is None else self.menu.dump(), @@ -7588,6 +7773,8 @@ def load(__d: Dict) -> 'Component': _guard_scalar('Component.text_annotator', __d_text_annotator, (dict,), False, True, False) __d_image_annotator: Any = __d.get('image_annotator') _guard_scalar('Component.image_annotator', __d_image_annotator, (dict,), False, True, False) + __d_audio_annotator: Any = __d.get('audio_annotator') + _guard_scalar('Component.audio_annotator', __d_audio_annotator, (dict,), False, True, False) __d_facepile: Any = __d.get('facepile') _guard_scalar('Component.facepile', __d_facepile, (dict,), False, True, False) __d_copyable_text: Any = __d.get('copyable_text') @@ -7643,6 +7830,7 @@ def load(__d: Dict) -> 'Component': persona: Optional[Persona] = None if __d_persona is None else Persona.load(__d_persona) text_annotator: Optional[TextAnnotator] = None if __d_text_annotator is None else 
def audio_annotator_tag(
        name: str,
        label: str,
        color: str,
) -> AudioAnnotatorTag:
    """Create a unique tag type for use in an audio annotator.

    Args:
        name: An identifying name for this tag.
        label: Text to be displayed for the annotation.
        color: Hex or RGB color string to be used as the background color.
    Returns:
        A `h2o_wave.types.AudioAnnotatorTag` instance.
    """
    return AudioAnnotatorTag(
        name,
        label,
        color,
    )


def audio_annotator_item(
        start: float,
        end: float,
        tag: str,
) -> AudioAnnotatorItem:
    """Create an annotator item with initial selected tags or no tags.

    Args:
        start: The start of the audio annotation in seconds.
        end: The end of the audio annotation in seconds.
        tag: The `name` of the audio annotator tag to refer to for the `label` and `color` of this item.
    Returns:
        A `h2o_wave.types.AudioAnnotatorItem` instance.
    """
    return AudioAnnotatorItem(
        start,
        end,
        tag,
    )


def audio_annotator(
        name: str,
        title: str,
        path: str,
        tags: List[AudioAnnotatorTag],
        items: Optional[List[AudioAnnotatorItem]] = None,
        trigger: Optional[bool] = None,
) -> Component:
    """Create an audio annotator component.

    This component allows annotating and labeling parts of an audio file.

    Args:
        name: An identifying name for this component.
        title: The audio annotator's title.
        path: The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats.
        tags: The master list of tags that can be used for annotations.
        items: Annotations to display on the audio, if any.
        trigger: True if the form should be submitted as soon as an annotation is made.
    Returns:
        A `h2o_wave.types.AudioAnnotator` instance.
    """
    return Component(audio_annotator=AudioAnnotator(
        name,
        title,
        path,
        tags,
        items,
        trigger,
    ))
class AudioAnnotatorTag:
    """Create a unique tag type for use in an audio annotator.
    """
    def __init__(
            self,
            name: str,
            label: str,
            color: str,
    ):
        _guard_scalar('AudioAnnotatorTag.name', name, (str,), True, False, False)
        _guard_scalar('AudioAnnotatorTag.label', label, (str,), False, False, False)
        _guard_scalar('AudioAnnotatorTag.color', color, (str,), False, False, False)
        self.name = name
        """An identifying name for this tag."""
        self.label = label
        """Text to be displayed for the annotation."""
        self.color = color
        """Hex or RGB color string to be used as the background color."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotatorTag.name', self.name, (str,), True, False, False)
        _guard_scalar('AudioAnnotatorTag.label', self.label, (str,), False, False, False)
        _guard_scalar('AudioAnnotatorTag.color', self.color, (str,), False, False, False)
        return _dump(
            name=self.name,
            label=self.label,
            color=self.color,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotatorTag':
        """Creates an instance of this class using the contents of a dict."""
        __d_name: Any = __d.get('name')
        _guard_scalar('AudioAnnotatorTag.name', __d_name, (str,), True, False, False)
        __d_label: Any = __d.get('label')
        _guard_scalar('AudioAnnotatorTag.label', __d_label, (str,), False, False, False)
        __d_color: Any = __d.get('color')
        _guard_scalar('AudioAnnotatorTag.color', __d_color, (str,), False, False, False)
        name: str = __d_name
        label: str = __d_label
        color: str = __d_color
        return AudioAnnotatorTag(
            name,
            label,
            color,
        )


class AudioAnnotatorItem:
    """Create an annotator item with initial selected tags or no tags.
    """
    def __init__(
            self,
            start: float,
            end: float,
            tag: str,
    ):
        _guard_scalar('AudioAnnotatorItem.start', start, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.end', end, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.tag', tag, (str,), False, False, False)
        self.start = start
        """The start of the audio annotation in seconds."""
        self.end = end
        """The end of the audio annotation in seconds."""
        self.tag = tag
        """The `name` of the audio annotator tag to refer to for the `label` and `color` of this item."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotatorItem.start', self.start, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.end', self.end, (float, int,), False, False, False)
        _guard_scalar('AudioAnnotatorItem.tag', self.tag, (str,), False, False, False)
        return _dump(
            start=self.start,
            end=self.end,
            tag=self.tag,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotatorItem':
        """Creates an instance of this class using the contents of a dict."""
        __d_start: Any = __d.get('start')
        _guard_scalar('AudioAnnotatorItem.start', __d_start, (float, int,), False, False, False)
        __d_end: Any = __d.get('end')
        _guard_scalar('AudioAnnotatorItem.end', __d_end, (float, int,), False, False, False)
        __d_tag: Any = __d.get('tag')
        _guard_scalar('AudioAnnotatorItem.tag', __d_tag, (str,), False, False, False)
        start: float = __d_start
        end: float = __d_end
        tag: str = __d_tag
        return AudioAnnotatorItem(
            start,
            end,
            tag,
        )


class AudioAnnotator:
    """Create an audio annotator component.

    This component allows annotating and labeling parts of an audio file.
    """
    def __init__(
            self,
            name: str,
            title: str,
            path: str,
            tags: List[AudioAnnotatorTag],
            items: Optional[List[AudioAnnotatorItem]] = None,
            trigger: Optional[bool] = None,
    ):
        _guard_scalar('AudioAnnotator.name', name, (str,), True, False, False)
        _guard_scalar('AudioAnnotator.title', title, (str,), False, False, False)
        _guard_scalar('AudioAnnotator.path', path, (str,), False, False, False)
        _guard_vector('AudioAnnotator.tags', tags, (AudioAnnotatorTag,), False, False, False)
        _guard_vector('AudioAnnotator.items', items, (AudioAnnotatorItem,), False, True, False)
        _guard_scalar('AudioAnnotator.trigger', trigger, (bool,), False, True, False)
        self.name = name
        """An identifying name for this component."""
        self.title = title
        """The audio annotator's title."""
        self.path = path
        """The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats."""
        self.tags = tags
        """The master list of tags that can be used for annotations."""
        self.items = items
        """Annotations to display on the audio, if any."""
        self.trigger = trigger
        """True if the form should be submitted as soon as an annotation is made."""

    def dump(self) -> Dict:
        """Returns the contents of this object as a dict."""
        _guard_scalar('AudioAnnotator.name', self.name, (str,), True, False, False)
        _guard_scalar('AudioAnnotator.title', self.title, (str,), False, False, False)
        _guard_scalar('AudioAnnotator.path', self.path, (str,), False, False, False)
        _guard_vector('AudioAnnotator.tags', self.tags, (AudioAnnotatorTag,), False, False, False)
        _guard_vector('AudioAnnotator.items', self.items, (AudioAnnotatorItem,), False, True, False)
        _guard_scalar('AudioAnnotator.trigger', self.trigger, (bool,), False, True, False)
        return _dump(
            name=self.name,
            title=self.title,
            path=self.path,
            tags=[__e.dump() for __e in self.tags],
            items=None if self.items is None else [__e.dump() for __e in self.items],
            trigger=self.trigger,
        )

    @staticmethod
    def load(__d: Dict) -> 'AudioAnnotator':
        """Creates an instance of this class using the contents of a dict."""
        __d_name: Any = __d.get('name')
        _guard_scalar('AudioAnnotator.name', __d_name, (str,), True, False, False)
        __d_title: Any = __d.get('title')
        _guard_scalar('AudioAnnotator.title', __d_title, (str,), False, False, False)
        __d_path: Any = __d.get('path')
        _guard_scalar('AudioAnnotator.path', __d_path, (str,), False, False, False)
        __d_tags: Any = __d.get('tags')
        _guard_vector('AudioAnnotator.tags', __d_tags, (dict,), False, False, False)
        __d_items: Any = __d.get('items')
        _guard_vector('AudioAnnotator.items', __d_items, (dict,), False, True, False)
        __d_trigger: Any = __d.get('trigger')
        _guard_scalar('AudioAnnotator.trigger', __d_trigger, (bool,), False, True, False)
        name: str = __d_name
        title: str = __d_title
        path: str = __d_path
        tags: List[AudioAnnotatorTag] = [AudioAnnotatorTag.load(__e) for __e in __d_tags]
        items: Optional[List[AudioAnnotatorItem]] = None if __d_items is None else [AudioAnnotatorItem.load(__e) for __e in __d_items]
        trigger: Optional[bool] = __d_trigger
        return AudioAnnotator(
            name,
            title,
            path,
            tags,
            items,
            trigger,
        )
@@ -7233,6 +7412,7 @@ def __init__( persona: Optional[Persona] = None, text_annotator: Optional[TextAnnotator] = None, image_annotator: Optional[ImageAnnotator] = None, + audio_annotator: Optional[AudioAnnotator] = None, facepile: Optional[Facepile] = None, copyable_text: Optional[CopyableText] = None, menu: Optional[Menu] = None, @@ -7284,6 +7464,7 @@ def __init__( _guard_scalar('Component.persona', persona, (Persona,), False, True, False) _guard_scalar('Component.text_annotator', text_annotator, (TextAnnotator,), False, True, False) _guard_scalar('Component.image_annotator', image_annotator, (ImageAnnotator,), False, True, False) + _guard_scalar('Component.audio_annotator', audio_annotator, (AudioAnnotator,), False, True, False) _guard_scalar('Component.facepile', facepile, (Facepile,), False, True, False) _guard_scalar('Component.copyable_text', copyable_text, (CopyableText,), False, True, False) _guard_scalar('Component.menu', menu, (Menu,), False, True, False) @@ -7379,6 +7560,8 @@ def __init__( """Text annotator.""" self.image_annotator = image_annotator """Image annotator.""" + self.audio_annotator = audio_annotator + """Audio annotator.""" self.facepile = facepile """Facepile.""" self.copyable_text = copyable_text @@ -7437,6 +7620,7 @@ def dump(self) -> Dict: _guard_scalar('Component.persona', self.persona, (Persona,), False, True, False) _guard_scalar('Component.text_annotator', self.text_annotator, (TextAnnotator,), False, True, False) _guard_scalar('Component.image_annotator', self.image_annotator, (ImageAnnotator,), False, True, False) + _guard_scalar('Component.audio_annotator', self.audio_annotator, (AudioAnnotator,), False, True, False) _guard_scalar('Component.facepile', self.facepile, (Facepile,), False, True, False) _guard_scalar('Component.copyable_text', self.copyable_text, (CopyableText,), False, True, False) _guard_scalar('Component.menu', self.menu, (Menu,), False, True, False) @@ -7488,6 +7672,7 @@ def dump(self) -> Dict: persona=None if 
self.persona is None else self.persona.dump(), text_annotator=None if self.text_annotator is None else self.text_annotator.dump(), image_annotator=None if self.image_annotator is None else self.image_annotator.dump(), + audio_annotator=None if self.audio_annotator is None else self.audio_annotator.dump(), facepile=None if self.facepile is None else self.facepile.dump(), copyable_text=None if self.copyable_text is None else self.copyable_text.dump(), menu=None if self.menu is None else self.menu.dump(), @@ -7588,6 +7773,8 @@ def load(__d: Dict) -> 'Component': _guard_scalar('Component.text_annotator', __d_text_annotator, (dict,), False, True, False) __d_image_annotator: Any = __d.get('image_annotator') _guard_scalar('Component.image_annotator', __d_image_annotator, (dict,), False, True, False) + __d_audio_annotator: Any = __d.get('audio_annotator') + _guard_scalar('Component.audio_annotator', __d_audio_annotator, (dict,), False, True, False) __d_facepile: Any = __d.get('facepile') _guard_scalar('Component.facepile', __d_facepile, (dict,), False, True, False) __d_copyable_text: Any = __d.get('copyable_text') @@ -7643,6 +7830,7 @@ def load(__d: Dict) -> 'Component': persona: Optional[Persona] = None if __d_persona is None else Persona.load(__d_persona) text_annotator: Optional[TextAnnotator] = None if __d_text_annotator is None else TextAnnotator.load(__d_text_annotator) image_annotator: Optional[ImageAnnotator] = None if __d_image_annotator is None else ImageAnnotator.load(__d_image_annotator) + audio_annotator: Optional[AudioAnnotator] = None if __d_audio_annotator is None else AudioAnnotator.load(__d_audio_annotator) facepile: Optional[Facepile] = None if __d_facepile is None else Facepile.load(__d_facepile) copyable_text: Optional[CopyableText] = None if __d_copyable_text is None else CopyableText.load(__d_copyable_text) menu: Optional[Menu] = None if __d_menu is None else Menu.load(__d_menu) @@ -7694,6 +7882,7 @@ def load(__d: Dict) -> 'Component': persona, 
def audio_annotator_tag(
        name: str,
        label: str,
        color: str,
) -> AudioAnnotatorTag:
    """Create a unique tag type for use in an audio annotator.

    Args:
        name: An identifying name for this tag.
        label: Text to be displayed for the annotation.
        color: Hex or RGB color string to be used as the background color.
    Returns:
        A `h2o_wave.types.AudioAnnotatorTag` instance.
    """
    return AudioAnnotatorTag(
        name,
        label,
        color,
    )


def audio_annotator_item(
        start: float,
        end: float,
        tag: str,
) -> AudioAnnotatorItem:
    """Create an annotator item with initial selected tags or no tags.

    Args:
        start: The start of the audio annotation in seconds.
        end: The end of the audio annotation in seconds.
        tag: The `name` of the audio annotator tag to refer to for the `label` and `color` of this item.
    Returns:
        A `h2o_wave.types.AudioAnnotatorItem` instance.
    """
    return AudioAnnotatorItem(
        start,
        end,
        tag,
    )


def audio_annotator(
        name: str,
        title: str,
        path: str,
        tags: List[AudioAnnotatorTag],
        items: Optional[List[AudioAnnotatorItem]] = None,
        trigger: Optional[bool] = None,
) -> Component:
    """Create an audio annotator component.

    This component allows annotating and labeling parts of an audio file.

    Args:
        name: An identifying name for this component.
        title: The audio annotator's title.
        path: The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats.
        tags: The master list of tags that can be used for annotations.
        items: Annotations to display on the audio, if any.
        trigger: True if the form should be submitted as soon as an annotation is made.
    Returns:
        A `h2o_wave.types.AudioAnnotator` instance.
    """
    return Component(audio_annotator=AudioAnnotator(
        name,
        title,
        path,
        tags,
        items,
        trigger,
    ))
+#' +#' @param name An identifying name for this tag. +#' @param label Text to be displayed for the annotation. +#' @param color Hex or RGB color string to be used as the background color. +#' @return A AudioAnnotatorTag instance. +#' @export +ui_audio_annotator_tag <- function( + name, + label, + color) { + .guard_scalar("name", "character", name) + .guard_scalar("label", "character", label) + .guard_scalar("color", "character", color) + .o <- list( + name=name, + label=label, + color=color) + class(.o) <- append(class(.o), c(.wave_obj, "WaveAudioAnnotatorTag")) + return(.o) +} + +#' Create an annotator item with initial selected tags or no tags. +#' +#' @param start The start of the audio annotation in seconds. +#' @param end The end of the audio annotation in seconds. +#' @param tag The `name` of the audio annotator tag to refer to for the `label` and `color` of this item. +#' @return A AudioAnnotatorItem instance. +#' @export +ui_audio_annotator_item <- function( + start, + end, + tag) { + .guard_scalar("start", "numeric", start) + .guard_scalar("end", "numeric", end) + .guard_scalar("tag", "character", tag) + .o <- list( + start=start, + end=end, + tag=tag) + class(.o) <- append(class(.o), c(.wave_obj, "WaveAudioAnnotatorItem")) + return(.o) +} + +#' Create an audio annotator component. +#' +#' This component allows annotating and labeling parts of audio file. +#' +#' @param name An identifying name for this component. +#' @param title The audio annotator's title. +#' @param path The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats. +#' @param tags The master list of tags that can be used for annotations. +#' @param items Annotations to display on the image, if any. +#' @param trigger True if the form should be submitted as soon as an annotation is made. +#' @return A AudioAnnotator instance. 
+#' @export +ui_audio_annotator <- function( + name, + title, + path, + tags, + items = NULL, + trigger = NULL) { + .guard_scalar("name", "character", name) + .guard_scalar("title", "character", title) + .guard_scalar("path", "character", path) + .guard_vector("tags", "WaveAudioAnnotatorTag", tags) + .guard_vector("items", "WaveAudioAnnotatorItem", items) + .guard_scalar("trigger", "logical", trigger) + .o <- list(audio_annotator=list( + name=name, + title=title, + path=path, + tags=tags, + items=items, + trigger=trigger)) + class(.o) <- append(class(.o), c(.wave_obj, "WaveComponent")) + return(.o) +} + #' A face pile displays a list of personas. Each circle represents a person and contains their image or initials. #' Often this control is used when sharing who has access to a specific view or file. #' diff --git a/tools/intellij-plugin/src/main/resources/templates/wave-components.xml b/tools/intellij-plugin/src/main/resources/templates/wave-components.xml index 08176d9bfc4..34da03ce83e 100644 --- a/tools/intellij-plugin/src/main/resources/templates/wave-components.xml +++ b/tools/intellij-plugin/src/main/resources/templates/wave-components.xml @@ -6,6 +6,31 @@ + + + + + + + + + + + + + + + + + + + + + + + + + @@ -984,6 +1009,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -1302,7 +1354,7 @@ - + @@ -1348,6 +1400,7 @@ + diff --git a/tools/showcase/showcase.py b/tools/showcase/showcase.py index 8e163ca2451..c44c293217e 100644 --- a/tools/showcase/showcase.py +++ b/tools/showcase/showcase.py @@ -73,7 +73,8 @@ def generate_diff_view(): f.write(content) -def make_snippet_screenshot(code: List[str], img_name: str, page, groups: List[str], pool_idx: int, is_test: bool, browser: str): +def make_screenshot(code: List[str], img_name: str, page, + groups: List[str], pool_idx: int, is_test: bool, browser: str, sleep=1): code_str = ''.join(code) match = re.findall('(q.page\\[)(\'|\")([\\w-]+)', code_str) if not match: @@ -88,7 +89,7 @@ def 
make_snippet_screenshot(code: List[str], img_name: str, page, groups: List[s page.goto(f'http://localhost:10101/{pool_idx}', wait_until='networkidle') # Wait for all the resources to be loaded. - time.sleep(1) + time.sleep(sleep) groups = os.path.join(*groups) path = os.path.join(docs_path, 'docs', 'widgets', groups, 'assets', img_name) @@ -126,17 +127,21 @@ def generate_screenshots(files: List[DocFile], pool_idx: int, is_test: bool): for file in files: with open(os.path.join(docs_path, file.path), 'r') as f: is_code = False + sleep = 1 code = [] file_idx = 0 for line in f.readlines(): + if line.startswith('```py') and 'sleep' in line: + sleep = int(line.split('sleep')[1].replace(' ', '').replace('\n', '')) if line.startswith('```py') and 'ignore' not in line: is_code = True elif line.replace(' ', '').replace('\n', '') == '```' and is_code: screenshot_name = f"{file.name}-{file_idx}.png" - make_snippet_screenshot(code, screenshot_name, page, file.groups, pool_idx, is_test, b) + make_screenshot(code, screenshot_name, page, file.groups, pool_idx, is_test, b, sleep) file_idx = file_idx + 1 code = [] is_code = False + sleep = 1 elif is_code: code.append(line) browser.close() diff --git a/tools/vscode-extension/component-snippets.json b/tools/vscode-extension/component-snippets.json index ca8f71532cb..077cd5462d1 100644 --- a/tools/vscode-extension/component-snippets.json +++ b/tools/vscode-extension/component-snippets.json @@ -6,6 +6,27 @@ ], "description": "Create a minimal Wave ArticleCard." }, + "Wave AudioAnnotatorTag": { + "prefix": "w_audio_annotator_tag", + "body": [ + "ui.audio_annotator_tag(name='$1', label='$2', color='$3'),$0" + ], + "description": "Create a minimal Wave AudioAnnotatorTag." + }, + "Wave AudioAnnotatorItem": { + "prefix": "w_audio_annotator_item", + "body": [ + "ui.audio_annotator_item(start=$1, end=$2, tag='$3'),$0" + ], + "description": "Create a minimal Wave AudioAnnotatorItem." 
+ }, + "Wave AudioAnnotator": { + "prefix": "w_audio_annotator", + "body": [ + "ui.audio_annotator(name='$1', title='$2', path='$3', tags=[\n\t\t$4\t\t\n]),$0" + ], + "description": "Create a minimal Wave AudioAnnotator." + }, "Wave Breadcrumb": { "prefix": "w_breadcrumb", "body": [ @@ -951,6 +972,27 @@ ], "description": "Create a full Wave ArticleCard." }, + "Wave Full AudioAnnotatorTag": { + "prefix": "w_full_audio_annotator_tag", + "body": [ + "ui.audio_annotator_tag(name='$1', label='$2', color='$3'),$0" + ], + "description": "Create a full Wave AudioAnnotatorTag." + }, + "Wave Full AudioAnnotatorItem": { + "prefix": "w_full_audio_annotator_item", + "body": [ + "ui.audio_annotator_item(start=$1, end=$2, tag='$3'),$0" + ], + "description": "Create a full Wave AudioAnnotatorItem." + }, + "Wave Full AudioAnnotator": { + "prefix": "w_full_audio_annotator", + "body": [ + "ui.audio_annotator(name='$1', title='$2', path='$3', trigger=${4:False}, tags=[\n\t\t$5\t\t\n], items=[\n\t\t$6\t\t\n]),$0" + ], + "description": "Create a full Wave AudioAnnotator." 
+ }, "Wave Full Breadcrumb": { "prefix": "w_full_breadcrumb", "body": [ @@ -1143,7 +1185,7 @@ "Wave Full Component": { "prefix": "w_full_component", "body": [ - "ui.component(text=${1:None}, text_xl=${2:None}, text_l=${3:None}, text_m=${4:None}, text_s=${5:None}, text_xs=${6:None}, label=${7:None}, separator=${8:None}, progress=${9:None}, message_bar=${10:None}, textbox=${11:None}, checkbox=${12:None}, toggle=${13:None}, choice_group=${14:None}, checklist=${15:None}, dropdown=${16:None}, combobox=${17:None}, slider=${18:None}, spinbox=${19:None}, date_picker=${20:None}, color_picker=${21:None}, button=${22:None}, buttons=${23:None}, mini_button=${24:None}, mini_buttons=${25:None}, file_upload=${26:None}, table=${27:None}, link=${28:None}, links=${29:None}, tabs=${30:None}, expander=${31:None}, frame=${32:None}, markup=${33:None}, template=${34:None}, picker=${35:None}, range_slider=${36:None}, stepper=${37:None}, visualization=${38:None}, vega_visualization=${39:None}, stats=${40:None}, inline=${41:None}, image=${42:None}, persona=${43:None}, text_annotator=${44:None}, image_annotator=${45:None}, facepile=${46:None}, copyable_text=${47:None}, menu=${48:None}, tags=${49:None}, time_picker=${50:None}),$0" + "ui.component(text=${1:None}, text_xl=${2:None}, text_l=${3:None}, text_m=${4:None}, text_s=${5:None}, text_xs=${6:None}, label=${7:None}, separator=${8:None}, progress=${9:None}, message_bar=${10:None}, textbox=${11:None}, checkbox=${12:None}, toggle=${13:None}, choice_group=${14:None}, checklist=${15:None}, dropdown=${16:None}, combobox=${17:None}, slider=${18:None}, spinbox=${19:None}, date_picker=${20:None}, color_picker=${21:None}, button=${22:None}, buttons=${23:None}, mini_button=${24:None}, mini_buttons=${25:None}, file_upload=${26:None}, table=${27:None}, link=${28:None}, links=${29:None}, tabs=${30:None}, expander=${31:None}, frame=${32:None}, markup=${33:None}, template=${34:None}, picker=${35:None}, range_slider=${36:None}, stepper=${37:None}, 
visualization=${38:None}, vega_visualization=${39:None}, stats=${40:None}, inline=${41:None}, image=${42:None}, persona=${43:None}, text_annotator=${44:None}, image_annotator=${45:None}, audio_annotator=${46:None}, facepile=${47:None}, copyable_text=${48:None}, menu=${49:None}, tags=${50:None}, time_picker=${51:None}),$0" ], "description": "Create a full Wave Component." }, diff --git a/ui/src/audio_annotator.test.tsx b/ui/src/audio_annotator.test.tsx new file mode 100644 index 00000000000..8294e15f026 --- /dev/null +++ b/ui/src/audio_annotator.test.tsx @@ -0,0 +1,865 @@ +// Copyright 2020 H2O.ai, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import { act, fireEvent, render } from '@testing-library/react' +import React from 'react' +import { AudioAnnotator, XAudioAnnotator } from './audio_annotator' +import { DrawnAnnotation, recalculateAnnotations } from './parts/range_annotator' +import { wave } from './ui' + +const + name = 'audio_annotator', + items = [ + { start: 0, end: 20, tag: 'tag1' }, + { start: 160, end: 190, tag: 'tag2' }, + ], + model: AudioAnnotator = { + name, + title: name, + path: '', + tags: [ + { name: 'tag1', label: 'Tag 1', color: 'red' }, + { name: 'tag2', label: 'Tag 2', color: 'blue' }, + ], + items + }, + { start, end } = items[1], + mid = (start + end) / 2, + waitForComponentLoad = async () => act(() => new Promise(res => setTimeout(() => res(), 20))), + WIDTH_AND_DURATION = 1000, + WAVE_FORM_HEIGHT = 100 + +class MockAudioContext { + createGain = () => ({ gain: {} }) + createMediaElementSource = () => this + connect = () => this + decodeAudioData = (_data: any, res: any) => res({ duration: WIDTH_AND_DURATION, getChannelData: () => new Array(300).fill(1) }) +} + +describe('AudioAnnotator.tsx', () => { + beforeAll(() => { + // @ts-ignore + window.AudioContext = MockAudioContext + // @ts-ignore + window.fetch = () => ({ arrayBuffer: () => '' }) + // @ts-ignore + window.URL = { createObjectURL: () => '' } + // @ts-ignore + window.HTMLCanvasElement.prototype.getBoundingClientRect = () => ({ width: WIDTH_AND_DURATION, left: 0, top: 0 }) + }) + + it('Renders data-test attr', async () => { + const { queryByTestId } = render() + await waitForComponentLoad() + expect(queryByTestId(name)).toBeInTheDocument() + }) + + it('Sets annotation args - empty ', async () => { + render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject([]) + }) + + it('Sets annotation args ', async () => { + render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + }) + + it('Displays correct cursor when hovering over canvas - no intersection', async () => { + 
const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseMove(canvasEl, { clientX: 125, clientY: 25 }) + expect(canvasEl.style.cursor).toBe('pointer') + }) + + it('Removes all shapes after clicking reset', async () => { + const { getByText } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + fireEvent.click(getByText('Remove all')) + expect(wave.args[name]).toMatchObject([]) + }) + + describe('Annotations', () => { + it('Draws a new annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([items[0], { tag: 'tag1', start: 130, end: 140 }, items[1]]) + }) + + it('Draws a new annotation by moving forward and then a bit backward', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 155, clientY: 20, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 150, clientY: 20, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([items[0], { tag: 'tag1', start: 130, end: 140 }, items[1]]) + }) + + it('Draws a new annotation by moving forward, backward, forward', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 150, clientY: 20, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 120, clientY: 20, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([items[0], { tag: 'tag1', start: 130, end: 140 }, items[1]]) + }) + + it('Does not draw a new annotation if too small', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 131, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 131, clientY: 20, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject(items) + }) + + it('Does not draw a new annotation if dragging over zoom - should move zoom instead', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 30, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 31, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 31, clientY: 20, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject(items) + }) + + it('Does not draw a new annotation if left mouse click not pressed', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10 }) + fireEvent.mouseMove(canvasEl, { clientX: 131, clientY: 20 }) + fireEvent.click(canvasEl, { clientX: 131, clientY: 20 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject(items) + }) + + it('Draws a new annotation if moved too fast', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([items[0], { tag: 'tag1', start: 130, end: 140 }, items[1]]) + }) + + it('Removes annotation after clicking remove btn', async () => { + const { container, getByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + expect(wave.args[name]).toMatchObject(items) + + const removeBtn = getByText('Remove selected').parentElement!.parentElement!.parentElement! + expect(removeBtn).toHaveAttribute('aria-disabled', 'true') + fireEvent.click(canvasEl, { clientX: mid, clientY: 3 }) + await waitForComponentLoad() + expect(removeBtn).not.toHaveAttribute('aria-disabled') + fireEvent.click(removeBtn) + + expect(wave.args[name]).toHaveLength(1) + expect(wave.args[name]).toMatchObject([items[0]]) + }) + + it('Changes tag of focused annotation', async () => { + const { container, getByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.click(canvasEl, { clientX: 170, clientY: 3 }) + fireEvent.click(getByText('Tag 1')) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], tag: 'tag1' }]) + }) + + it('Displays the correct cursor when hovering over annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseMove(canvasEl, { clientX: 170, clientY: 3 }) + expect(canvasEl.style.cursor).toBe('pointer') + }) + + it('Displays move cursor when hovering over focused annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.click(canvasEl, { clientX: 170, clientY: 3, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('move') + fireEvent.mouseMove(canvasEl, { clientX: 175, clientY: 3, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('move') + }) + + it('Displays resize cursor when hovering over focused annotation boundaries', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.click(canvasEl, { clientX: mid, clientY: 0, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('move') + fireEvent.mouseMove(canvasEl, { clientX: start, clientY: 0, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + fireEvent.mouseMove(canvasEl, { clientX: mid, clientY: 0, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('move') + fireEvent.mouseMove(canvasEl, { clientX: end, clientY: 0, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + }) + + it('Displays resize cursor when resizing focused annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.click(canvasEl, { clientX: start, clientY: 3, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 3, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start + 10, clientY: 3, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + }) + + it('Displays move cursor when dragging the focused annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseMove(canvasEl, { clientX: mid, clientY: 3 }) + expect(canvasEl.style.cursor).toBe('pointer') + fireEvent.click(canvasEl, { clientX: mid, clientY: 10 }) + expect(canvasEl.style.cursor).toBe('move') + fireEvent.mouseDown(canvasEl, { clientX: mid, clientY: 3, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: mid + 10, clientY: 4, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('move') + fireEvent.click(canvasEl, { clientX: mid + 10, clientY: 5 }) + expect(canvasEl.style.cursor).toBe('move') + }) + + it('Displays resize cursor when dragging the focused annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseMove(canvasEl, { clientX: end, clientY: 3 }) + expect(canvasEl.style.cursor).toBe('pointer') + fireEvent.click(canvasEl, { clientX: end, clientY: 3 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 3, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: end + 10, clientY: 4, buttons: 1 }) + expect(canvasEl.style.cursor).toBe('ew-resize') + fireEvent.click(canvasEl, { clientX: end + 20, clientY: 5 }) + expect(canvasEl.style.cursor).toBe('pointer') + }) + + it('Moves annotation', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: mid, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: mid + moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: mid + moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: start + moveOffset, end: end + moveOffset }]) + }) + + it('Does not move annotation if left mouse btn not pressed (dragging)', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: 10, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: 10, clientY: 50 }) + fireEvent.mouseMove(canvasEl, { clientX: 10 + moveOffset, clientY: 60 }) + fireEvent.click(canvasEl, { clientX: 10 + moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject(items) + }) + + it('Does not move annotation beyond left border (0)', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! 
+ const { start, end } = items[0] + const mid = (start + end) / 2 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: mid, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: start, clientY: 60 }) + + expect(wave.args[name]).toMatchObject(items) + }) + + it('Does not move annotation beyond right border (canvas width)', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const moveOffset = end - mid + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: mid, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: WIDTH_AND_DURATION - moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: WIDTH_AND_DURATION - moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: WIDTH_AND_DURATION - (end - start), end: WIDTH_AND_DURATION }]) + }) + + it('Resizes annotation from', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! 
+ const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start - moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: start - moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: start - moveOffset }]) + }) + + it('Resizes annotation from and exceeds the "to"', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const { start, end } = items[1] + const moveOffset = end - start + 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start + moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: start + moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: end, end: end + 5 }]) + }) + + it('Resizes annotation to', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: end + moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: end + moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], end: end + moveOffset }]) + }) + + it('Resizes annotation to and exceeds the "from"', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! 
+ const moveOffset = end - start + 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: end - moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: end - moveOffset, clientY: 60 }) + + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: end - moveOffset, end: start }]) + }) + + }) + + describe('Tooltip', () => { + it('Shows tooltip when hovering over annotation', async () => { + const { container, getByTestId, getByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseMove(canvasEl, { clientX: mid, clientY: 3 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(getByText('02:40.00 - 03:10.00')).toBeVisible() + }) + + it('Does not show tooltip when not hovering over annotation', async () => { + const { container, getByTestId } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseMove(canvasEl, { clientX: 30, clientY: 3 }) + expect(getByTestId('wave-range-annotator-tooltip')).not.toBeVisible() + }) + + it('Shows tooltip while drawing a new annotation', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 130, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 140, clientY: 20, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('02:10.00 - 02:20.00')).toBeVisible() + }) + + it('Shows tooltip while moving an annotation', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: mid, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: mid + moveOffset, clientY: 60, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('02:45.00 - 03:15.00')).toBeVisible() + }) + + it('Shows tooltip while resizing an annotation "from"', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start - moveOffset, clientY: 60, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('02:35.00 - 03:10.00')).toBeVisible() + }) + + it('Shows correct tooltip while resizing an annotation "from" and exceeds "to"', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start + (end - start) + moveOffset, clientY: 60, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('03:10.00 - 03:15.00')).toBeVisible() + }) + + it('Shows tooltip while resizing an annotation "to"', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: end + moveOffset, clientY: 60, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('02:40.00 - 03:15.00')).toBeVisible() + }) + + it('Shows correct tooltip while resizing an annotation "to" and exceeds "from"', async () => { + const { container, getByTestId, queryByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start - 5, clientY: 60, buttons: 1 }) + expect(getByTestId('wave-range-annotator-tooltip')).toBeVisible() + expect(queryByText('02:35.00 - 02:40.00')).toBeVisible() + }) + }) + describe('Wave trigger', () => { + const pushMock = jest.fn() + + beforeAll(() => wave.push = pushMock) + beforeEach(() => pushMock.mockReset()) + + it('Calls push after drawing the annotation', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 30, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 40, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 40, clientY: 20, buttons: 1 }) + + expect(pushMock).toHaveBeenCalledTimes(1) + }) + + it('Does not call push after drawing the annottaion', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseDown(canvasEl, { clientX: 30, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 40, clientY: 20, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 40, clientY: 20, buttons: 1 }) + + expect(pushMock).toHaveBeenCalledTimes(0) + }) + + it('Calls push after moving', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + fireEvent.click(canvasEl, { clientX: 10, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: 10, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 10 + moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 10 + moveOffset, clientY: 60 }) + + expect(pushMock).toHaveBeenCalledTimes(1) + }) + + it('Calls push after resizing annotation from', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! + const moveOffset = 5 + const { start } = items[1] + fireEvent.click(canvasEl, { clientX: mid, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: start, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start - moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: start - moveOffset, clientY: 60 }) + + expect(pushMock).toHaveBeenCalledTimes(1) + }) + + it('Calls push after resizing annotation to', async () => { + const { container } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + const canvasEl = container.querySelector('canvas')! 
+ const moveOffset = 5 + const { end } = items[0] + fireEvent.click(canvasEl, { clientX: 10, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: end, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: end + moveOffset, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: end + moveOffset, clientY: 60 }) + + expect(pushMock).toHaveBeenCalledTimes(1) + }) + + it('Calls push after removing all annotations', async () => { + const { getByText } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + fireEvent.click(getByText('Remove all')) + expect(pushMock).toHaveBeenCalledTimes(1) + }) + + it('Calls push after removing annotation', async () => { + const { container, getByText } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + expect(wave.args[name]).toMatchObject(items) + + fireEvent.click(canvasEl, { clientX: 3, clientY: 3 }) + await waitForComponentLoad() + fireEvent.click(getByText('Remove selected')) + + expect(pushMock).toHaveBeenCalledTimes(1) + }) + }) + describe('Annotations - merge / union', () => { + + it('Merges annotation if intersecting and exceeding from start', async () => { + const { container, getByText } = render() + await waitForComponentLoad() + fireEvent.click(getByText('Tag 2')) + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: start - 20, clientY: 0, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: start + 10, clientY: 0, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: start + 10, clientY: 0, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject([items[0], { ...items[1], start: start - 20 }]) + }) + + it('Merges annotation if intersecting and exceeding from end', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseDown(canvasEl, { clientX: 10, clientY: 0, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 30, clientY: 0, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 30, clientY: 0, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject([{ ...items[0], end: 30 }, items[1]]) + }) + + it('Merges annotations when intersecting with both ends', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! + fireEvent.mouseDown(canvasEl, { clientX: 10, clientY: 0, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 15, clientY: 0, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 15, clientY: 0, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject(items) + }) + + it('Does not merge if intersecting with different tag', async () => { + const { container } = render() + await waitForComponentLoad() + const canvasEl = container.querySelector('canvas')! 
+ fireEvent.mouseDown(canvasEl, { clientX: 50, clientY: 0, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 70, clientY: 0, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 70, clientY: 0, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([items[0], { start: 50, end: 70, tag: 'tag1' }, items[1]]) + }) + }) + + describe('Annotations recalculations', () => { + const base = { start: 0, end: 0 } + + it('Does not recalculate if no annotations', () => { + const annotations: DrawnAnnotation[] = [] + expect(recalculateAnnotations(annotations)).toMatchObject(annotations) + }) + + it('Handles single annotation', () => { + const annotations: DrawnAnnotation[] = [{ id: '1', ...base, canvasStart: 10, canvasEnd: 20, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }] + expect(recalculateAnnotations(annotations)).toMatchObject(annotations) + }) + + it('Handles 2 separate annotations', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 20, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 30, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject(annotations) + }) + + it('Merges 2 intersecting same tag annotations', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 20, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 15, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + ]) + }) + + it('Handles 2 intersecting annotations - start', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 40, canvasHeight: 
WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 30, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 30, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + ]) + }) + + it('Handles 2 intersecting annotations - middle ', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + ]) + }) + + it('Handles 2 intersecting annotations - end', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 30, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 30, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + ]) + }) + + it('Handles 3 zig-zag intersecting annotations - tag1, tag2, tag1', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 25, canvasHeight: 
WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 45, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 40, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 25, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 45, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 40, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + ]) + }) + + it('Handles 3 intersecting annotations - tag1, tag2, tag3', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 30, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 30, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT - (2 * WAVE_FORM_HEIGHT / 3), canvasY: 2 * WAVE_FORM_HEIGHT / 3, tag: 'tag3' }, + ]) + }) + + it('Handles 3 intersecting annotations - tag1, tag2, tag2', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 30, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 
'tag2' }, + { id: '3', ...base, canvasStart: 40, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 30, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 40, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + ]) + }) + + it('Handles 3 intersecting annotations - tag2, tag1, tag1', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '3', ...base, canvasStart: 30, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 40, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag2' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag1' }, + ]) + }) + + it('Handles 3 intersecting annotations and 1 that should fill the remaining space', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 140, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 30, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 90, canvasEnd: 170, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + 
expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 140, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 30, canvasEnd: 70, canvasHeight: WAVE_FORM_HEIGHT - (2 * WAVE_FORM_HEIGHT / 3), canvasY: WAVE_FORM_HEIGHT / 3 * 2, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 90, canvasEnd: 170, canvasHeight: WAVE_FORM_HEIGHT - (WAVE_FORM_HEIGHT / 3), canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + ]) + }) + + it('Handles 3 intersecting annotations and 1 that should fill the remaining space - 2', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 140, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 60, canvasEnd: 80, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 70, canvasEnd: 170, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 140, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 50, canvasHeight: WAVE_FORM_HEIGHT - (WAVE_FORM_HEIGHT / 3), canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 60, canvasEnd: 80, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 70, canvasEnd: 170, canvasHeight: WAVE_FORM_HEIGHT - (2 * WAVE_FORM_HEIGHT / 3), canvasY: 2 * WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + ]) + }) + + it('Handles 3 intersecting annotations and 1 that should fill the remaining space - 3', () => { + 
const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 50, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 70, canvasEnd: 90, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 20, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: WAVE_FORM_HEIGHT / 3, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 50, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT - (2 * WAVE_FORM_HEIGHT / 3), canvasY: WAVE_FORM_HEIGHT / 3 * 2, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 70, canvasEnd: 90, canvasHeight: WAVE_FORM_HEIGHT / 3, canvasY: 0, tag: 'tag3' }, + ]) + }) + + it('Handles multiple intersecting annotations with same start', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag4' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: WAVE_FORM_HEIGHT / 4, tag: 'tag2' }, 
+ { id: '3', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 2 * WAVE_FORM_HEIGHT / 4, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 3 * WAVE_FORM_HEIGHT / 4, tag: 'tag4' }, + ]) + }) + + it('Handles multiple intersecting annotations with same start and a couple of other intersecting', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag4' }, + { id: '5', ...base, canvasStart: 210, canvasEnd: 240, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '6', ...base, canvasStart: 230, canvasEnd: 250, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 100, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: WAVE_FORM_HEIGHT / 4, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 2 * WAVE_FORM_HEIGHT / 4, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 10, canvasEnd: 160, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 3 * WAVE_FORM_HEIGHT / 4, tag: 'tag4' }, + { id: '5', ...base, canvasStart: 210, canvasEnd: 240, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: 0, tag: 'tag1' }, + { id: '6', ...base, canvasStart: 230, canvasEnd: 250, canvasHeight: WAVE_FORM_HEIGHT / 2, canvasY: WAVE_FORM_HEIGHT / 2, tag: 'tag2' }, + ]) + }) 
+ + it('Handles 4 intersecting annotations with same start zig zag', () => { + const annotations: DrawnAnnotation[] = [ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT, canvasY: 0, tag: 'tag4' }, + ] + expect(recalculateAnnotations(annotations)).toMatchObject([ + { id: '1', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 0, tag: 'tag1' }, + { id: '2', ...base, canvasStart: 10, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: WAVE_FORM_HEIGHT / 4, tag: 'tag2' }, + { id: '3', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 2 * WAVE_FORM_HEIGHT / 4, tag: 'tag3' }, + { id: '4', ...base, canvasStart: 20, canvasEnd: 60, canvasHeight: WAVE_FORM_HEIGHT / 4, canvasY: 3 * WAVE_FORM_HEIGHT / 4, tag: 'tag4' }, + ]) + }) + + it('Handles intersection tag1 tag2 tag2 after moving from right to left', async () => { + const { container, getByText } = render() + await waitForComponentLoad() + expect(wave.args[name]).toMatchObject(items) + fireEvent.click(getByText('Tag 2')) + + const canvasEl = container.querySelector('canvas')! 
+ + fireEvent.mouseDown(canvasEl, { clientX: 200, clientY: 10, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 380, clientY: 10, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 380, clientY: 10, buttons: 1 }) + + expect(wave.args[name]).toHaveLength(3) + expect(wave.args[name]).toMatchObject([...items, { start: 200, end: 380, tag: 'tag2' }]) + + fireEvent.click(canvasEl, { clientX: 210, clientY: 50 }) + fireEvent.mouseDown(canvasEl, { clientX: 210, clientY: 50, buttons: 1 }) + fireEvent.mouseMove(canvasEl, { clientX: 150, clientY: 60, buttons: 1 }) + fireEvent.click(canvasEl, { clientX: 150, clientY: 60 }) + + expect(wave.args[name]).toHaveLength(2) + expect(wave.args[name]).toMatchObject([items[0], { start: 140, end: 320, tag: 'tag2' }]) + }) + }) +}) diff --git a/ui/src/audio_annotator.tsx b/ui/src/audio_annotator.tsx new file mode 100644 index 00000000000..a57f42d90b2 --- /dev/null +++ b/ui/src/audio_annotator.tsx @@ -0,0 +1,282 @@ +import * as Fluent from '@fluentui/react' +import { B, F, Id, Rec, S, U } from 'h2o-wave' +import React from 'react' +import { stylesheet } from 'typestyle' +import { averageChannels } from './parts/audioUtils' +import { DrawnAnnotation, RangeAnnotator, TimeComponent } from './parts/range_annotator' +import { AnnotatorTags } from './text_annotator' +import { clas, cssVar } from './theme' +import { wave } from './ui' + +/** Create a unique tag type for use in an audio annotator. */ +export interface AudioAnnotatorTag { + /** An identifying name for this tag. */ + name: Id + /** Text to be displayed for the annotation. */ + label: S + /** Hex or RGB color string to be used as the background color. */ + color: S +} + +/** Create an annotator item with initial selected tags or no tags. */ +export interface AudioAnnotatorItem { + /** The start of the audio annotation in seconds. */ + start: F + /** The end of the audio annotation in seconds. 
*/ + end: F + /** The `name` of the audio annotator tag to refer to for the `label` and `color` of this item. */ + tag: S +} + +/** + * Create an audio annotator component. + * + * This component allows annotating and labeling parts of audio file. + */ +export interface AudioAnnotator { + /** An identifying name for this component. */ + name: Id, + /** The audio annotator's title. */ + title: S + /** The path to the audio file. Use mp3 or wav formats to achieve the best cross-browser support. See https://caniuse.com/?search=audio%20format for other formats. */ + path: S + /** The master list of tags that can be used for annotations. */ + tags: AudioAnnotatorTag[] + /** Annotations to display on the image, if any. */ + items?: AudioAnnotatorItem[] + /** True if the form should be submitted as soon as an annotation is made. */ + trigger?: B +} + +const + WAVEFORM_HEIGHT = 200, + BODY_MIN_HEGHT = 370, + TRACK_SYNC_OFFSET = 0.05, + css = stylesheet({ + body: { + minHeight: BODY_MIN_HEGHT, + }, + title: { + color: cssVar('$neutralPrimary'), + marginBottom: 8 + }, + waveForm: { + position: 'absolute', + top: 0, + width: '100%', + height: WAVEFORM_HEIGHT, + cursor: 'pointer' + }, + }), + speedAdjustmentOptions = [ + { key: 0.25, text: '0.25x' }, + { key: 0.5, text: '0.5x' }, + { key: 0.75, text: '0.75x' }, + { key: 1, text: 'Normal' }, + { key: 1.25, text: '1.25x' }, + { key: 1.5, text: '1.5x' }, + { key: 2, text: '2x' }, + ], + // Love ya Safari. + promisifyDecodeAudioData = (audioContext: AudioContext, audioData: ArrayBuffer) => new Promise((resolve, reject) => { + audioContext.decodeAudioData(audioData, resolve, reject) + }) + +declare global { + interface Window { webkitAudioContext: typeof window.AudioContext } +} +// Shim for AudioContext in Safari. 
+window.AudioContext = window.AudioContext || window.webkitAudioContext + +export const XAudioAnnotator = ({ model }: { model: AudioAnnotator }) => { + const + [activeTag, setActiveTag] = React.useState(model.tags[0]?.name), + [waveFormData, setWaveFormData] = React.useState(null), + [isPlaying, setIsPlaying] = React.useState(false), + [duration, setDuration] = React.useState(0), + [currentTime, setCurrentTime] = React.useState(0), + [volumeIcon, setVolumeIcon] = React.useState('Volume3'), + [loadingMsg, setLoadingMsg] = React.useState(''), + [errMsg, setErrMsg] = React.useState(''), + audioRef = React.useRef(null), + audioContextRef = React.useRef(), + gainNodeRef = React.useRef(), + fetchedAudioUrlRef = React.useRef(), + activateTag = (tagName: S) => () => setActiveTag(tagName), + // TODO: Move to a separate service worker. + getAudioData = async () => { + if (!audioRef.current) return + + const audioContext = new AudioContext() + audioContextRef.current = audioContext + gainNodeRef.current = audioContext.createGain() + + audioContext.createMediaElementSource(audioRef.current) + .connect(gainNodeRef.current) + .connect(audioContext.destination) + + setLoadingMsg('Fetching audio data...') + let arrBuffer: ArrayBuffer + try { + // The data audio needs to be fetched and processed manually to generate a waveform later. + const res = await fetch(model.path) + arrBuffer = await res.arrayBuffer() + } catch (e) { + setErrMsg('Could not download audio file.') + return + } + // Store the URL into the ref so that it can be revoked on destroy and mem leak prevented. + // Safari needs Blob type to be specified, doesn't need to match the real sound format. + fetchedAudioUrlRef.current = URL.createObjectURL(new Blob([arrBuffer], { type: 'audio/mpeg' })) + // Do not set src directly within HTML to prevent double fetching. 
+ audioRef.current.src = fetchedAudioUrlRef.current + + setLoadingMsg('Decoding audio data...') + let audioBuffer: AudioBuffer + try { + audioBuffer = await promisifyDecodeAudioData(audioContext, arrBuffer) + } catch (e) { + setErrMsg('Could not decode audio data. The file is either corrupted or the format is not supported.') + return + } + const channel2 = audioBuffer.numberOfChannels > 1 ? audioBuffer.getChannelData(1) : undefined + setWaveFormData(averageChannels(audioBuffer.getChannelData(0), channel2)) + setDuration(audioBuffer.duration) + setLoadingMsg('') + }, + updateTrack = (audioEl: HTMLAudioElement) => { + // We need higher frequency than HTMLAudioELEMENT's onTimeUpdate provides. + window.requestAnimationFrame(() => { + setCurrentTime(audioEl.currentTime) + setIsPlaying(isPlaying => { + if (isPlaying) updateTrack(audioEl) + return isPlaying + }) + }) + }, + onPlayerStateChange = () => { + const audioContext = audioContextRef.current + const audioEl = audioRef.current + if (!audioContext || !audioEl) return + if (audioContext.state === 'suspended') audioContext.resume() + + setIsPlaying(isPlaying => !isPlaying) + if (isPlaying) audioEl.pause() + else { + audioEl.play() + updateTrack(audioEl) + } + }, + onAudioEnded = () => setIsPlaying(false), + onTrackChange = (value: F, _range?: [F, F]) => skipToTime(value - (isPlaying ? TRACK_SYNC_OFFSET : 0))(), + onVolumeChange = (v: F) => { + if (gainNodeRef.current) gainNodeRef.current.gain.value = v + setVolumeIcon(v === 0 ? 'VolumeDisabled' : (v < 0.3 ? 'Volume1' : (v < 0.75 ? 
'Volume2' : 'Volume3'))) + }, + onSpeedChange = (v: U) => { if (audioRef.current) audioRef.current.playbackRate = v }, + skipToTime = (newTime: F) => () => { + if (!audioRef.current) return + setCurrentTime(newTime) + audioRef.current.currentTime = newTime + }, + onAnnotate = React.useCallback((newAnnotations: DrawnAnnotation[]) => { + const annotations = [] + for (let i = 0; i < newAnnotations.length; i++) { + const { start, end, tag, isZoom } = newAnnotations[i] + if (!isZoom) annotations.push({ start, end, tag }) + } + wave.args[model.name] = annotations + if (model.trigger) wave.push() + }, [model.name, model.trigger]) + + React.useEffect(() => { + getAudioData() + wave.args[model.name] = (model.items as unknown as Rec[]) || [] + return () => { if (fetchedAudioUrlRef.current) URL.revokeObjectURL(fetchedAudioUrlRef.current) } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + return ( + + {model.title} + + { + waveFormData ? ( + <> + + ( + + + + + onSpeedChange(option!.key as U)} + /> + + )} + backgroundData={waveFormData} + /> + + + + + + + + + + > + ) : ( + + {errMsg + ? ( + <> + + {errMsg} + > + ) + : } + + + ) + } + + ) +} \ No newline at end of file diff --git a/ui/src/form.tsx b/ui/src/form.tsx index 6651b8ec39b..e4d3dfb0bfb 100644 --- a/ui/src/form.tsx +++ b/ui/src/form.tsx @@ -16,6 +16,7 @@ import * as Fluent from '@fluentui/react' import { B, Model, Packed, S, unpack } from 'h2o-wave' import React from 'react' import { stylesheet } from 'typestyle' +import { AudioAnnotator, XAudioAnnotator } from './audio_annotator' import { Button, Buttons, MiniButton, MiniButtons, XButtons, XMiniButton, XMiniButtons, XStandAloneButton } from './button' import { Checkbox, XCheckbox } from './checkbox' import { Checklist, XChecklist } from './checklist' @@ -153,6 +154,8 @@ export interface Component { text_annotator?: TextAnnotator /** Image annotator. */ image_annotator?: ImageAnnotator + /** Audio annotator. 
*/ + audio_annotator?: AudioAnnotator /** Facepile. */ facepile?: Facepile /** Copyable text. */ @@ -337,6 +340,7 @@ const if (m.persona) return if (m.text_annotator) return if (m.image_annotator) return + if (m.audio_annotator) return if (m.mini_button) return if (m.mini_buttons) return if (m.facepile) return diff --git a/ui/src/image_annotator.tsx b/ui/src/image_annotator.tsx index 8bbfdd62746..e84e2f5fd99 100644 --- a/ui/src/image_annotator.tsx +++ b/ui/src/image_annotator.tsx @@ -80,7 +80,7 @@ export interface ImageAnnotator { image_height?: S /** List of allowed shapes. Available values are 'rect' and 'polygon'. If not set, all shapes are available by default. */ allowed_shapes?: S[] - /** The events to capture on this image annotator. One of `click` or `tool_change`. */ + /** The events to capture on this image annotator. One of `click` | `tool_change`. */ events?: S[] } diff --git a/ui/src/image_annotator_rect.ts b/ui/src/image_annotator_rect.ts index 17b6c4a9301..1e8d8b2bfff 100644 --- a/ui/src/image_annotator_rect.ts +++ b/ui/src/image_annotator_rect.ts @@ -154,7 +154,7 @@ export const if (isFocused && getCorner(cursor_x, cursor_y, rect)) return true const { x2, x1, y2, y1 } = rect - return cursor_x > x1 && cursor_x < x2 && cursor_y > y1 && cursor_y < y2 + return cursor_x >= x1 && cursor_x <= x2 && cursor_y >= y1 && cursor_y <= y2 }, getCorner = (x: U, y: U, { x1, y1, x2, y2 }: ImageAnnotatorRect) => { if (x > x1 - ARC_RADIUS && x < x1 + ARC_RADIUS && y > y1 - ARC_RADIUS && y < y1 + ARC_RADIUS) return 'topLeft' diff --git a/ui/src/parts/annotator_utils.ts b/ui/src/parts/annotator_utils.ts new file mode 100644 index 00000000000..a645a8c1166 --- /dev/null +++ b/ui/src/parts/annotator_utils.ts @@ -0,0 +1 @@ +export const eventToCursor = (e: React.MouseEvent, rect: DOMRect) => ({ cursor_x: e.clientX - rect.left, cursor_y: e.clientY - rect.top }) \ No newline at end of file diff --git a/ui/src/parts/audioUtils.ts b/ui/src/parts/audioUtils.ts new file mode 
100644 index 00000000000..f108f21bbbe --- /dev/null +++ b/ui/src/parts/audioUtils.ts @@ -0,0 +1,27 @@ +import { F } from "h2o-wave" + +export const + averageChannels = (channel1: Float32Array, channel2?: Float32Array) => { + if (!channel2) return channel1 + const result = new Float32Array(channel1.length) + for (let i = 0; i < channel1.length; i++) { + result[i] = (channel1[i] + channel2[i]) / 2 + } + return result + }, + parseAudioData = (samples: F, rawData: Float32Array) => { + if (!samples) return [] + + const blockSize = Math.floor(rawData.length / samples) + const filteredData = new Array(samples) + for (let i = 0; i < samples; i++) { + const blockStart = blockSize * i // the location of the first sample in the block + let sum = 0 + for (let j = 0; j < blockSize; j++) { + sum += Math.abs(rawData[blockStart + j]) // find the sum of all the samples in the block + } + filteredData[i] = sum / blockSize // divide the sum by the block size to get the average + } + const multiplier = Math.pow(Math.max(...filteredData), -1) + return filteredData.map(n => n * multiplier) + } \ No newline at end of file diff --git a/ui/src/parts/range_annotator.tsx b/ui/src/parts/range_annotator.tsx new file mode 100644 index 00000000000..b450060efc9 --- /dev/null +++ b/ui/src/parts/range_annotator.tsx @@ -0,0 +1,720 @@ +import * as Fluent from '@fluentui/react' +import { B, F, S, U, xid } from 'h2o-wave' +import React from 'react' +import { stylesheet } from 'typestyle' +import { AudioAnnotatorItem, AudioAnnotatorTag } from '../audio_annotator' +import { isIntersectingRect } from '../image_annotator_rect' +import { clas, cssVar, cssVarValue } from '../theme' +import { eventToCursor } from './annotator_utils' +import { parseAudioData } from './audioUtils' +import { Waveform } from './waveform' + +type RangeAnnotatorProps = { + activeTag: S + tags: AudioAnnotatorTag[] + trackPosition: F + duration: F + items?: AudioAnnotatorItem[] + backgroundData: Float32Array + setActiveTag: (tag: S) 
=> void + onAnnotate: (annotations: DrawnAnnotation[]) => void + onRenderToolbar?: () => JSX.Element +} +type AnnotatorProps = { + annotations: DrawnAnnotation[] + activeTag: S + trackPosition: F | null + duration: F + start?: F + colorsMap: Map + setActiveTag: (tag: S) => void + addNewAnnotation: (annotation: DrawnAnnotation, unzoom?: B) => void + focusAnnotation: (annotation?: DrawnAnnotation) => void + moveOrResizeAnnotation: (annotation?: DrawnAnnotation) => void + setZoom?: React.Dispatch> +} +type DraggedAnnotation = { + from: U + to: U + action?: 'resize' | 'move' | 'new' + resized?: 'from' | 'to' + intersected?: DrawnAnnotation + newAnnotationStart?: F +} +type TooltipProps = { title: S, range: S, top: U, left: U } +type TagColor = { transparent: S, color: S, label: S } +export type DrawnAnnotation = AudioAnnotatorItem & { + // TODO: Think of a better way to move/resize zoomed annotations. + id: S + canvasStart: F + canvasEnd: F + canvasHeight: U + canvasY: U + isFocused?: B + isZoom?: B +} + +const + WAVEFORM_HEIGHT = 100, + MIN_ANNOTATION_WIDTH = 5, + ANNOTATION_HANDLE_OFFSET = 3, + LEFT_TOOLTIP_OFFSET = 25, + TOOLTIP_WIDTH = 200, + TRACK_WIDTH = 4, + ZOOM_STROKE_WIDTH = 3, + css = stylesheet({ + annotatorContainer: { + width: '100%', + height: WAVEFORM_HEIGHT, + position: 'relative', + marginTop: 15 + }, + annotatorCanvas: { + position: 'absolute', + top: 0, + width: '100%', + height: WAVEFORM_HEIGHT, + }, + tooltip: { + position: 'absolute', + display: 'none', + zIndex: 1, + padding: 15, + background: cssVar('$card'), + width: TOOLTIP_WIDTH, + borderRadius: 2, + userSelect: 'none', + boxShadow: `${cssVar('$text1')} 0px 6.4px 14.4px 0px, ${cssVar('$text2')} 0px 1.2px 3.6px 0px`, + boxSizing: 'border-box', + }, + timeComponent: { + textAlign: 'center', + $nest: { + 'span': { + display: 'inline-block', + width: 18, + }, + '.wave-time-delimiter': { + width: 'auto', + } + } + }, + timeComponentBig: { + $nest: { + 'span': { + width: 35, + fontSize: 30, + }, + 
'.wave-time-delimiter': { + width: 10, + marginLeft: 3 + } + } + }, + }), + getIntersectingEdge = (x: U, intersected?: { canvasStart: F, canvasEnd: F }) => { + if (!intersected) return + const { canvasStart, canvasEnd } = intersected + if (Math.abs(canvasStart - x) <= ANNOTATION_HANDLE_OFFSET) return 'from' + if (Math.abs(canvasEnd - x) <= ANNOTATION_HANDLE_OFFSET) return 'to' + }, + getResized = (cursor_x: F, min: F, max: F) => { + return cursor_x <= min + ? 'from' + : cursor_x >= max + ? 'to' + : undefined + }, + getTooltipTopOffset = (cursorY: F) => cursorY - 80, + getTooltipLeftOffset = (cursorX: F, canvasWidth: F) => { + return (cursorX + TOOLTIP_WIDTH + LEFT_TOOLTIP_OFFSET) > canvasWidth + ? cursorX - TOOLTIP_WIDTH - LEFT_TOOLTIP_OFFSET + : cursorX + LEFT_TOOLTIP_OFFSET + }, + isAnnotationIntersectingAtStart = (a1?: DrawnAnnotation, a2?: DrawnAnnotation) => { + return a1 && a2 && a1.canvasEnd >= a2.canvasStart && a2.canvasStart >= a1.canvasStart + }, + isAnnotationIntersectingAtEnd = (a1?: DrawnAnnotation, a2?: DrawnAnnotation) => { + return a1 && a2 && a2.canvasStart >= a1.canvasStart && a2.canvasStart <= a1.canvasEnd + }, + canvasUnitsToSeconds = (canvasUnit: F, canvasWidth: F, duration: F) => +(canvasUnit / canvasWidth * duration).toFixed(2), + createAnnotation = (from: U, to: U, tag: S, canvasWidth: F, duration: F): DrawnAnnotation => { + const canvasStart = Math.min(from, to) + const canvasEnd = Math.max(from, to) + const start = canvasUnitsToSeconds(from, canvasWidth, duration) + const end = canvasUnitsToSeconds(to, canvasWidth, duration) + return { id: xid(), canvasStart, canvasEnd, start, end, tag, canvasHeight: WAVEFORM_HEIGHT, canvasY: 0 } + }, + getIntersectedAnnotation = (annotations: DrawnAnnotation[], x: U, y: U) => { + // TODO: Improve perf - binary search. 
+ return annotations.find(a => isIntersectingRect(x, y, { x1: a.canvasStart, x2: a.canvasEnd, y1: a.canvasY, y2: a.canvasHeight + a.canvasY })) + }, + getAnnotationsWithinRange = (annotations: DrawnAnnotation[], { from, to }: { from: F, to: F }, canvasWidth: F) => { + // TODO: Improve perf - binary search. + const annotationsWithinRange: DrawnAnnotation[] = [] + const rangeSize = Math.abs(to - from) + + for (let i = 0; i < annotations.length; i++) { + const a = annotations[i] + if (a.isZoom) continue + const aStart = a.canvasStart + const aEnd = a.canvasEnd + if ((aStart >= from && aStart <= to) || (aEnd >= from && aEnd <= to) || (aStart <= from && aEnd >= to)) { + const canvasStart = (aStart - from) / rangeSize * canvasWidth + const canvasEnd = (aEnd - from) / rangeSize * canvasWidth + annotationsWithinRange.push({ ...a, canvasStart, canvasEnd }) + } + } + + return annotationsWithinRange + }, + getCanvasDimensions = (intersections: DrawnAnnotation[], annotation: DrawnAnnotation, isFirst: B, maxDepth?: U) => { + const verticalIntersections = intersections + .filter(a => isAnnotationIntersectingAtEnd(a, annotation)) + .sort((a, b) => a.canvasY - b.canvasY) + let canvasY = 0 + let j = 0 + while (!isFirst && canvasY === verticalIntersections[j]?.canvasY) { + canvasY += verticalIntersections[j].canvasHeight + j++ + } + const canvasHeight = maxDepth + ? WAVEFORM_HEIGHT / maxDepth + : Math.abs(canvasY - (verticalIntersections[j]?.canvasY || WAVEFORM_HEIGHT)) + return { canvasY, canvasHeight } + }, + getMaxDepth = (annotations: DrawnAnnotation[], idx: U, annotation: DrawnAnnotation, currMax: U) => { + // TODO: Super ugly perf-wise. 
+ let currmax = annotations.filter(a => annotation.canvasStart >= a.canvasStart && annotation.canvasStart <= a.canvasEnd).length + for (let j = idx + 1; annotations[j]?.canvasStart >= annotation?.canvasStart && annotations[j]?.canvasStart <= annotation?.canvasEnd; j++) { + currmax = Math.max(currmax, getMaxDepth(annotations, j, annotations[j], currMax + 1)) + } + return currmax + }, + itemsToAnnotations = (items?: AudioAnnotatorItem[]) => { + return items?.map(i => ({ ...i, id: xid(), canvasHeight: WAVEFORM_HEIGHT, canvasY: 0, canvasStart: i.start, canvasEnd: i.end })) || [] + }, + needsZoom = (duration: F) => duration > 120, + drawAnnotation = (ctx: CanvasRenderingContext2D, { tag, canvasStart, canvasEnd, canvasHeight, canvasY, isFocused }: DrawnAnnotation, colorsMap: Map) => { + ctx.fillStyle = colorsMap.get(tag)?.transparent || 'red' + ctx.fillRect(canvasStart, canvasY, canvasEnd - canvasStart, canvasHeight) + if (isFocused) { + ctx.strokeStyle = colorsMap.get(tag)?.color || 'red' + ctx.lineWidth = ZOOM_STROKE_WIDTH + ctx.strokeRect(canvasStart, canvasY, canvasEnd - canvasStart, canvasHeight) + } + }, + Annotator = (props: React.PropsWithChildren) => { + const + { annotations, activeTag, addNewAnnotation, trackPosition, duration, setActiveTag, + children, colorsMap, moveOrResizeAnnotation, focusAnnotation, setZoom, start = 0 } = props, + canvasRef = React.useRef(null), + ctxRef = React.useRef(null), + currDrawnAnnotation = React.useRef(undefined), + isDefaultCanvasWidthFixed = React.useRef(false), + [tooltipProps, setTooltipProps] = React.useState(null), + redrawAnnotations = React.useCallback(() => { + const canvas = canvasRef.current + const ctx = ctxRef.current + if (!ctx || !canvas) return + ctx.clearRect(0, 0, canvas.width, canvas.height) + const action = currDrawnAnnotation.current?.action + const moveOrResize = action === 'move' || action === 'resize' + const intersected = currDrawnAnnotation.current?.intersected + + for (let i = 0; i < 
annotations.length; i++) { + const { id, canvasStart, canvasEnd, canvasHeight, canvasY, isZoom } = annotations[i] + if (isZoom && !setZoom) continue + if (isZoom) { + ctx.strokeStyle = cssVarValue('$themePrimary') + ctx.lineWidth = ZOOM_STROKE_WIDTH + ctx.strokeRect(canvasStart, canvasY, canvasEnd - canvasStart, canvasHeight) + continue + } + if (moveOrResize && intersected?.id === id) continue + drawAnnotation(ctx, annotations[i], colorsMap) + } + + if (currDrawnAnnotation.current && action === 'new') { + const { from, to } = currDrawnAnnotation.current + ctx.fillStyle = colorsMap.get(activeTag)?.transparent || 'red' + ctx.fillRect(from, 0, to - from, WAVEFORM_HEIGHT) + } + if (moveOrResize && intersected && !intersected.isZoom) drawAnnotation(ctx, intersected, colorsMap) + + // Draw track. + if (trackPosition !== null) { + const trackP = trackPosition === 1 ? canvas.width - TRACK_WIDTH : canvas.width * trackPosition + ctx.fillStyle = cssVarValue('$themeDark') + ctx.fillRect(trackP - (TRACK_WIDTH / 2), 0, TRACK_WIDTH, WAVEFORM_HEIGHT) + } + }, [activeTag, annotations, colorsMap, trackPosition, setZoom]), + onMouseDown = (e: React.MouseEvent) => { + if (e.buttons !== 1) return // Accept left-click only. + const canvas = canvasRef.current + if (!canvas) return + const { cursor_x, cursor_y } = eventToCursor(e, canvas.getBoundingClientRect()) + + const intersected = getIntersectedAnnotation(annotations, cursor_x, cursor_y) + const resized = getIntersectingEdge(cursor_x, intersected) + const action = (intersected?.isFocused || intersected?.isZoom) ? 
(resized && 'resize') || 'move' : undefined + currDrawnAnnotation.current = { from: cursor_x, to: cursor_x, action, intersected, resized } + }, + onMouseMove = (e: React.MouseEvent) => { + const canvas = canvasRef.current + const ctx = ctxRef.current + if (!ctx || !canvas) return + + const { cursor_x, cursor_y } = eventToCursor(e, canvas.getBoundingClientRect()) + + const intersected = getIntersectedAnnotation(annotations, cursor_x, cursor_y) || currDrawnAnnotation.current?.intersected + const canvasWidth = canvasRef.current.width + + setTooltipProps(!intersected || intersected.isZoom ? null : { + title: colorsMap.get(intersected.tag)?.label || '', + range: `${formatTime(intersected.start)} - ${formatTime(intersected.end)}`, + top: getTooltipTopOffset(cursor_y), + left: getTooltipLeftOffset(cursor_x, canvasWidth) + }) + + canvas.style.cursor = (intersected?.isFocused || intersected?.isZoom) + ? getIntersectingEdge(cursor_x, intersected) ? 'ew-resize' : 'move' + : 'pointer' + + if (!currDrawnAnnotation.current || e.buttons !== 1) return + if (!currDrawnAnnotation.current?.action) { + currDrawnAnnotation.current.action = 'new' + currDrawnAnnotation.current.newAnnotationStart = currDrawnAnnotation.current.from + } + + let tooltipFrom = 0 + let tooltipTo = 0 + const { action, intersected: currIntersected } = currDrawnAnnotation.current + if (action === 'new' && currDrawnAnnotation.current.newAnnotationStart) { + const newAnnotationStart = currDrawnAnnotation.current.newAnnotationStart + const from = Math.min(cursor_x, newAnnotationStart) + const to = Math.max(cursor_x, newAnnotationStart) + tooltipFrom = from + tooltipTo = to + currDrawnAnnotation.current = { from, to, action: 'new', newAnnotationStart } + canvas.style.cursor = 'ew-resize' + } + else if (action === 'move' && currIntersected) { + const movedOffset = cursor_x - currDrawnAnnotation.current.from + const newCanvasStart = currIntersected.canvasStart + movedOffset + const newCanvasEnd = 
currIntersected.canvasEnd + movedOffset + if (newCanvasStart >= 0 && newCanvasEnd <= canvasWidth) { + currIntersected.canvasStart = newCanvasStart + currIntersected.canvasEnd = newCanvasEnd + currIntersected.start = canvasUnitsToSeconds(newCanvasStart, canvasWidth, duration) + currIntersected.end = canvasUnitsToSeconds(newCanvasEnd, canvasWidth, duration) + } + tooltipFrom = currIntersected.canvasStart + tooltipTo = currIntersected.canvasEnd + currDrawnAnnotation.current.from += movedOffset + canvas.style.cursor = 'move' + if (currIntersected.isZoom && setZoom) setZoom({ from: currIntersected.canvasStart, to: currIntersected.canvasEnd }) + } + else if (action === 'resize' && currIntersected) { + const { resized } = currDrawnAnnotation.current + const canvasWidth = canvasRef.current.width + if (resized === 'from') { + currIntersected.canvasStart = Math.max(cursor_x, 0) + currIntersected.start = canvasUnitsToSeconds(currIntersected.canvasStart, canvasWidth, duration) + } + else if (resized === 'to') { + currIntersected.canvasEnd = Math.min(cursor_x, canvasWidth) + currIntersected.end = canvasUnitsToSeconds(currIntersected.canvasEnd, canvasWidth, duration) + } + + const min = Math.min(currIntersected.canvasStart, currIntersected.canvasEnd, cursor_x) + const max = Math.max(currIntersected.canvasStart, currIntersected.canvasEnd, cursor_x) + currDrawnAnnotation.current.resized = getResized(cursor_x, min, max) || currDrawnAnnotation.current.resized + + tooltipFrom = min + tooltipTo = max + canvas.style.cursor = 'ew-resize' + if (currIntersected.isZoom && setZoom) setZoom({ from: currIntersected.canvasStart, to: currIntersected.canvasEnd }) + } + + redrawAnnotations() + setTooltipProps(currIntersected?.isZoom ? 
null : { + title: colorsMap.get(activeTag)!.label, + range: `${formatTime(tooltipFrom / canvas.width * duration)} - ${formatTime(tooltipTo / canvas.width * duration)}`, + top: getTooltipTopOffset(cursor_y), + left: getTooltipLeftOffset(cursor_x, canvasWidth) + }) + }, + onMouseLeave = () => { + currDrawnAnnotation.current = undefined + redrawAnnotations() + setTooltipProps(null) + }, + onClick = (e: React.MouseEvent) => { + const canvas = canvasRef.current + const ctx = ctxRef.current + if (!canvas || !ctx) return + + const action = currDrawnAnnotation.current?.action + const { cursor_x, cursor_y } = eventToCursor(e, canvas.getBoundingClientRect()) + const intersected = getIntersectedAnnotation(annotations, cursor_x, cursor_y) + + if ((!action || action === 'new') && !intersected) focusAnnotation(undefined) + + canvas.style.cursor = intersected + ? getIntersectingEdge(cursor_x, intersected) ? 'ew-resize' : 'move' + : 'pointer' + + if (!currDrawnAnnotation.current || !action) { + if (intersected && intersected.tag !== activeTag) setActiveTag(intersected.tag) + if (intersected) focusAnnotation(intersected) + return + } + + if (action === 'new') { + const { from, to } = currDrawnAnnotation.current + const annotationWidth = Math.abs(from - to) + if (annotationWidth < MIN_ANNOTATION_WIDTH) return + addNewAnnotation(createAnnotation(from, to, activeTag, canvasRef.current.width, duration), !setZoom) + } + else if (action === 'resize') { + const resized = currDrawnAnnotation.current.intersected + if (resized) { + const { canvasStart, canvasEnd } = resized + resized.canvasStart = Math.min(canvasStart, canvasEnd) + resized.canvasEnd = Math.max(canvasStart, canvasEnd) + resized.start = canvasUnitsToSeconds(resized.canvasStart, canvasRef.current.width, duration) + resized.end = canvasUnitsToSeconds(resized.canvasEnd, canvasRef.current.width, duration) + } + } + + if (action === 'move' || action === 'resize') { + moveOrResizeAnnotation(setZoom ? 
undefined : currDrawnAnnotation.current.intersected) + } + + currDrawnAnnotation.current = undefined + }, + init = React.useCallback((): U | undefined => { + // Set correct canvas coordinate system from default 300:150 since we resize canvas using CSS. + if (canvasRef.current) { + canvasRef.current.width = canvasRef.current.getBoundingClientRect().width + ctxRef.current = canvasRef.current.getContext('2d') + isDefaultCanvasWidthFixed.current = true + redrawAnnotations() + } + // If canvas is not ready or didn't resize yet, try again later. + if (!canvasRef.current || !isDefaultCanvasWidthFixed.current) return setTimeout(init, 300) as unknown as U + }, [redrawAnnotations]) + + React.useEffect(() => { + window.addEventListener('resize', init) + return () => window.removeEventListener('resize', init) + }, [init]) + + React.useEffect(() => redrawAnnotations(), [annotations, redrawAnnotations]) + + React.useEffect(() => { + const timeout = init() + return () => window.clearTimeout(timeout) + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + return ( + + + + {tooltipProps?.title} + {tooltipProps?.range} + + {children} + + + + + + + + ) + } + +export const + recalculateAnnotations = (annotations: DrawnAnnotation[]): DrawnAnnotation[] => { + const visited = new Set() + const mergedAnnotations: DrawnAnnotation[] = [] + let zoomAnnotation: DrawnAnnotation | null = null + + for (let i = 0; i < annotations.length; i++) { + const currAnnotation = annotations[i] + if (currAnnotation.isZoom) { + zoomAnnotation = currAnnotation + continue + } + if (visited.has(currAnnotation)) continue + mergedAnnotations.push(currAnnotation) + + for (let j = i + 1; j < annotations.length; j++) { + const nextAnnotation = annotations[j] + if (currAnnotation.tag !== nextAnnotation.tag) continue + if (!isAnnotationIntersectingAtEnd(currAnnotation, nextAnnotation)) break + currAnnotation.end = Math.max(currAnnotation.end, nextAnnotation.end) + currAnnotation.canvasEnd = 
Math.max(currAnnotation.canvasEnd, nextAnnotation.canvasEnd) + visited.add(nextAnnotation) + } + } + + // Reset vertical position of annotations. + mergedAnnotations.forEach(annotation => annotation.canvasY = Infinity) + + let currMaxDepth = 1 + for (let i = 0; i < mergedAnnotations.length; i++) { + const annotation = mergedAnnotations[i] + const nextIntersections = [] + const prevIntersections = [] + // Perf: Think about when to stop to not iterate over all annotations every time. + for (let j = i - 1; j >= 0; j--) { + if (isAnnotationIntersectingAtStart(mergedAnnotations[j], annotation)) prevIntersections.push(mergedAnnotations[j]) + } + for (let j = i + 1; isAnnotationIntersectingAtEnd(annotation, mergedAnnotations[j]); j++) { + nextIntersections.push(mergedAnnotations[j]) + } + + const intersections = [...prevIntersections, ...nextIntersections] + const maxDepth = getMaxDepth(mergedAnnotations, i, annotation, 1) + const shouldFillRemainingSpace = !nextIntersections.length + const isFirst = !prevIntersections.length + currMaxDepth = intersections.length ? Math.max(currMaxDepth, maxDepth) : 1 + + const { canvasY, canvasHeight } = getCanvasDimensions(intersections, annotation, isFirst, shouldFillRemainingSpace ? 0 : maxDepth) + annotation.canvasY = canvasY + annotation.canvasHeight = canvasHeight + } + + return zoomAnnotation ? [zoomAnnotation, ...mergedAnnotations] : mergedAnnotations + }, + formatTime = (secs: F) => { + const hours = Math.floor(secs / 3600) + const minutes = Math.floor(secs / 60) % 60 + const seconds = (secs % 60).toFixed(2) + + return [hours, minutes, seconds] + .map(v => +v < 10 ? 
"0" + v : v) + .filter((v, i) => v !== "00" || i > 0) + .join(":") + }, + TimeComponent = ({ secs, isBig = false }: { secs: F, isBig?: B }) => { + const hours = Math.floor(secs / 3600) + const minutes = Math.floor(secs / 60) % 60 + const [seconds, miliseconds] = (secs % 60).toFixed(2).split('.').map(v => +v) + const [h, m, s, ms] = [hours, minutes, seconds, miliseconds].map(v => v < 10 ? '0' + v : String(v)) + return ( + + {h !== '00' && <> + {h} + : + >} + {m} + : + {s} + . + {ms} + + ) + }, + RangeAnnotator = (props: RangeAnnotatorProps) => { + const + { onAnnotate, activeTag, tags, trackPosition, items, duration, setActiveTag, + onRenderToolbar, backgroundData } = props, + [removeAllDisabled, setRemoveAllDisabled] = React.useState(!items?.length), + [removeDisabled, setRemoveDisabled] = React.useState(true), + [annotations, setAnnotations] = React.useState(itemsToAnnotations(items)), + [zoom, setZoom] = React.useState({ from: 0, to: 100 }), + annotatorContainerRef = React.useRef(null), + canvasWidth = annotatorContainerRef.current?.getBoundingClientRect().width || 0, + parsedAudioData = React.useMemo(() => parseAudioData(canvasWidth, backgroundData), [backgroundData, canvasWidth]), + parsedZoomAudioData = React.useMemo(() => parseAudioData(canvasWidth, + backgroundData.slice( + Math.ceil(zoom.from / canvasWidth * backgroundData.length), + Math.floor(zoom.to / canvasWidth * backgroundData.length) + )), [backgroundData, canvasWidth, zoom.from, zoom.to]), + theme = Fluent.useTheme(), + colorsMap = React.useMemo(() => new Map(tags.map(tag => { + const color = Fluent.getColorFromString(cssVarValue(tag.color)) + return [tag.name, { + transparent: color ? 
`rgba(${color.r}, ${color.g}, ${color.b}, 0.5)` : cssVarValue(tag.color), + color: cssVarValue(tag.color), + label: tag.label + }] + // eslint-disable-next-line react-hooks/exhaustive-deps + })), [tags, theme]), + recalcAnnotations = React.useCallback((submit = false) => { + setAnnotations(annotations => { + const mergedAnnotations = recalculateAnnotations(annotations) + if (submit) onAnnotate(mergedAnnotations[0]?.isZoom ? mergedAnnotations.slice(1) : mergedAnnotations) + return mergedAnnotations + }) + }, [onAnnotate]), + focusAnnotation = React.useCallback((annotation?: DrawnAnnotation) => { + setAnnotations(annotations => annotations.map(a => { a.isFocused = a.start === annotation?.start && a.end === annotation.end; return a })) + setRemoveDisabled(!annotation) + }, []), + addNewAnnotation = React.useCallback((annotation: DrawnAnnotation, unzoom = false) => { + setAnnotations(prev => { + if (unzoom) { + const startOffset = zoom.from + annotation.canvasStart = startOffset + (annotation.canvasStart / canvasWidth * (zoom.to - zoom.from)) + annotation.canvasEnd = startOffset + (annotation.canvasEnd / canvasWidth * (zoom.to - zoom.from)) + } + // TODO: Prev array is already sorted so adding a new element can be made O(N) instead of O(NlogN). 
+ return [...prev, annotation].sort((a, b) => a.canvasStart - b.canvasStart) + }) + recalcAnnotations(true) + setRemoveAllDisabled(false) + }, [canvasWidth, recalcAnnotations, zoom.from, zoom.to]), + moveOrResizeAnnotation = React.useCallback((annotation?: DrawnAnnotation) => { + if (annotation) { + const startOffset = zoom.from + annotation.canvasStart = startOffset + (annotation.canvasStart / canvasWidth * (zoom.to - zoom.from)) + annotation.canvasEnd = startOffset + (annotation.canvasEnd / canvasWidth * (zoom.to - zoom.from)) + setAnnotations(annotations => { + annotations[annotations.findIndex(a => a.id === annotation.id)] = annotation + return annotations + }) + } + // TODO: Prev array is already sorted so adding a new element can be made O(N) instead of O(NlogN). + setAnnotations(prev => prev.sort((a, b) => a.canvasStart - b.canvasStart)) + recalcAnnotations(true) + }, [canvasWidth, recalcAnnotations, zoom.from, zoom.to]), + reset = () => { + setAnnotations(annotations => annotations.filter(a => a.isZoom)) + onAnnotate([]) + setRemoveDisabled(true) + setRemoveAllDisabled(true) + }, + removeAnnotation = () => { + setAnnotations(annotations => { + const newAnnotations = annotations.filter(a => !a.isFocused) + setRemoveAllDisabled(newAnnotations.length === 0) + return newAnnotations + }) + setRemoveDisabled(true) + recalcAnnotations(true) + }, + getZoomedTrackPosition = () => { + const trackPosPx = trackPosition * canvasWidth + const withinBounds = trackPosPx >= zoom.from && trackPosPx <= zoom.to + return withinBounds ? 
(trackPosPx - zoom.from) / (zoom.to - zoom.from) : null + } + + React.useEffect(() => { + const annotations = itemsToAnnotations(items) + annotations.sort((a, b) => a.start - b.start) + setAnnotations(recalculateAnnotations(annotations)) + }, [items]) + + React.useEffect(() => { + setAnnotations(annotations => annotations.map(a => { + if (a.isFocused) { + const tagChanged = a.tag !== activeTag + a.tag = activeTag + if (tagChanged) onAnnotate(annotations) + } + return a + })) + }, [activeTag, onAnnotate]) + + React.useEffect(() => { + setAnnotations(annotations => { + if (needsZoom(duration) && !annotations[0]?.isZoom) { + const zoomAnnotation: DrawnAnnotation = { + id: xid(), + canvasStart: 0, + canvasY: ZOOM_STROKE_WIDTH - 1, + canvasEnd: 100, + canvasHeight: WAVEFORM_HEIGHT - (2 * (ZOOM_STROKE_WIDTH - 1)), + start: -1, + end: -1, + tag: '', + isZoom: true + } + return [zoomAnnotation, ...annotations] + } + return annotations + }) + }, [duration]) + + return ( + <> + + + + + {onRenderToolbar && onRenderToolbar()} + + + { + annotatorContainerRef.current && ( + + + + ) + } + + {needsZoom(duration) && ( + + + + ) + } + > + ) + } diff --git a/ui/src/parts/waveform.tsx b/ui/src/parts/waveform.tsx new file mode 100644 index 00000000000..3605c2fcdae --- /dev/null +++ b/ui/src/parts/waveform.tsx @@ -0,0 +1,74 @@ +// Copyright 2020 H2O.ai, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import React from 'react' +import { stylesheet } from 'typestyle' +import { cssVarValue } from '../theme' +import { F, S } from 'h2o-wave' +import { debounce } from '../ui' + +interface Props { + color: S + data: F[] +} + +const + TICK_WIDTH = 1, + css = stylesheet({ + container: { + width: '100%', + height: '100%', + position: 'relative', + flexGrow: 1, + } + }) + +export const Waveform = React.memo(({ color, data }: Props) => { + const + ref = React.useRef(null), + [height, setHeight] = React.useState(0), + [width, setWidth] = React.useState(0), + xStep = React.useMemo(() => width / data.length, [data.length, width]), + updateDimensions = () => { + if (!ref.current) return + const { width, height } = ref.current.getBoundingClientRect() + setWidth(width) + setHeight(height) + } + + React.useLayoutEffect(() => { + updateDimensions() + const onResize = debounce(1000, updateDimensions) + window.addEventListener('resize', onResize) + return () => window.removeEventListener('resize', onResize) + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + return ( + + + {data.map((d, i) => ( + + ))} + + + ) +}) \ No newline at end of file diff --git a/website/docs/examples/assets/audio-annotator.png b/website/docs/examples/assets/audio-annotator.png new file mode 100644 index 00000000000..58414d770ce Binary files /dev/null and b/website/docs/examples/assets/audio-annotator.png differ diff --git a/website/sidebars.js b/website/sidebars.js index 909ae70ab82..b98bf46ca6d 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -107,7 +107,6 @@ module.exports = { "widgets/form/file_upload", "widgets/form/frame", "widgets/form/image", - "widgets/form/image_annotator", "widgets/form/inline", "widgets/form/label", "widgets/form/link", @@ -176,7 +175,9 @@ module.exports = { "type": "category", "label": "AI", "items": [ + "widgets/ai/audio_annotator", "widgets/ai/chatbot", + "widgets/ai/image_annotator", ] }, ], diff --git a/website/widgets/ai/audio_annotator.md 
b/website/widgets/ai/audio_annotator.md new file mode 100644 index 00000000000..0c4dff65ccb --- /dev/null +++ b/website/widgets/ai/audio_annotator.md @@ -0,0 +1,30 @@ +--- +title: Audio annotator +keywords: + - audio + - annotator +custom_edit_url: null +--- + +Useful for labelling audio data. + +Check the full API at [ui.audio_annotator](/docs/api/ui#audio_annotator). + +```py sleep 2 +q.page['example'] = ui.form_card(box='1 1 7 7', items=[ + ui.audio_annotator( + name='annotator', + title='Drag to annotate', + path='/assets/examples/sample-audio.mp3', + tags=[ + ui.audio_annotator_tag(name='f', label='Flute', color='$blue'), + ui.audio_annotator_tag(name='d', label='Drum', color='$brown'), + ], + items=[ + ui.audio_annotator_item(start=0, end=10, tag='f'), + ui.audio_annotator_item(start=10, end=20, tag='d'), + ui.audio_annotator_item(start=20, end=30, tag='f'), + ] + ), +]) +``` diff --git a/website/widgets/form/image_annotator.md b/website/widgets/ai/image_annotator.md similarity index 99% rename from website/widgets/form/image_annotator.md rename to website/widgets/ai/image_annotator.md index 9e6b5128bc9..1aeb55f7120 100644 --- a/website/widgets/form/image_annotator.md +++ b/website/widgets/ai/image_annotator.md @@ -34,7 +34,7 @@ q.page['example'] = ui.form_card(box='1 1 9 10', items=[ ]) ``` -## Allowonly certain drawing shapes +## Allow only certain drawing shapes Use the `allowed_shapes` attribute to limit the available shapes your users might use. The attribute takes a list of strings (either 'rect' or 'polygon'). If not specified, the annotator allows every supported shape.