From 5743fec4add4584c764382e012a93ee170663f5d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Perceval=20Wajsb=C3=BCrt?=
Date: Wed, 28 May 2025 18:37:12 +0200
Subject: [PATCH 1/2] feat: new markup_to_doc converter

---
 changelog.md              |   1 +
 docs/data/converters.md   |  10 ++
 docs/data/index.md        |  13 ++-
 edsnlp/data/converters.py | 240 +++++++++++++++++++++++++++++++++++++-
 pyproject.toml            |   1 +
 5 files changed, 255 insertions(+), 10 deletions(-)

diff --git a/changelog.md b/changelog.md
index e8ef6dafa6..befb3221fd 100644
--- a/changelog.md
+++ b/changelog.md
@@ -7,6 +7,7 @@
 - Handling intra-word linebreak as pollution : adds a pollution pattern that detects intra-word linebreak, which can then be removed in the `get_text` method
 - Qualifiers can process `Span` or `Doc` : this feature especially makes it easier to nest qualifiers components in other components
 - New `label_weights` parameter in `eds.span_classifier`, which allows the user to set per label-value loss weights during training
+- New `edsnlp.data.converters.MarkupToDocConverter` to convert Markdown or XML-like markup to documents, which is particularly useful to create annotated documents from scratch (e.g., for testing purposes).
 
 ### Fixed
 
diff --git a/docs/data/converters.md b/docs/data/converters.md
index 6914dfa998..1897eabcf9 100644
--- a/docs/data/converters.md
+++ b/docs/data/converters.md
@@ -218,3 +218,13 @@ one per entity, that can be used to write to a dataframe. The schema of each pro
     options:
         heading_level: 4
         show_source: false
+
+## Markup (`converter="markup"`) {: #edsnlp.data.converters.MarkupToDocConverter }
+
+This converter is used to convert markup data, such as Markdown or XML, into documents.
+This can be particularly useful when you want to create annotated documents from scratch (e.g., for testing purposes).
+
+::: edsnlp.data.converters.MarkupToDocConverter
+    options:
+        heading_level: 4
+        show_source: false
diff --git a/docs/data/index.md b/docs/data/index.md
index e1198590c7..4bbd9899fe 100644
--- a/docs/data/index.md
+++ b/docs/data/index.md
@@ -46,9 +46,10 @@ At the moment, we support the following data sources:
 
 and the following schemas:
 
-| Schema                                                                      | Snippet                |
-|:----------------------------------------------------------------------------|------------------------|
-| [Custom](./converters/#custom)                                               | `converter=custom_fn`  |
-| [OMOP](./converters/#omop)                                                   | `converter="omop"`     |
-| [Standoff](./converters/#standoff)                                           | `converter="standoff"` |
-| [Ents](./converters/#edsnlp.data.converters.EntsDoc2DictConverter)           | `converter="ents"`     |
+| Schema                                                               | Snippet                |
+|:---------------------------------------------------------------------|------------------------|
+| [Custom](./converters/#custom)                                        | `converter=custom_fn`  |
+| [OMOP](./converters/#omop)                                            | `converter="omop"`     |
+| [Standoff](./converters/#standoff)                                    | `converter="standoff"` |
+| [Ents](./converters/#edsnlp.data.converters.EntsDoc2DictConverter)    | `converter="ents"`     |
+| [Markup](./converters/#edsnlp.data.converters.MarkupToDocConverter)   | `converter="markup"`   |
diff --git a/edsnlp/data/converters.py b/edsnlp/data/converters.py
index c1247a14d0..c601b4d3f3 100644
--- a/edsnlp/data/converters.py
+++ b/edsnlp/data/converters.py
@@ -24,6 +24,7 @@
 from confit.registry import ValidatedFunction
 from spacy.tokenizer import Tokenizer
 from spacy.tokens import Doc, Span
+from typing_extensions import Literal
 
 import edsnlp
 from edsnlp import registry
@@ -707,6 +708,225 @@ def __call__(self, doc):
     ]
 
 
+# ex: `[The [cat](ANIMAL) is [black](COLOR hex="#000000")].
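+# A hypothetical XML-flavoured version of the same sentence (handled by the
+# "xml" preset defined below) would look like:
+# `The <ANIMAL>cat</ANIMAL> is <COLOR hex="#000000">black</COLOR>.`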
+
+
+@registry.factory.register("eds.markup_to_doc", spacy_compatible=False)
+class MarkupToDocConverter:
+    """
+    Examples
+    --------
+    ```python
+    import edsnlp
+
+    # Any kind of reader (`edsnlp.data.read/from_...`) can be used here
+    # If input items are dicts, the converter expects a "text" key/column.
+    docs = list(
+        edsnlp.data.from_iterable(
+            [
+                "This [is](VERB negation=True) not a [test](NOUN).",
+                "This is another [test](NOUN).",
+            ],
+            converter="markup",
+            span_setter="entities",
+        ),
+    )
+    print(docs[0].spans["entities"])
+    # Out: [is, test]
+    ```
+
+    You can also use it directly on a string:
+
+    ```python
+    from edsnlp.data.converters import MarkupToDocConverter
+
+    converter = MarkupToDocConverter(
+        span_setter={"verb": "VERB", "noun": "NOUN"},
+        preset="xml",
+    )
+    doc = converter("This <VERB negation=True>is</VERB> not a <NOUN>test</NOUN>.")
+    print(doc.spans["verb"])
+    # Out: [is]
+    print(doc.spans["verb"][0]._.negation)
+    # Out: True
+    ```
+
+    Parameters
+    ----------
+    preset: Literal["md", "xml"]
+        The preset to use for the markup format. Defaults to "md" (Markdown-like
+        syntax). Use "xml" for XML-like syntax.
+    opener: Optional[str]
+        The regex pattern to match the opening tag of the markup. Defaults to the
+        preset's opener.
+    closer: Optional[str]
+        The regex pattern to match the closing tag of the markup. Defaults to the
+        preset's closer.
+    tokenizer: Optional[Tokenizer]
+        The tokenizer instance used to tokenize the documents. Likely not needed since
+        by default it uses the current context tokenizer:
+
+        - the tokenizer of the next pipeline run by `.map_pipeline` in a
+          [Stream][edsnlp.core.stream.Stream],
+        - or the `eds` tokenizer by default.
+    span_setter: SpanSetterArg
+        The span setter to use when setting the spans in the documents. Defaults to
+        setting the spans in the `ents` attribute and creating a new span group for
+        each entity label.
+    span_attributes: Optional[AttributesMappingArg]
+        Mapping from markup attributes to Span extensions (can be a list too).
+        By default, all attributes are imported as Span extensions with the same name.
+    keep_raw_attribute_values: bool
+        Whether to keep the raw attribute values (as strings) or to convert them to
+        Python objects (e.g. booleans).
+    default_attributes: AttributesMappingArg
+        How to set attributes on spans for which no attribute value was found in the
+        input format. This is especially useful for negation, or for frequent attribute
+        values (e.g. "negated" is often False, "temporal" is often "present") that
+        annotators may not want to annotate every time.
+    bool_attributes: AsList[str]
+        List of boolean attributes to set to False by default. This is useful for
+        attributes that are often not annotated, but for which you want a default
+        value.
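+
+    Notes
+    -----
+    Below is a minimal sketch (not taken from the library's test suite) of how
+    `default_attributes` fills in values for attributes that are absent from the
+    markup; the label "SYMPTOM" and the attribute "negation" are made up for the
+    example:
+
+    ```python
+    converter = MarkupToDocConverter(
+        span_setter={"ents": True},
+        default_attributes={"negation": False},
+    )
+    doc = converter("The patient has [a fever](SYMPTOM).")
+    print(doc.ents[0]._.negation)
+    # Out: False
+    ```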
+ """ + + PRESETS = { + "md": { + "opener": r"(?P\[)", + "closer": r"(?P\]\(\s*(?P[a-zA-Z0-9]+)\s*(?P.*?)\))", # noqa: E501 + }, + "xml": { + "opener": r"(?P<(?P[a-zA-Z0-9]+)(?P.*?)>)", # noqa: E501 + "closer": r"(?P[a-zA-Z0-9]+)>)", + }, + } + + def __init__( + self, + *, + tokenizer: Optional[Tokenizer] = None, + span_setter: SpanSetterArg = {"ents": True, "*": True}, + span_attributes: Optional[AttributesMappingArg] = None, + keep_raw_attribute_values: bool = False, + default_attributes: AttributesMappingArg = {}, + bool_attributes: AsList[str] = [], + preset: Literal["md", "xml"] = "md", + opener: Optional[str] = None, + closer: Optional[str] = None, + ): + self.tokenizer = tokenizer + self.span_setter = span_setter + self.span_attributes = span_attributes + self.keep_raw_attribute_values = keep_raw_attribute_values + self.default_attributes = dict(default_attributes) + for attr in bool_attributes: + self.default_attributes[attr] = False + self.opener = opener or self.PRESETS[preset]["opener"] + self.closer = closer or self.PRESETS[preset]["closer"] + + def _as_python(self, value: str): + import ast + + if self.keep_raw_attribute_values: + return value + try: + return ast.literal_eval(value) + except Exception: + if value.lower() == "true": + return True + elif value.lower() == "false": + return False + return value + + def _parse(self, inline_text: str): + import re + + last_inline_offset = 0 + starts = [] + text = "" + seps = list(re.finditer(self.opener + "|" + self.closer, inline_text)) + entities = [] + for i, sep in enumerate(seps): + is_opener = bool(sep["opener"]) + groups = sep.groupdict() + inline_start = sep.start("opener") if is_opener else sep.start("closer") + inline_end = sep.end("opener") if is_opener else sep.end("closer") + label = groups.get("closer_label", groups.get("opener_label")) + attrs = groups.get("closer_attrs", groups.get("opener_attrs")) or "" + attrs = { + k: self._as_python(v) + for k, v in (kv.split("=") for kv in attrs.split()) + } + text += inline_text[last_inline_offset:inline_start] + if is_opener: + starts.append((len(text), label, attrs)) + else: + try: + idx = next( + i + for i in range(len(starts) - 1, -1, -1) + if starts[i][1] == label or not label or not starts[i][1] + ) + except StopIteration: + warnings.warn(f"Unmatched closing tag for '{sep.group()}'") + continue + start, start_label, start_attrs = starts.pop(idx) + entities.append( + (start, len(text), start_label or label, {**attrs, **start_attrs}) + ) + last_inline_offset = inline_end + if last_inline_offset < len(inline_text): + text += inline_text[last_inline_offset:] + if starts: + warnings.warn( + f"Unmatched opening tags at indices {', '.join(s[1] for s in starts)}" + ) + entities = sorted(entities) + return text, entities + + def __call__(self, obj, tokenizer=None): + tok = tokenizer or self.tokenizer or get_current_tokenizer() + if isinstance(obj, str): + obj = {"text": obj} + annotated = obj["text"] + plain, raw_ents = self._parse(annotated) + + doc = tok(plain) + doc._.note_id = obj.get("doc_id", obj.get(FILENAME)) + + for dst in ( + *(() if self.span_attributes is None else self.span_attributes.values()), + *self.default_attributes, + ): + if not Span.has_extension(dst): + Span.set_extension(dst, default=None) + + spans = [] + for start, end, label, attrs in raw_ents: + span = doc.char_span(start, end, label=label, alignment_mode="expand") + if span is None: + continue + for k, v in attrs.items(): + new_k = ( + self.span_attributes.get(k) + if self.span_attributes is not None + 
else k + ) + if self.span_attributes is None and not Span.has_extension(new_k): + Span.set_extension(new_k, default=None) + if new_k: + span._.set(new_k, v) + spans.append(span) + + set_spans(doc, spans, span_setter=self.span_setter) + for attr, value in self.default_attributes.items(): + for span in spans: + if span._.get(attr) is None: + span._.set(attr, value) + + return doc + + def get_dict2doc_converter( converter: Union[str, Callable], kwargs ) -> Tuple[Callable, Dict]: @@ -716,7 +936,11 @@ def get_dict2doc_converter( filtered = [ name for name in available - if converter == name or (converter in name and "dict2doc" in name) + if converter == name + or ( + converter in name + and (name.endswith("2doc") or name.endswith("to_doc")) + ) ] converter = edsnlp.registry.factory.get(filtered[0]) nlp = kwargs.pop("nlp", None) @@ -726,7 +950,9 @@ def get_dict2doc_converter( kwargs = {} return converter, kwargs except (KeyError, IndexError): - available = [v for v in available if "dict2doc" in v] + available = [ + v for v in available if (v.endswith("2doc") or v.endswith("to_doc")) + ] raise ValueError( f"Cannot find converter for format {converter}. " f"Available converters are {', '.join(available)}" @@ -745,14 +971,20 @@ def get_doc2dict_converter( filtered = [ name for name in available - if converter == name or (converter in name and "doc2dict" in name) + if converter == name + or ( + converter in name + and (name.endswith("2dict") or name.endswith("to_dict")) + ) ] converter = edsnlp.registry.factory.get(filtered[0]) converter = converter(**kwargs) kwargs = {} return converter, kwargs except (KeyError, IndexError): - available = [v for v in available if "doc2dict" in v] + available = [ + v for v in available if (v.endswith("2dict") or v.endswith("to_dict")) + ] raise ValueError( f"Cannot find converter for format {converter}. 
" f"Available converters are {', '.join(available)}" diff --git a/pyproject.toml b/pyproject.toml index c391302ab6..feead31251 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -212,6 +212,7 @@ where = ["."] "eds.omop_dict2doc" = "edsnlp.data.converters:OmopDict2DocConverter" "eds.omop_doc2dict" = "edsnlp.data.converters:OmopDoc2DictConverter" "eds.ents_doc2dict" = "edsnlp.data.converters:EntsDoc2DictConverter" +"eds.markup_to_doc" = "edsnlp.data.converters:MarkupToDocConverter" # Deprecated (links to the same factories as above) "SOFA" = "edsnlp.pipes.ner.scores.sofa.factory:create_component" From c66fb22488c27a6a2a7e697c17c1758effea1b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Perceval=20Wajsb=C3=BCrt?= Date: Wed, 28 May 2025 18:39:58 +0200 Subject: [PATCH 2/2] docs: add metrics pages --- changelog.md | 1 + docs/assets/images/ner_metrics_example.png | Bin 0 -> 49278 bytes docs/assets/stylesheets/extra.css | 99 +++++ docs/assets/termynal/termynal.css | 132 ------- docs/assets/termynal/termynal.js | 411 --------------------- docs/metrics/index.md | 12 + docs/metrics/ner.md | 63 ++++ docs/metrics/span-attribute.md | 43 +++ docs/scripts/clickable_snippets.py | 1 + edsnlp/__init__.py | 6 +- edsnlp/metrics/__init__.py | 2 +- edsnlp/metrics/ner.py | 254 +++++++++---- edsnlp/metrics/span_attribute.py | 311 ++++++++++++++++ edsnlp/metrics/span_attributes.py | 182 --------- mkdocs.yml | 20 +- pyproject.toml | 28 +- tests/test_docs.py | 53 ++- 17 files changed, 801 insertions(+), 817 deletions(-) create mode 100644 docs/assets/images/ner_metrics_example.png delete mode 100644 docs/assets/termynal/termynal.css delete mode 100644 docs/assets/termynal/termynal.js create mode 100644 docs/metrics/index.md create mode 100644 docs/metrics/ner.md create mode 100644 docs/metrics/span-attribute.md create mode 100644 edsnlp/metrics/span_attribute.py delete mode 100644 edsnlp/metrics/span_attributes.py diff --git a/changelog.md b/changelog.md index befb3221fd..7db6414ad3 100644 --- a/changelog.md +++ b/changelog.md @@ -8,6 +8,7 @@ - Qualifiers can process `Span` or `Doc` : this feature especially makes it easier to nest qualifiers components in other components - New label_weights parameter in eds.span_classifier`, which allows the user to set per label-value loss weights during training - New `edsnlp.data.converters.MarkupToDocConverter` to convert Markdown or XML-like markup to documents, which is particularly useful to create annotated documents from scratch (e.g., for testing purposes). +- New [Metrics](https://aphp.github.io/edsnlp/master/metrics/) documentation page to document the available metrics and how to use them. 
 ### Fixed
 
diff --git a/docs/assets/images/ner_metrics_example.png b/docs/assets/images/ner_metrics_example.png
new file mode 100644
index 0000000000000000000000000000000000000000..09d50bd4582f7eaa77a45306aa6e780130c03f00
GIT binary patch
literal 49278
[binary image data omitted]
z%aN5F$B{C=WUapCTbdJGy@^A1$PwR$IWDxc_{3B5kr#}J_Wce{%mS_K872Nx*8Cn)&N`N(}HUz{dFVK&?__W zit&5)4_-=1L%4slkeBn?Ol^2u=eZtZ5yIa6NbRbFgtxO7)z2>@lsz_IP`h(+zqhLf z)B;j>w;Xzg>dOElHP0gJ02gGa=5_Jb35pG+*LE)C>V(UeXU(%Y8R4aKErrZNyWRNw zbmdTwZN&tj2I+8ay|SReZNSwXq(@Ne43x=wRu$v5lg&+EOgkFXwW>3;PBBO~(&6$j zNt%q!@ze}~VmDq#tm1x$N*WZtTFdGmvJtWxMME3jUSEFoI44qX!y_S)N(yFm`8Cos z$(MsY@tl2E;Q741;3GObF;%6qH4b$R-tzFA{kbu1#Hm$a5?Q+ebcgI=i*?^j4dcG~ z&|`OX31tLu8ksKfE3ZAAx`;M1yL4UB4=j*;#17=sn`j?398ztKXB;mkf-%G zr*3=@%J#Xk_SrG!cUD)s5VYXwSQ0Nhhs%@UUyTIbqs1Fb8N{_a$4GM~9u}*`M)8vm zmpWs$d7(3t_q)ekcm5c2*$J0~gw9QlRTQS56j;EfjaayT=|TKqiI{E|4+5Eyv%lEf z9NfsrAy!c8y717r+*T*~uBb%apXhQR-3nLO{dJp69u7)U>G&Yb?>Jl@^;lbB;P)FL zovJ=dh7h&!wVqpK8B*$+7AEkI4?+luk+Vu&_y8=*IvofrvMCGJL9^#RX$eXm~i25%U|Mih{e`x30=r{8T2N_!1|-kBs!x!T0TB`m~G_v$^b;G^v@RM>@#(lux z3qGlfsaD$}FW~Xp^>2P#b4gjg`_ga^h(x6dpnA_?+?=voWKz;Cx6^%)`}$z3!1m8v z2z{ceEOP=*u%2a{8B9n{djX_Ihsq}gDBQd@vy?MgL zDi%zBi_&awNwofb59PaM!)8#HJ6jWFHW6M2Yn9uLj1p}CD40>;YS$0)5ji{5^PDfL zq#cJbO%z7Ajq$M(sF4`c>dqg@*XyQLK;@R@GS?Aq4?-I@k?S%R_V`Fd-HJ|`5dafh5_55%@5p5pT+*lo`{~U5@)|-!hTpODp60>*Opmk3+fV(azx9FroPC`^S^7 z{o=o<*Un2z9g+2K<+m=|y*U>L#l#Xn_Gs0^32ELswCf*<9OSFv9FxmWfK*1g^OCYb&6jooXGKTe!w+mqK zWZ#@QUrQE1fRc$el%@3tYVc0!pSSss{@anRKE2s|%HU<=c_D$@ZXgsas1Ead1o00` zkm`O>O(3r*TF2Xe;7&NzQ~@v}2U`Sk7W?);RUl@ZF0uea$;Bz9ff_Ka*E1;BQqxSV z^Br96G){&nm#ssgAlY6U+Lx_;Q?!-E8ESMUM{k1;wwKJ}KANuVltuzxZGPQDjrt`< z8#`neqU-h$gm7_e%VI%L==d#)*lEz?J4o!(wmDr2It7Dex(z0xm94GF=UXmnz^3rF zEq>Bwo|653@YYKRbZD6phb_yHYd3&{4IEXgBLFi(Y@MNFy~KzH2)2glNp`O=d8Bx* zw!)4ckN!;b%Q3e>t}XaN((7a$k=9V)pp-|uhhU)G?aGQ1(85f*AzdwE6n`Ec9cYwU zcRvdWxyH@8Ikg1vd8`soM&ryJB~@dZ=NkjT=g&Zgv&4=T3|v3fBhDc>kxL@QW1b1$NBVZoru&q^%YFV3VxeJ2N>;nf3x6YA$%0#X1} zbq_Sr_PH*sc6T)B+}8el9d3I(%jzw)g>ev5E)4b7T4kP=X3IcKfeCM#l z`O~bGWXTUk4n?31mNJdbiPNN?2cs5Ox2g;63-Z^ta18poY`!VP-%olFM-i|C#>Y;F zE*ak&307Hyge-t5LQ>m+U!F%bI7C8ZNTe-I%RoSP&bds}9A$@(%xwM;@$}RN2SHcx zh73lq+nyRi#q$Yq!1Ik2N_KDTSpgE+x%q_WH=K57z~*tVG1$tOQo#_YqBBCrbiTig z3uQnVI&uH644eEN!9vNlv&ByRIZ;vG6m23#^pYi+tV{E>;&kx_k4z;Jc?+|(xr^O$ z%!Vm8MHr!>3?j(V-8_6=w-a~4y%xyVZdd*XGMBsG8Fl)*a&uXIXKDtn^R3$28Fm@?jz7C?SXLa*-(l4NDu+YsgS_sB%J^q zV6ppD2w&nnqmm(3l*%H*vR4db2`v*`{?&4)6S~wDC7>m|QF=wc$aKgc7nF7a#~2Dc zppjCrUP-o3MPB}w(J$;%y>ARH>~K-FrhLCQ+9^g1@$M|=UhIC<1D*>~%~R}|(U-vS zzYr3gU+59brksfzVOK z9nWjkcwcWuoV&=$vZCxod-j#QA+nMa;ui-rjBoj!@PHp1j^oQhud5pcMBG}w-ueoZ z(*5Z*XLmR(I>$jluc61+1IayBqcobe&}XJWlG^cDp1Gc&{kKYL%ve0~5IFD|xi#D( z>*-it2h_z)0XD7SS1L@WkBxyB2pgAX0>UT=f{G?0z_{KH4#R}Kq-MqznblNm6{wS< zE*1lB8Xza0b5aS@0pQ!FEVe-dvwhj-P{HoNK{B#i@P1j$K7w;=`I&L0T?C(Mf=X;QXE;HfBH|2SM3IpNN!k6y3nFqXa0`Ppxtd^SxS3m`Yf^{#*0-v_FNCoJldE z77Lt^7GLt>dbOepiGD9^#m`9zdX6n_ZD$i56clapP4^SlR=t1|t|l6a>4(Uq;^4Yn z>b6Y;f|6|>Jkw&0!u}57CImdhPEkS8T;XjTtIO6%ME(rxjpL0d{%xlb10AM+yS(@V zMi9>Q-aZ7bEHg()q?k@P3m@zhE9br?W3Nr%5Y(`X_CVwF~V%GxTCRjEG`m8 zAuk-QN0|}4C1s1eSJqrSVywBo*D2%^7JkOC&6uOTMZKv*x(i>TxFllO$;Kd&u{e+> z!;%htA%d%Qv7pk>(KIOQciR3S_v(}aVZKF63;v*E)f zwMq)&oGv790NLjKcr3nxJ<|gkEnN?Vw)d9tD1-bA$j~eyz-X*ySjF>*pt$QeitJQ5 z7FpK)07ucX_SY|9 a { font-size: 1rem; align-content: center; + white-space: nowrap; } .doc-param-details .subdoc { @@ -207,3 +208,101 @@ a.discrete-link { margin: 0; font-weight: normal; } + +/*.chip { + position: relative; + box-sizing: content-box; + display: inline-block; + padding: 2px 1px; + margin: 1px 0px 14px; + border-radius: 6px; + font-style: normal; + background: #dae8fc; + border: 1px solid #6c8ebf; + --border-color: #6c8ebf; + white-space: nowrap; +} + +.chip::after { + content: attr(data-chip); + position: absolute; + right: -1px; + background: white; + border: 
1px solid var(--border-color); + border-radius: 3px; + line-height: 1; + top: calc(100% - 6px); + box-sizing: border-box; +}*/ + +.chip { + position: relative; + box-sizing: content-box; + display: inline-block; + padding: 0 0 0 2px; + margin: 1px 0px; + border-radius: 4px; + font-style: normal; + background: #dae8fc; + border: 1px solid #6c8ebf; + --border-color: #6c8ebf; + white-space: nowrap; +} + +.chip::after { + content: attr(data-chip); + display: inline-block; + right: -1px; + background: white; + border: 1px solid var(--border-color); + border-radius: 0px 3px 3px 0px; + padding: 0 1px; + margin: -2px -2px -2px 2px; + box-sizing: border-box; +} + +.chip.tp { + background-color: #cef8ce; + border-color: #50b950; + --border-color: #50b950; +} + +.chip-green { + display: inline-block; + padding: 2px 2px; + margin: 1px 1px; + border-radius: 6px; + font-style: normal; + background: #cef8ce; + border: 1px solid #50b950; + white-space: nowrap; +} + +.chip-red { + display: inline-block; + padding: 2px 2px; + margin: 1px 1px; + border-radius: 6px; + font-style: normal; + background: #f8cecc; + border: 1px solid #b95450; + white-space: nowrap +} + +.chip.fp, .chip.fn { + background-color: #f8cecc; + border-color: #b95450; + --border-color: #b95450; +} + +.chip.na { + display: inline-block; + padding: 2px 2px; + margin: 1px 1px; + border-radius: 6px; + font-style: normal; + background: #efefef; + border: 1px solid #bababa; + color: #bababa; + white-space: nowrap; +} diff --git a/docs/assets/termynal/termynal.css b/docs/assets/termynal/termynal.css deleted file mode 100644 index affc90e34f..0000000000 --- a/docs/assets/termynal/termynal.css +++ /dev/null @@ -1,132 +0,0 @@ -/** - * termynal.js - * - * @author Ines Montani - * @version 0.0.1 - * @license MIT - * - * Modified version from https://github.com/tiangolo/typer - */ - -:root { - --color-bg: #252a33; - --color-text: #eee; - --color-text-subtle: #a2a2a2; -} - -[data-termynal] { - width: auto; - max-width: 100%; - background: var(--color-bg); - color: var(--color-text); - font-size: 18px; - /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */ - font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; - border-radius: 4px; - padding: 75px 45px 35px; - position: relative; - -webkit-box-sizing: border-box; - box-sizing: border-box; -} - -[data-termynal]:before { - content: ''; - position: absolute; - top: 15px; - left: 15px; - display: inline-block; - width: 15px; - height: 15px; - border-radius: 50%; - /* A little hack to display the window buttons in one pseudo element. */ - background: #d9515d; - -webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; - box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; -} - -[data-termynal]:after { - content: 'bash'; - position: absolute; - color: var(--color-text-subtle); - top: 5px; - left: 0; - width: 100%; - text-align: center; -} - -a[data-terminal-control] { - text-align: right; - display: block; - color: #aebbff; -} - -[data-terminal-copy] { - text-align: right; - position: absolute; - top: 5px; - right: 5px; -} - -[data-terminal-copy].md-icon { - color: #aebbff; -} - -[data-ty] { - display: block; - line-height: 2; -} - -[data-ty]:before { - /* Set up defaults and ensure empty lines are displayed. 
*/ - content: ''; - display: inline-block; - vertical-align: middle; -} - -[data-ty="input"]:before, -[data-ty-prompt]:before { - margin-right: 0.75em; - color: var(--color-text-subtle); -} - -[data-ty="input"]:before { - content: '$'; -} - -[data-ty][data-ty-prompt]:before { - content: attr(data-ty-prompt); -} - -[data-ty-cursor]:after { - content: attr(data-ty-cursor); - font-family: monospace; - margin-left: 0.5em; - -webkit-animation: blink 1s infinite; - animation: blink 1s infinite; -} - - -/* Cursor animation */ - -@-webkit-keyframes blink { - 50% { - opacity: 0; - } -} - -@keyframes blink { - 50% { - opacity: 0; - } -} - -/* tooltip */ - -[data-md-state="open"] { - transform: translateY(0); - opacity: 1; - transition: - transform 400ms cubic-bezier(0.075, 0.85, 0.175, 1), - opacity 400ms; - pointer-events: initial; -} diff --git a/docs/assets/termynal/termynal.js b/docs/assets/termynal/termynal.js deleted file mode 100644 index 8a572449ae..0000000000 --- a/docs/assets/termynal/termynal.js +++ /dev/null @@ -1,411 +0,0 @@ -/** - * termynal.js - * A lightweight, modern and extensible animated terminal window, using - * async/await. - * - * @author Ines Montani - * @version 0.0.1 - * @license MIT - * - * Modified version from https://github.com/tiangolo/typer - * - */ - -'use strict'; - -/** Generate a terminal widget. */ -class Termynal { - /** - * Construct the widget's settings. - * @param {(string|Node)=} container - Query selector or container element. - * @param {Object=} options - Custom settings. - * @param {string} options.prefix - Prefix to use for data attributes. - * @param {number} options.startDelay - Delay before animation, in ms. - * @param {number} options.typeDelay - Delay between each typed character, in ms. - * @param {number} options.lineDelay - Delay between each line, in ms. - * @param {number} options.progressLength - Number of characters displayed as progress bar. - * @param {string} options.progressChar – Character to use for progress bar, defaults to █. - * @param {number} options.progressPercent - Max percent of progress. - * @param {string} options.cursor – Character to use for cursor, defaults to ▋. - * @param {Object[]} lineData - Dynamically loaded line data objects. - * @param {boolean} options.noInit - Don't initialise the animation. - */ - constructor(container = '#termynal', options = {}) { - this.container = (typeof container === 'string') ? 
document.querySelector(container) : container; - this.pfx = `data-${options.prefix || 'ty'}`; - this.originalStartDelay = this.startDelay = options.startDelay - || parseFloat(this.container.getAttribute(`${this.pfx}-startDelay`)) || 600; - this.originalTypeDelay = this.typeDelay = options.typeDelay - || parseFloat(this.container.getAttribute(`${this.pfx}-typeDelay`)) || 50; - this.originalLineDelay = this.lineDelay = options.lineDelay - || parseFloat(this.container.getAttribute(`${this.pfx}-lineDelay`)) || 500; - this.progressLength = options.progressLength - || parseFloat(this.container.getAttribute(`${this.pfx}-progressLength`)) || 40; - this.progressChar = options.progressChar - || this.container.getAttribute(`${this.pfx}-progressChar`) || '█'; - this.progressPercent = options.progressPercent - || parseFloat(this.container.getAttribute(`${this.pfx}-progressPercent`)) || 100; - this.cursor = options.cursor - || this.container.getAttribute(`${this.pfx}-cursor`) || '▋'; - this.lineData = this.lineDataToElements(options.lineData || []); - this.loadLines() - if (!options.noInit) this.init() - } - - loadLines() { - // Load all the lines and create the container so that the size is fixed - // Otherwise it would be changing and the user viewport would be constantly - // moving as she/he scrolls - const finish = this.generateFinish() - finish.style.visibility = 'hidden' - this.container.appendChild(finish) - // Appends dynamically loaded lines to existing line elements. - this.lines = [...this.container.querySelectorAll(`[${this.pfx}]`)].concat(this.lineData); - for (let line of this.lines) { - line.style.visibility = 'hidden' - this.container.appendChild(line) - } - const restart = this.generateRestart() - restart.style.visibility = 'hidden' - this.container.appendChild(restart) - this.container.setAttribute('data-termynal', ''); - } - - /** - * Initialise the widget, get lines, clear container and start animation. - */ - init() { - /** - * Calculates width and height of Termynal container. - * If container is empty and lines are dynamically loaded, defaults to browser `auto` or CSS. - */ - const containerStyle = getComputedStyle(this.container); - this.container.style.width = containerStyle.width !== '0px' ? - containerStyle.width : undefined; - this.container.style.minHeight = containerStyle.height !== '0px' ? - containerStyle.height : undefined; - - this.container.setAttribute('data-termynal', ''); - this.container.innerHTML = ''; - for (let line of this.lines) { - line.style.visibility = 'visible' - } - this.start(); - } - - - /** - * Start the animation and rener the lines depending on their data attributes. 
- */ - async start() { - this.addCopy() - this.addFinish() - await this._wait(this.startDelay); - - for (let line of this.lines) { - const type = line.getAttribute(this.pfx); - const delay = line.getAttribute(`${this.pfx}-delay`) || this.lineDelay; - - if (type == 'input') { - line.setAttribute(`${this.pfx}-cursor`, this.cursor); - await this.type(line); - await this._wait(delay); - } - - else if (type == 'progress') { - await this.progress(line); - await this._wait(delay); - } - - else { - this.container.appendChild(line); - await this._wait(delay); - } - - line.removeAttribute(`${this.pfx}-cursor`); - } - this.addRestart() - this.finishElement.style.visibility = 'hidden' - this.lineDelay = this.originalLineDelay - this.typeDelay = this.originalTypeDelay - this.startDelay = this.originalStartDelay - } - - generateRestart() { - const restart = document.createElement('a') - restart.onclick = (e) => { - e.preventDefault() - this.container.innerHTML = '' - this.init() - } - restart.href = '#' - restart.setAttribute('data-terminal-control', '') - restart.innerHTML = "restart ↻" - return restart - } - - generateCopy() { - var dialog = document.getElementsByClassName('md-dialog')[0] - var dialog_text = document.getElementsByClassName('md-dialog__inner md-typeset')[0] - const copy = document.createElement('a') - copy.classList.add("md-clipboard") - copy.classList.add("md-icon") - copy.onclick = (e) => { - e.preventDefault() - var command = '' - for (let line of this.lines) { - if (line.getAttribute("data-ty") == 'input') { - command = command + line.innerHTML + '\n' - } - } - navigator.clipboard.writeText(command) - dialog.setAttribute('data-md-state', 'open'); - dialog_text.innerText = 'Copied to clipboard'; - - setTimeout(function () { - dialog.removeAttribute('data-md-state'); - }, 2000); - } - copy.setAttribute('data-terminal-copy', '') - return copy - } - - generateFinish() { - const finish = document.createElement('a') - finish.onclick = (e) => { - e.preventDefault() - this.lineDelay = 0 - this.typeDelay = 0 - this.startDelay = 0 - } - finish.href = '#' - finish.setAttribute('data-terminal-control', '') - finish.innerHTML = "fast →" - this.finishElement = finish - return finish - } - - addRestart() { - const restart = this.generateRestart() - this.container.appendChild(restart) - } - - addFinish() { - const finish = this.generateFinish() - this.container.appendChild(finish) - } - - addCopy() { - let copy = this.generateCopy() - this.container.appendChild(copy) - } - - /** - * Animate a typed line. - * @param {Node} line - The line element to render. - */ - async type(line) { - const chars = [...line.textContent]; - line.textContent = ''; - this.container.appendChild(line); - - for (let char of chars) { - const delay = line.getAttribute(`${this.pfx}-typeDelay`) || this.typeDelay; - await this._wait(delay); - line.textContent += char; - } - } - - /** - * Animate a progress bar. - * @param {Node} line - The line element to render. 
- */ - async progress(line) { - const progressLength = line.getAttribute(`${this.pfx}-progressLength`) - || this.progressLength; - const progressChar = line.getAttribute(`${this.pfx}-progressChar`) - || this.progressChar; - const chars = progressChar.repeat(progressLength); - const progressPercent = line.getAttribute(`${this.pfx}-progressPercent`) - || this.progressPercent; - line.textContent = ''; - this.container.appendChild(line); - - for (let i = 1; i < chars.length + 1; i++) { - await this._wait(this.typeDelay) / 4; - const percent = Math.round(i / chars.length * 100); - line.textContent = `${chars.slice(0, i)} ${percent}%`; - if (percent > progressPercent) { - break; - } - } - } - - /** - * Helper function for animation delays, called with `await`. - * @param {number} time - Timeout, in ms. - */ - _wait(time) { - return new Promise(resolve => setTimeout(resolve, time)); - } - - /** - * Converts line data objects into line elements. - * - * @param {Object[]} lineData - Dynamically loaded lines. - * @param {Object} line - Line data object. - * @returns {Element[]} - Array of line elements. - */ - lineDataToElements(lineData) { - return lineData.map(line => { - let div = document.createElement('div'); - div.innerHTML = `${line.value || ''}`; - - return div.firstElementChild; - }); - } - - /** - * Helper function for generating attributes string. - * - * @param {Object} line - Line data object. - * @returns {string} - String of attributes. - */ - _attributes(line) { - let attrs = ''; - for (let prop in line) { - // Custom add class - if (prop === 'class') { - attrs += ` class=${line[prop]} ` - continue - } - if (prop === 'type') { - attrs += `${this.pfx}="${line[prop]}" ` - } else if (prop !== 'value') { - attrs += `${this.pfx}-${prop}="${line[prop]}" ` - } - } - - return attrs; - } -} - -/** -* HTML API: If current script has container(s) specified, initialise Termynal. -*/ -if (document.currentScript.hasAttribute('data-termynal-container')) { - const containers = document.currentScript.getAttribute('data-termynal-container'); - containers.split('|') - .forEach(container => new Termynal(container)) -} - -document.querySelectorAll(".use-termynal").forEach(node => { - node.style.display = "block"; - new Termynal(node, { - lineDelay: 500 - }); -}); -const progressLiteralStart = "---> 100%"; -const promptLiteralStart = "$ "; -const customPromptLiteralStart = "$* "; -const commentPromptLiteralStart = "# "; -const colorOutputLiteralStart = "color:"; -const termynalActivateClass = "termy"; -let termynals = []; - -function createTermynals() { - document - .querySelectorAll(`.${termynalActivateClass} .highlight`) - .forEach(node => { - const text = node.textContent; - const lines = text.split("\n"); - const useLines = []; - let buffer = []; - function saveBuffer() { - if (buffer.length) { - let isBlankSpace = true; - buffer.forEach(line => { - if (line) { - isBlankSpace = false; - } - }); - var dataValue = {}; - if (isBlankSpace) { - dataValue["delay"] = 0; - } - if (buffer[buffer.length - 1] === "") { - // A last single
won't have effect - // so put an additional one - buffer.push(""); - } - - const bufferValue = buffer.join("
"); - dataValue["value"] = bufferValue; - useLines.push(dataValue); - buffer = []; - } - } - for (let line of lines) { - if (line === progressLiteralStart) { - saveBuffer(); - useLines.push({ - type: "progress" - }); - } else if (line.startsWith(promptLiteralStart)) { - saveBuffer(); - const value = line.replace(promptLiteralStart, "").trimEnd(); - useLines.push({ - type: "input", - value: value - }); - } else if (line.startsWith(commentPromptLiteralStart)) { - saveBuffer(); - const value = "💬 " + line.replace(commentPromptLiteralStart, "").trimEnd(); - const color_value = "" + value + "" - useLines.push({ - value: color_value, - class: "termynal-comment", - delay: 0 - }); - } else if (line.startsWith(customPromptLiteralStart)) { - saveBuffer(); - const prompt = line.slice(3, line.indexOf(' ', 3)) - let value = line.slice(line.indexOf(' ', 3)).trimEnd(); - useLines.push({ - type: "input", - value: value, - prompt: prompt - }); - } else if (line.startsWith(colorOutputLiteralStart)) { - let color = line.substring(0, line.indexOf(' ')); - let line_value = line.substring(line.indexOf(' ') + 1); - var color_line = "" + line_value + "" - buffer.push(color_line); - } else { - buffer.push(line); - } - } - saveBuffer(); - const div = document.createElement("div"); - node.replaceWith(div); - const termynal = new Termynal(div, { - lineData: useLines, - noInit: true, - lineDelay: 500 - }); - termynals.push(termynal); - }); -} - -function loadVisibleTermynals() { - termynals = termynals.filter(termynal => { - if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) { - termynal.init(); - return false; - } - return true; - }); -} -window.addEventListener("scroll", loadVisibleTermynals); -createTermynals(); -loadVisibleTermynals(); diff --git a/docs/metrics/index.md b/docs/metrics/index.md new file mode 100644 index 0000000000..25201285ad --- /dev/null +++ b/docs/metrics/index.md @@ -0,0 +1,12 @@ +# Metrics + +EDS-NLP provides several metrics to evaluate the performance of its components. These metrics can be used to assess the quality of entity recognition, negation detection, and other tasks. + +At the moment, we support the following metrics: + +| Metric | Description | +|:---------------------|:---------------------------------------------------| +| `eds.ner_exact` | NER metric with exact match at the span level | +| `eds.ner_token` | NER metric with token-level match | +| `eds.ner_overlap` | NER metric with overlap match at the span level | +| `eds.span_attribute` | Span multi-label multi-class classification metric | diff --git a/docs/metrics/ner.md b/docs/metrics/ner.md new file mode 100644 index 0000000000..52855565d6 --- /dev/null +++ b/docs/metrics/ner.md @@ -0,0 +1,63 @@ +# NER Metrics + +We provide several metrics to evaluate the performance of Named Entity Recognition (NER) components. +Let's look at an example and see how they differ. We'll use the following two documents: a reference +document (ref) and a document with predicted entities (pred). + +### Shared example + ++-------------------------------------------------------------+------------------------------------------+ +| pred | ref | ++=============================================================+==========================================+ +| *La*{.chip data-chip=PER} *patiente*{.chip data-chip=PER} a | La *patiente*{.chip data-chip=PER} a | +| une *fièvre aigüe*{.chip data-chip=DIS} | *une fièvre*{.chip data-chip=DIS} aigüe. 
| ++-------------------------------------------------------------+------------------------------------------+ + +Let's create matching documents in EDS-NLP using the following code snippet: + +```python +from edsnlp.data.converters import MarkupToDocConverter + +conv = MarkupToDocConverter(preset="md", span_setter="entities") + +pred = conv("[La](PER) [patiente](PER) a une [fièvre aiguë](DIS).") +ref = conv("La [patiente](PER) a [une fièvre](DIS) aiguë.") +``` + +### Summary of metrics + +The table below shows the different scores depending on the metric used. + +| Metric | Precision | Recall | F1 | +|--------------------|-----------|--------|------| +| Span-level exact | 0.33 | 0.5 | 0.40 | +| Token-level | 0.50 | 0.67 | 0.57 | +| Span-level overlap | 0.67 | 1.0 | 0.80 | + +## Span-level NER metric with exact match {: #edsnlp.metrics.ner.NerExactMetric } + +::: edsnlp.metrics.ner.NerExactMetric + options: + heading_level: 2 + show_bases: false + show_source: false + only_class_level: true + +## Span-level NER metric with approximate match {: #edsnlp.metrics.ner.NerOverlapMetric } + +::: edsnlp.metrics.ner.NerOverlapMetric + options: + heading_level: 2 + show_bases: false + show_source: false + only_class_level: true + + +## Token-level NER metric {: #edsnlp.metrics.ner.NerTokenMetric } + +::: edsnlp.metrics.ner.NerTokenMetric + options: + heading_level: 2 + show_bases: false + show_source: false + only_class_level: true diff --git a/docs/metrics/span-attribute.md b/docs/metrics/span-attribute.md new file mode 100644 index 0000000000..07db17157b --- /dev/null +++ b/docs/metrics/span-attribute.md @@ -0,0 +1,43 @@ +# Span Attribute Classification Metrics {: #edsnlp.metrics.span_attribute.SpanAttributeMetric } + +Several NLP tasks consist in classifying existing spans of text into multiple classes, +such as the detection of negation, hypothesis or span linking. We provide a metric +to evaluate the performance of such tasks. + +Let's look at an example. We'll use the following two documents: a reference +document (ref) and a document with predicted entities (pred). + ++-------------------------------------------------------------------+-------------------------------------------------------------------+ +| pred | ref | ++===================================================================+===================================================================+ +| Le patient n'est pas *fièvreux*{.chip data-chip="SYMP neg=true"}, | Le patient n'est pas *fièvreux*{.chip data-chip="SYMP neg=true"}, | +| son père a *du diabète*{.chip data-chip="DIS carrier=PATIENT"}. | son père a *du diabète*{.chip data-chip="DIS carrier=FATHER"}. | +| Pas d'évolution du | Pas d'évolution du | +| *cancer*{.chip data-chip="DIS neg=true carrier=PATIENT"}. | *cancer*{.chip data-chip="DIS carrier=PATIENT"}. | ++-------------------------------------------------------------------+-------------------------------------------------------------------+ + +We can quickly create matching documents in EDS-NLP using the following code snippet: + +```python +from edsnlp.data.converters import MarkupToDocConverter + +conv = MarkupToDocConverter(preset="md", span_setter="entities") +# Create a document with predicted attributes and a reference document +pred = conv( + "Le patient n'est pas [fièvreux](SYMP neg=true), " + "son père a [du diabète](DIS neg=false carrier=PATIENT). " + "Pas d'évolution du [cancer](DIS neg=true carrier=PATIENT)." 
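+    # Note: compared to the reference document defined just below, this prediction
+    # disagrees on the carrier of "du diabète" (PATIENT vs FATHER) and on the
+    # negation of "cancer" (true vs false); all other spans and attributes match.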
+) +ref = conv( + "Le patient n'est pas [fièvreux](SYMP neg=true), " + "son père a [du diabète](DIS neg=false carrier=FATHER). " + "Pas d'évolution du [cancer](DIS neg=false carrier=PATIENT)." +) +``` + +::: edsnlp.metrics.span_attribute.SpanAttributeMetric + options: + heading_level: 2 + show_bases: false + show_source: false + only_class_level: true diff --git a/docs/scripts/clickable_snippets.py b/docs/scripts/clickable_snippets.py index 2b901448a1..affd0d4f90 100644 --- a/docs/scripts/clickable_snippets.py +++ b/docs/scripts/clickable_snippets.py @@ -99,6 +99,7 @@ def on_post_page( for ep in ( *self.get_ep_namespace(ep, "spacy_factories"), *self.get_ep_namespace(ep, "edsnlp_factories"), + *self.get_ep_namespace(ep, "spacy_scorers"), ) } diff --git a/edsnlp/__init__.py b/edsnlp/__init__.py index 965d8db6b2..59bc1a46d7 100644 --- a/edsnlp/__init__.py +++ b/edsnlp/__init__.py @@ -45,7 +45,11 @@ def find_spec(self, fullname, path, target=None): # pragma: no cover spec = importlib.util.spec_from_loader(fullname, AliasLoader(new_name)) return spec if fullname.startswith("edsnlp.metrics.span_classification"): - new_name = "edsnlp.metrics.span_attributes" + fullname[34:] + new_name = "edsnlp.metrics.span_attribute" + fullname[34:] + spec = importlib.util.spec_from_loader(fullname, AliasLoader(new_name)) + return spec + if fullname.startswith("edsnlp.metrics.span_attributes"): + new_name = "edsnlp.metrics.span_attribute" + fullname[30:] spec = importlib.util.spec_from_loader(fullname, AliasLoader(new_name)) return spec if "span_qualifier" in fullname.split("."): diff --git a/edsnlp/metrics/__init__.py b/edsnlp/metrics/__init__.py index e96f74017a..df17ab40a3 100644 --- a/edsnlp/metrics/__init__.py +++ b/edsnlp/metrics/__init__.py @@ -25,7 +25,7 @@ def average_precision(pred: Dict[Any, float], gold: Iterable[Any]): for i in range(1, len(precisions)): if recalls[i] > recalls[i - 1]: ap += (recalls[i] - recalls[i - 1]) * precisions[i] - return ap + return float(ap) def prf(pred: Collection, gold: Collection): diff --git a/edsnlp/metrics/ner.py b/edsnlp/metrics/ner.py index d39306445d..17bc9f5f71 100644 --- a/edsnlp/metrics/ner.py +++ b/edsnlp/metrics/ner.py @@ -1,3 +1,27 @@ +""" +We provide several metrics to evaluate the performance of Named Entity Recognition (NER) components. +Let's look at an example and see how they differ. We'll use the following two documents: a reference +document (ref) and a document with predicted entities (pred). + ++-------------------------------------------------------------+------------------------------------------+ +| pred | ref | ++=============================================================+==========================================+ +| *La*{.chip data-chip=PER} *patiente*{.chip data-chip=PER} a | La *patiente*{.chip data-chip=PER} a | +| une *fièvre aigüe*{.chip data-chip=DIS} | *une fièvre*{.chip data-chip=DIS} aigüe. 
| ++-------------------------------------------------------------+------------------------------------------+ + +Let's create matching documents in EDS-NLP using the following code snippet: + +```python +from edsnlp.data.converters import MarkupToDocConverter + +conv = MarkupToDocConverter(preset="md", span_setter="entities") + +pred = conv("[La](PER) [patiente](PER) a une [fièvre aiguë](DIS).") +ref = conv("La [patiente](PER) a [une fièvre](DIS) aiguë.") +``` +""" # noqa: E501 + import abc from collections import defaultdict from typing import Any, Dict, Optional @@ -13,26 +37,6 @@ def ner_exact_metric( micro_key: str = "micro", filter_expr: Optional[str] = None, ) -> Dict[str, Any]: - """ - Scores the extracted entities that may be overlapping or nested - by looking in the spans returned by a given SpanGetter object. - - Parameters - ---------- - examples: Examples - The examples to score, either a tuple of (golds, preds) or a list of - spacy.training.Example objects - span_getter: SpanGetter - The span getter to use to extract the spans from the document - micro_key: str - The key to use to store the micro-averaged results for spans of all types - filter_expr: str - The filter expression to use to filter the documents - - Returns - ------- - Dict[str, Any] - """ examples = make_examples(examples) if filter_expr is not None: filter_fn = eval(f"lambda doc: {filter_expr}") @@ -65,27 +69,6 @@ def ner_token_metric( micro_key: str = "micro", filter_expr: Optional[str] = None, ) -> Dict[str, Any]: - """ - Scores the extracted entities that may be overlapping or nested - by looking in `doc.ents`, and `doc.spans`, and comparing the predicted - and gold entities at the TOKEN level. - - Parameters - ---------- - examples: Examples - The examples to score, either a tuple of (golds, preds) or a list of - spacy.training.Example objects - span_getter: SpanGetter - The span getter to use to extract the spans from the document - micro_key: str - The key to use to store the micro-averaged results for spans of all types - filter_expr: str - The filter expression to use to filter the documents - - Returns - ------- - Dict[str, Any] - """ examples = make_examples(examples) if filter_expr is not None: filter_fn = eval(f"lambda doc: {filter_expr}") @@ -130,30 +113,6 @@ def ner_overlap_metric( filter_expr: Optional[str] = None, threshold: float = 0.5, ) -> Dict[str, Any]: - """ - Scores the extracted entities that may be overlapping or nested - by looking in `doc.ents`, and `doc.spans`, and comparing the predicted - and gold entities and counting true when a predicted entity overlaps - with a gold entity of the same label - - Parameters - ---------- - examples: Examples - The examples to score, either a tuple of (golds, preds) or a list of - spacy.training.Example objects - span_getter: SpanGetter - The span getter to use to extract the spans from the document - micro_key: str - The key to use to store the micro-averaged results for spans of all types - filter_expr: str - The filter expression to use to filter the documents - threshold: float - The threshold to use to consider that two spans overlap - - Returns - ------- - Dict[str, Any] - """ examples = make_examples(*examples) if filter_expr is not None: filter_fn = eval(f"lambda doc: {filter_expr}") @@ -239,6 +198,54 @@ def __call__(self, *examples) -> Dict[str, Any]: deprecated=["eds.ner_exact_metric"], ) class NerExactMetric(NerMetric): + r""" + The `eds.ner_exact` metric + scores the extracted entities (that may be overlapping or nested) + by looking in 
the spans returned by a given SpanGetter object and + comparing predicted spans to gold spans for **exact** boundary and label matches. + + Let's view these elements as collections of (span → label) and count how + many of the predicted spans match the gold spans exactly (and vice versa): + + +----------------------------------------------+--------------------------------------------+ + | pred | ref | + +==============================================+============================================+ + | *La*{.chip .fp data-chip=PER}
| *patiente*{.chip .tp data-chip=PER}
| + | *patiente*{.chip .tp data-chip=PER}
| *une fièvre*{.chip .fn data-chip=DIS}&#13;
| + | *fièvre aiguë*{.chip .fp data-chip=DIS}
| | + +----------------------------------------------+--------------------------------------------+ + + Precision, Recall and F1 (micro-average and per‐label) are computed as follows: + + - Precision: `p = |matched items of pred| / |pred|` + - Recall: `r = |matched items of ref| / |ref|` + - F1: `f = 2 / (1/p + 1/f)` + + Examples + -------- + + ```python + from edsnlp.metrics.ner import NerExactMetric + + metric = NerExactMetric(span_getter=conv.span_setter, micro_key="micro") + metric([ref], [pred]) + # Out: { + # 'micro': {'f': 0.4, 'p': 0.33, 'r': 0.5, 'tp': 1, 'support': 2, 'positives': 3}, + # 'PER': {'f': 0.67, 'p': 0.5, 'r': 1, 'tp': 1, 'support': 1, 'positives': 2}, + # 'DIS': {'f': 0.0, 'p': 0.0, 'r': 0.0, 'tp': 0, 'support': 1, 'positives': 1}, + # } + ``` + + Parameters + ---------- + span_getter: SpanGetter + The span getter to use to extract the spans from the document + micro_key: str + The key to use to store the micro-averaged results for spans of all types + filter_expr: str + The filter expression to use to filter the documents. Evaluated with `doc` as the variable. + """ # noqa: E501 + def __init__( self, span_getter: SpanGetterArg, @@ -265,6 +272,58 @@ def __call__(self, *examples): deprecated=["eds.ner_token_metric"], ) class NerTokenMetric(NerMetric): + r""" + The `eds.ner_token` metric + scores the extracted entities that may be overlapping or nested by looking in + `doc.ents`, and `doc.spans`, and comparing the predicted and gold entities at the + **token** level. + + Assuming we use the `eds` (or `fr` or `en`) tokenizer, in the above example, there + are 3 annotated tokens in the reference, and 4 annotated tokens in the prediction. + Let's view these elements as sets of (token, label) and count how many of the + predicted tokens match the gold tokens exactly (and vice versa): + + +------------------------------------------+------------------------------------------+ + | pred | ref | + +==========================================+==========================================+ + | *La*{.chip .fp data-chip=PER}
| *patiente*{.chip .tp data-chip=PER}
| + | *patiente*{.chip .tp data-chip=PER}
| *une*{.chip .fn data-chip=DIS}&#13;
| + | *fièvre*{.chip .tp data-chip=DIS}
| *fièvre*{.chip .tp data-chip=DIS} | + | *aiguë*{.chip .fp data-chip=DIS} | | + +------------------------------------------+------------------------------------------+ + + Precision, Recall and F1 (micro-average and per‐label) are computed as follows: + + - Precision: `p = |matched items of pred| / |pred|` + - Recall: `r = |matched items of ref| / |ref|` + - F1: `f = 2 / (1/p + 1/f)` + + Examples + -------- + + ```python + from edsnlp.metrics.ner import NerTokenMetric + + metric = NerTokenMetric(span_getter=conv.span_setter, micro_key="micro") + metric([ref], [pred]) + # Out: { + # 'micro': {'f': 0.57, 'p': 0.5, 'r': 0.67, 'tp': 2, 'support': 3, 'positives': 4}, + # 'PER': {'f': 0.67, 'p': 0.5, 'r': 1, 'tp': 1, 'support': 1, 'positives': 2}, + # 'DIS': {'f': 0.5, 'p': 0.5, 'r': 0.5, 'tp': 1, 'support': 2, 'positives': 2} + # } + ``` + + Parameters + ---------- + span_getter: SpanGetter + The span getter to use to extract the spans from the document + micro_key: str + The key to use to store the micro-averaged results for spans of all types + filter_expr: str + The filter expression to use to filter the documents. Will be evaluated + with `doc` as the variable name, so you can use `doc.ents`, `doc.spans`, etc. + """ # noqa: E501 + def __init__( self, span_getter: SpanGetterArg, @@ -291,6 +350,71 @@ def __call__(self, *examples): deprecated=["eds.ner_overlap_metric"], ) class NerOverlapMetric(NerMetric): + r""" + The `eds.ner_overlap` metric + scores the extracted entities that may be overlapping or nested + by looking in the spans returned by a given SpanGetter object and + counting a prediction as correct if it overlaps by at least the given + Dice‐coefficient threshold with a gold span of the same label. + + This metric is useful for evaluating NER systems where the exact boundaries + do not matter too much, but the presence of the entity at the same spot is important. + For instance, you may not want to penalize a system that forgets determiners if + the rest of the entity is correctly identified. + + Let's view these elements as sets of (span → label) and count how many of the + predicted spans match the gold spans by at least the given Dice coefficient + (and vice versa): + + +---------------------------------------------+------------------------------------------+ + | pred | ref | + +=============================================+==========================================+ + | *La*{.chip .fp data-chip=PER}
| *patiente*{.chip .tp data-chip=PER}
| + | *patiente*{.chip .tp data-chip=PER}
| *une fièvre*{.chip .tp data-chip=DIS} | + | *fièvre aiguë*{.chip .tp data-chip=DIS}
| | + +---------------------------------------------+------------------------------------------+ + + Precision, Recall and F1 (micro-average and per‐label) are computed as follows: + + - Precision: `p = |matched items of pred| / |pred|` + - Recall: `r = |matched items of ref| / |ref|` + - F1: `f = 2 / (1/p + 1/f)` + + !!! note "Overlap threshold" + + The threshold is the minimum Dice coefficient to consider two spans as overlapping. Setting + it to 1.0 will yield the same results as the `eds.ner_exact` metric, while setting it to a + near-zero value (e.g., like 1e-14) will match any two spans that share at least one token. + + Examples + -------- + + ```python + from edsnlp.metrics.ner import NerOverlapMetric + + metric = NerOverlapMetric( + span_getter=conv.span_setter, micro_key="micro", threshold=0.5 + ) + metric([ref], [pred]) + # Out: { + # 'micro': {'f': 0.8, 'p': 0.67, 'r': 1.0, 'tp': 2, 'support': 2, 'positives': 3}, + # 'PER': {'f': 0.67, 'p': 0.5, 'r': 1.0, 'tp': 1, 'support': 1, 'positives': 2}, + # 'DIS': {'f': 1.0, 'p': 1.0, 'r': 1.0, 'tp': 1, 'support': 1, 'positives': 1} + # } + ``` + + Parameters + ---------- + span_getter: SpanGetter + The span getter to use to extract the spans from the document + micro_key: str + The key to use to store the micro-averaged results for spans of all types + filter_expr: str + The filter expression to use to filter the documents + threshold: float + The threshold on the Dice coefficient to consider two spans as overlapping + """ # noqa: E501 + def __init__( self, span_getter: SpanGetterArg, diff --git a/edsnlp/metrics/span_attribute.py b/edsnlp/metrics/span_attribute.py new file mode 100644 index 0000000000..d1c7b6dac4 --- /dev/null +++ b/edsnlp/metrics/span_attribute.py @@ -0,0 +1,311 @@ +""" +Metrics for Span Attribute Classification + +# Span Attribute Classification Metrics {: #edsnlp.metrics.span_attribute.SpanAttributeMetric } + +Several NLP tasks consist in classifying existing spans of text into multiple classes, +such as the detection of negation, hypothesis or span linking. + +We provide a metric to evaluate the performance of such tasks, + +Let's look at an example: + ++-------------------------------------------------------------------+-------------------------------------------------------------------+ +| pred | ref | ++===================================================================+===================================================================+ +| Le patient n'est pas *fièvreux*{.chip data-chip="SYMP neg=true"}, | Le patient n'est pas *fièvreux*{.chip data-chip="SYMP neg=true"}, | +| son père a *du diabète*{.chip data-chip="DIS carrier=PATIENT"}. | son père a *du diabète*{.chip data-chip="DIS carrier=FATHER"}. | +| Pas d'évolution du | Pas d'évolution du | +| *cancer*{.chip data-chip="DIS neg=true carrier=PATIENT"}. | *cancer*{.chip data-chip="DIS carrier=PATIENT"}. | ++-------------------------------------------------------------------+-------------------------------------------------------------------+ + +We can quickly create matching documents in EDS-NLP using the following code snippet: + +```python +from edsnlp.data.converters import MarkupToDocConverter + +conv = MarkupToDocConverter(preset="md", span_setter="entities") +# Create a document with predicted attributes and a reference document +pred = conv( + "Le patient n'est pas [fièvreux](SYMP neg=true), " + "son père a [du diabète](DIS neg=false carrier=PATIENT). " + "Pas d'évolution du [cancer](DIS neg=true carrier=PATIENT)." 
+) +ref = conv( + "Le patient n'est pas [fièvreux](SYMP neg=true), " + "son père a [du diabète](DIS neg=false carrier=FATHER). " + "Pas d'évolution du [cancer](DIS neg=false carrier=PATIENT)." +) +``` +""" # noqa: E501 + +import warnings +from collections import defaultdict +from typing import Any, Dict, Optional + +from edsnlp import registry +from edsnlp.metrics import Examples, average_precision, make_examples, prf +from edsnlp.utils.bindings import BINDING_GETTERS, Attributes, AttributesArg +from edsnlp.utils.span_getters import SpanGetterArg, get_spans + + +def span_attribute_metric( + examples: Examples, + span_getter: SpanGetterArg, + attributes: Attributes = None, + include_falsy: bool = False, + default_values: Dict = {}, + micro_key: str = "micro", + filter_expr: Optional[str] = None, + **kwargs: Any, +): + if "qualifiers" in kwargs: + warnings.warn( + "The `qualifiers` argument of span_attribute_metric() is " + "deprecated. Use `attributes` instead.", + DeprecationWarning, + ) + assert attributes is None + attributes = kwargs.pop("qualifiers") + if attributes is None: + raise TypeError( + "span_attribute_metric() missing 1 required argument: 'attributes'" + ) + if kwargs: + raise TypeError( + f"span_attribute_metric() got unexpected keyword arguments: " + f"{', '.join(kwargs.keys())}" + ) + examples = make_examples(examples) + if filter_expr is not None: + filter_fn = eval(f"lambda doc: {filter_expr}") + examples = [eg for eg in examples if filter_fn(eg.reference)] + labels = defaultdict(lambda: (set(), set(), dict())) + labels["micro"] = (set(), set(), dict()) + total_pred_count = 0 + total_gold_count = 0 + + if not include_falsy: + default_values_ = defaultdict(lambda: False) + default_values_.update(default_values) + default_values = default_values_ + del default_values_ + for eg_idx, eg in enumerate(examples): + doc_spans = get_spans(eg.predicted, span_getter) + for span in doc_spans: + total_pred_count += 1 + beg, end = span.start, span.end + for attr, span_filter in attributes.items(): + if not (span_filter is True or span.label_ in span_filter): + continue + getter_key = attr if attr.startswith("_.") else f"_.{attr}" + value = BINDING_GETTERS[getter_key](span) + top_val, top_p = max( + getattr(span._, "prob", {}).get(attr, {}).items(), + key=lambda x: x[1], + default=(value, 1.0), + ) + if (top_val or include_falsy) and default_values[attr] != top_val: + labels[attr][2][(eg_idx, beg, end, attr, top_val)] = top_p + labels[micro_key][2][(eg_idx, beg, end, attr, top_val)] = top_p + if (value or include_falsy) and default_values[attr] != value: + labels[micro_key][0].add((eg_idx, beg, end, attr, value)) + labels[attr][0].add((eg_idx, beg, end, attr, value)) + + doc_spans = get_spans(eg.reference, span_getter) + for span in doc_spans: + total_gold_count += 1 + beg, end = span.start, span.end + for attr, span_filter in attributes.items(): + if not (span_filter is True or span.label_ in span_filter): + continue + getter_key = attr if attr.startswith("_.") else f"_.{attr}" + value = BINDING_GETTERS[getter_key](span) + if (value or include_falsy) and default_values[attr] != value: + labels[micro_key][1].add((eg_idx, beg, end, attr, value)) + labels[attr][1].add((eg_idx, beg, end, attr, value)) + + if total_pred_count != total_gold_count: + raise ValueError( + f"Number of predicted and gold spans differ: {total_pred_count} != " + f"{total_gold_count}. 
Make sure that you are running your span " + "attribute classification pipe on the gold annotations, and not spans " + "predicted by another NER pipe in your model." + ) + + for name, (pred, gold, pred_with_prob) in labels.items(): + print("-", name, "pred/gold", pred, gold, "=>", prf(pred, gold)) + return { + name: { + **prf(pred, gold), + "ap": average_precision(pred_with_prob, gold), + } + for name, (pred, gold, pred_with_prob) in labels.items() + } + + +@registry.metrics.register( + "eds.span_attribute", + deprecated=["eds.span_classification_scorer", "eds.span_attribute_scorer"], +) +class SpanAttributeMetric: + """ + The `eds.span_attribute` metric + evaluates span‐level attribute classification by comparing predicted and gold + attribute values on the same set of spans. For each attribute you specify, it + computes Precision, Recall, F1, number of true positives (tp), number of + gold instances (support), number of predicted instances (positives), and + the Average Precision (ap). A micro‐average over all attributes is also + provided under `micro_key`. + + ```python + from edsnlp.metrics.span_attribute import SpanAttributeMetric + + metric = SpanAttributeMetric( + span_getter=conv.span_setter, + # Evaluated attributes + attributes={ + "neg": True, # 'neg' on every entity + "carrier": ["DIS"], # 'carrier' only on 'DIS' entities + }, + # Ignore these default values when counting matches + default_values={ + "neg": False, + }, + micro_key="micro", + ) + ``` + + Let's enumerate (span -> attr = value) items in our documents. Only the items with + matching span boundaries, attribute name, and value are counted as a true positives. + For instance, with the predicted and reference spans of the example above: + + +--------------------------------------------------+-------------------------------------------------+ + | pred | ref | + +==================================================+=================================================+ + | *fièvreux → neg = True*{.chip .tp}
| *fièvreux → neg = True*{.chip .tp}
| + | *du diabète → neg = False*{.chip .na}
| *du diabète → neg = False*{.chip .na}
| + | *du diabète → carrier = PATIENT*{.chip .fp}
| *du diabète → carrier = FATHER*{.chip .fn}
| + | *cancer → neg = True*{.chip .fp}
| *cancer → neg = False*{.chip .na}
| + | *cancer → carrier = PATIENT*{.chip .tp} | *cancer → carrier = PATIENT*{.chip .tp} | + +--------------------------------------------------+-------------------------------------------------+ + + !!! note "Default values" + + Note that there we don't count "neg=False" items, shown in grey in the table. In EDS-NLP, + this is done by setting `defaults_values={"neg": False}` when creating the metric. This + is quite common in classification tasks, where one of the values is both the most common + and the "default" (hence the name of the parameter). Counting these values would likely + skew the micro-average metrics towards the default value. + + Precision, Recall and F1 (micro-average and per‐label) are computed as follows: + + - Precision: `p = |matched items of pred| / |pred|` + - Recall: `r = |matched items of ref| / |ref|` + - F1: `f = 2 / (1/p + 1/f)` + + This yields the following metrics: + + ```python + metric([ref], [pred]) + # Out: { + # 'micro': {'f': 0.57, 'p': 0.5, 'r': 0.67, 'tp': 2, 'support': 3, 'positives': 4, 'ap': 0.17}, + # 'neg': {'f': 0.67, 'p': 0.5, 'r': 1, 'tp': 1, 'support': 1, 'positives': 2, 'ap': 0.0}, + # 'carrier': {'f': 0.5, 'p': 0.5, 'r': 0.5, 'tp': 1, 'support': 2, 'positives': 2, 'ap': 0.25}, + # } + ``` + + Parameters + ---------- + span_getter : SpanGetterArg + The span getter to extract spans from each `Doc`. + attributes : Mapping[str, Union[bool, Sequence[str]]] + Map each attribute name to `True` (evaluate on all spans) or a sequence of + labels restricting which spans to test. + default_values : Dict[str, Any] + Attribute values to omit from micro‐average counts (e.g., common negative or + default labels). + include_falsy : bool + If `False`, ignore falsy values (e.g., `False`, `None`, `''`) in predictions + or gold when computing metrics; if `True`, count them. + micro_key : str + Key under which to store the micro‐averaged results across all attributes. + filter_expr : Optional[str] + A Python expression (using `doc`) to filter which examples are scored. + + Returns + ------- + Dict[str, Dict[str, float]] + A dictionary mapping each attribute name (and the `micro_key`) to its metrics: + + - `label` or micro_key : + + - `p` : precision + - `r` : recall + - `f` : F1 score + - `tp` : true positive count + - `support` : number of gold instances + - `positives` : number of predicted instances + - `ap` : [average precision](https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision) + """ # noqa: E501 + + attributes: Attributes + + def __init__( + self, + span_getter: SpanGetterArg, + attributes: AttributesArg = None, + qualifiers: AttributesArg = None, + default_values: Dict = {}, + include_falsy: bool = False, + micro_key: str = "micro", + filter_expr: Optional[str] = None, + ): + if qualifiers is not None: + warnings.warn( + "The `qualifiers` argument is deprecated. Use `attributes` instead.", + DeprecationWarning, + ) + self.span_getter = span_getter + self.attributes = attributes or qualifiers + self.default_values = default_values + self.include_falsy = include_falsy + self.micro_key = micro_key + self.filter_expr = filter_expr + + __init__.__doc__ = span_attribute_metric.__doc__ + + def __call__(self, *examples: Any): + """ + Compute the span attribute metrics for the given examples. 
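+
+        For instance, with the `pred` and `ref` documents from the class
+        docstring above, `metric([ref], [pred])` returns the per-attribute and
+        micro-averaged scores shown there.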
+ + Parameters + ---------- + examples : Examples + The examples to score, either a tuple of (golds, preds) or a list of + spacy.training.Example objects + + Returns + ------- + Dict[str, Dict[str, float]] + The scores for the attributes + """ + return span_attribute_metric( + examples, + span_getter=self.span_getter, + attributes=self.attributes, + default_values=self.default_values, + include_falsy=self.include_falsy, + micro_key=self.micro_key, + filter_expr=self.filter_expr, + ) + + +# For backward compatibility +span_classification_scorer = span_attribute_scorer = span_attribute_metric +create_span_attributes_scorer = SpanAttributeScorer = SpanAttributeMetric + +__all__ = [ + "span_attribute_metric", + "SpanAttributeMetric", +] diff --git a/edsnlp/metrics/span_attributes.py b/edsnlp/metrics/span_attributes.py deleted file mode 100644 index be1b2a948e..0000000000 --- a/edsnlp/metrics/span_attributes.py +++ /dev/null @@ -1,182 +0,0 @@ -import warnings -from collections import defaultdict -from typing import Any, Dict, Optional - -from edsnlp import registry -from edsnlp.metrics import Examples, average_precision, make_examples, prf -from edsnlp.utils.bindings import BINDING_GETTERS, Attributes, AttributesArg -from edsnlp.utils.span_getters import SpanGetterArg, get_spans - - -def span_attribute_metric( - examples: Examples, - span_getter: SpanGetterArg, - attributes: Attributes = None, - include_falsy: bool = False, - default_values: Dict = {}, - micro_key: str = "micro", - filter_expr: Optional[str] = None, - **kwargs: Any, -): - """ - Scores the attributes predictions between a list of gold and predicted spans. - - Parameters - ---------- - examples : Examples - The examples to score, either a tuple of (golds, preds) or a list of - spacy.training.Example objects - span_getter : SpanGetterArg - The span getter to use to extract the spans from the document - attributes : Sequence[str] - The attributes to use to score the spans - default_values: Dict - Values to dismiss when computing the micro-average per label. This is - useful to compute precision and recall for certain attributes that have - imbalanced value repartitions, such as "negation", "family related" - or "certainty" attributes. - include_falsy : bool - Whether to count predicted or gold occurrences of falsy values when computing - the metrics. If `False`, only the non-falsy values will be counted and matched - together. - micro_key : str - The key to use to store the micro-averaged results for spans of all types - filter_expr : Optional[str] - The filter expression to use to filter the documents - - Returns - ------- - Dict[str, float] - """ - if "qualifiers" in kwargs: - warnings.warn( - "The `qualifiers` argument of span_attribute_metric() is " - "deprecated. 
Use `attributes` instead.", - DeprecationWarning, - ) - assert attributes is None - attributes = kwargs.pop("qualifiers") - if attributes is None: - raise TypeError( - "span_attribute_metric() missing 1 required argument: 'attributes'" - ) - if kwargs: - raise TypeError( - f"span_attribute_metric() got unexpected keyword arguments: " - f"{', '.join(kwargs.keys())}" - ) - examples = make_examples(examples) - if filter_expr is not None: - filter_fn = eval(f"lambda doc: {filter_expr}") - examples = [eg for eg in examples if filter_fn(eg.reference)] - labels = defaultdict(lambda: (set(), set(), dict())) - labels["micro"] = (set(), set(), dict()) - total_pred_count = 0 - total_gold_count = 0 - - if not include_falsy: - default_values_ = defaultdict(lambda: False) - default_values_.update(default_values) - default_values = default_values_ - del default_values_ - for eg_idx, eg in enumerate(examples): - doc_spans = get_spans(eg.predicted, span_getter) - for span_idx, span in enumerate(doc_spans): - total_pred_count += 1 - for attr, span_filter in attributes.items(): - if not (span_filter is True or span.label_ in span_filter): - continue - getter_key = attr if attr.startswith("_.") else f"_.{attr}" - value = BINDING_GETTERS[getter_key](span) - top_val, top_p = max( - getattr(span._, "prob", {}).get(attr, {}).items(), - key=lambda x: x[1], - default=(value, 1.0), - ) - if (top_val or include_falsy) and default_values[attr] != top_val: - labels[attr][2][(eg_idx, span_idx, attr, top_val)] = top_p - labels[micro_key][2][(eg_idx, span_idx, attr, top_val)] = top_p - if (value or include_falsy) and default_values[attr] != value: - labels[micro_key][0].add((eg_idx, span_idx, attr, value)) - labels[attr][0].add((eg_idx, span_idx, attr, value)) - - doc_spans = get_spans(eg.reference, span_getter) - for span_idx, span in enumerate(doc_spans): - total_gold_count += 1 - for attr, span_filter in attributes.items(): - if not (span_filter is True or span.label_ in span_filter): - continue - getter_key = attr if attr.startswith("_.") else f"_.{attr}" - value = BINDING_GETTERS[getter_key](span) - if (value or include_falsy) and default_values[attr] != value: - labels[micro_key][1].add((eg_idx, span_idx, attr, value)) - labels[attr][1].add((eg_idx, span_idx, attr, value)) - - if total_pred_count != total_gold_count: - raise ValueError( - f"Number of predicted and gold spans differ: {total_pred_count} != " - f"{total_gold_count}. Make sure that you are running your span " - "attribute classification pipe on the gold annotations, and not spans " - "predicted by another NER pipe in your model." - ) - - return { - name: { - **prf(pred, gold), - "ap": average_precision(pred_with_prob, gold), - } - for name, (pred, gold, pred_with_prob) in labels.items() - } - - -@registry.metrics.register( - "eds.span_attribute", - deprecated=["eds.span_classification_scorer", "eds.span_attribute_scorer"], -) -class SpanAttributeMetric: - attributes: Attributes - - def __init__( - self, - span_getter: SpanGetterArg, - attributes: AttributesArg = None, - qualifiers: AttributesArg = None, - default_values: Dict = {}, - include_falsy: bool = False, - micro_key: str = "micro", - filter_expr: Optional[str] = None, - ): - if qualifiers is not None: - warnings.warn( - "The `qualifiers` argument is deprecated. 
Use `attributes` instead.", - DeprecationWarning, - ) - self.span_getter = span_getter - self.attributes = attributes or qualifiers - self.default_values = default_values - self.include_falsy = include_falsy - self.micro_key = micro_key - self.filter_expr = filter_expr - - __init__.__doc__ = span_attribute_metric.__doc__ - - def __call__(self, *examples: Any): - return span_attribute_metric( - examples, - span_getter=self.span_getter, - attributes=self.attributes, - default_values=self.default_values, - include_falsy=self.include_falsy, - micro_key=self.micro_key, - filter_expr=self.filter_expr, - ) - - -# For backward compatibility -span_classification_scorer = span_attribute_scorer = span_attribute_metric -create_span_attributes_scorer = SpanAttributeScorer = SpanAttributeMetric - -__all__ = [ - "span_attribute_metric", - "SpanAttributeMetric", -] diff --git a/mkdocs.yml b/mkdocs.yml index 35d3c30484..272d50ec99 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -146,6 +146,10 @@ nav: - concepts/pipeline.md - concepts/torch-component.md - concepts/inference.md + - Metrics: + - metrics/index.md + - metrics/ner.md + - metrics/span-attribute.md - Utilities: - utilities/index.md - utilities/tests/blocs.md @@ -171,15 +175,14 @@ extra: extra_css: - assets/stylesheets/extra.css - assets/stylesheets/cards.css - - assets/termynal/termynal.css + #- assets/termynal/termynal.css extra_javascript: - - https://cdn.jsdelivr.net/npm/vega@5 - - https://cdn.jsdelivr.net/npm/vega-lite@5 - - https://cdn.jsdelivr.net/npm/vega-embed@6 - - assets/termynal/termynal.js + #- https://cdn.jsdelivr.net/npm/vega@5 + #- https://cdn.jsdelivr.net/npm/vega-lite@5 + #- https://cdn.jsdelivr.net/npm/vega-embed@6 - https://polyfill.io/v3/polyfill.min.js?features=es6 - - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + # - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js watch: - contributing.md @@ -245,8 +248,9 @@ markdown_extensions: slugify: !!python/object/apply:pymdownx.slugs.slugify kwds: case: lower - - pymdownx.arithmatex: - generic: true + #- pymdownx.arithmatex: + # generic: true + - markdown_grid_tables - footnotes - md_in_html - attr_list diff --git a/pyproject.toml b/pyproject.toml index feead31251..47d1e83f92 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,6 +74,7 @@ docs-no-ml = [ "mkdocstrings-python~=1.1", "mkdocs-minify-plugin", "mkdocs-redirects>=1.2.1;python_version>='3.8'", + "markdown-grid-tables==0.4.0", "pybtex~=0.24.0", "pathspec>=0.11.1", # required by vendored mkdocs-autorefs PR "astunparse", @@ -279,21 +280,22 @@ where = ["."] "linear" = "edsnlp.training.optimizer:LinearSchedule" [project.entry-points."spacy_scorers"] -"eds.ner_exact" = "edsnlp.metrics.ner:NerExactMetric" -"eds.ner_token" = "edsnlp.metrics.ner:NerTokenMetric" -"eds.ner_overlap" = "edsnlp.metrics.ner:NerOverlapMetric" -"eds.span_attributes" = "edsnlp.metrics.span_attributes:SpanAttributeMetric" -"eds.dep_parsing" = "edsnlp.metrics.dep_parsing:DependencyParsingMetric" +"eds.ner_exact" = "edsnlp.metrics.ner:NerExactMetric" +"eds.ner_token" = "edsnlp.metrics.ner:NerTokenMetric" +"eds.ner_overlap" = "edsnlp.metrics.ner:NerOverlapMetric" +"eds.span_attribute" = "edsnlp.metrics.span_attribute:SpanAttributeMetric" +"eds.dep_parsing" = "edsnlp.metrics.dep_parsing:DependencyParsingMetric" # Deprecated -"eds.ner_exact_metric" = "edsnlp.metrics.ner:NerExactMetric" -"eds.ner_token_metric" = "edsnlp.metrics.ner:NerTokenMetric" -"eds.ner_overlap_metric" = "edsnlp.metrics.ner:NerOverlapMetric" -"eds.span_attributes_metric" 
= "edsnlp.metrics.span_attributes:SpanAttributeMetric" -"eds.ner_exact_scorer" = "edsnlp.metrics.ner:NerExactMetric" -"eds.ner_token_scorer" = "edsnlp.metrics.ner:NerTokenMetric" -"eds.ner_overlap_scorer" = "edsnlp.metrics.ner:NerOverlapMetric" -"eds.span_attributes_scorer" = "edsnlp.metrics.span_attributes:SpanAttributeMetric" +"eds.ner_exact_metric" = "edsnlp.metrics.ner:NerExactMetric" +"eds.ner_token_metric" = "edsnlp.metrics.ner:NerTokenMetric" +"eds.ner_overlap_metric" = "edsnlp.metrics.ner:NerOverlapMetric" +"eds.span_attributes_metric" = "edsnlp.metrics.span_attributes:SpanAttributeMetric" +"eds.span_attributes" = "edsnlp.metrics.span_attribute:SpanAttributeMetric" +"eds.ner_exact_scorer" = "edsnlp.metrics.ner:NerExactMetric" +"eds.ner_token_scorer" = "edsnlp.metrics.ner:NerTokenMetric" +"eds.ner_overlap_scorer" = "edsnlp.metrics.ner:NerOverlapMetric" +"eds.span_attributes_scorer" = "edsnlp.metrics.span_attributes:SpanAttributeMetric" [project.entry-points."edsnlp_readers"] "spark" = "edsnlp.data:from_spark" diff --git a/tests/test_docs.py b/tests/test_docs.py index b6975e7b74..58de9a3cfa 100644 --- a/tests/test_docs.py +++ b/tests/test_docs.py @@ -4,6 +4,7 @@ import sys import textwrap import warnings +from math import isclose import catalogue import pytest @@ -30,6 +31,42 @@ assert len(url_to_code) > 50 +class nested_approx: + def __init__(self, value, rel=1e-12, abs=1e-12): + self._value, self._rel, self._abs = value, rel, abs + + def __eq__(self, other): + return self._match(self._value, other) + + def __req__(self, other): + return self._match(other, self._value) + + __hash__ = None # keep it un-hashable + + def _match(self, a, b): + if isinstance(a, (int, float)) and isinstance(b, (int, float)): + return isclose(a, b, rel_tol=self._rel, abs_tol=self._abs) + if isinstance(a, (list, tuple)): + return ( + isinstance(b, (list, tuple)) + and len(a) == len(b) + and all(self._match(x, y) for x, y in zip(a, b)) + ) + if isinstance(a, dict): + return ( + isinstance(b, dict) + and a.keys() == b.keys() + and all(self._match(a[k], b[k]) for k in a) + ) + return a == b + + def __repr__(self): + return f"nested_approx({self._value!r}, rel={self._rel}, abs={self._abs})" + + +pytest.nested_approx = nested_approx + + def printer(code: str) -> None: """ Prints a code bloc with lines for easier debugging. 
@@ -62,16 +99,22 @@ def insert_assert_statements(code): if stmt.end_lineno == lineno: if isinstance(stmt, ast.Expr): expected = textwrap.dedent(match.group(1)).replace("\n# ", "\n") + expected_s = expected begin = line_table[stmt.lineno - 1] if not (expected.startswith("'") or expected.startswith('"')): - expected = repr(expected) + expected_s = repr(expected) end = match.end() stmt_str = ast.unparse(stmt) if stmt_str.startswith("print("): stmt_str = stmt_str[len("print") :] repl = f"""\ -value = {stmt_str} -assert {expected} == str(value) +val = {stmt_str} +try: + import ast + expected = ast.literal_eval({expected_s}) +except (ValueError, SyntaxError): + expected = None +assert str(val) == {expected_s} or val == pytest.nested_approx(expected, 0.01, 0.01) """ replacements.append((begin, end, repl)) if isinstance(stmt, ast.For): @@ -83,7 +126,7 @@ def insert_assert_statements(code): repl = f"""\ printed = [] {stmt_str} -assert {expected} == printed +assert printed == {expected} """ replacements.append((begin, end, repl)) @@ -123,6 +166,8 @@ def reset_imports(): def test_code_blocks(url, tmpdir, reset_imports): code = url_to_code[url] code_with_asserts = """ +import pytest + def assert_print(*args, sep=" ", end="\\n", file=None, flush=False): printed.append((sep.join(map(str, args)) + end).rstrip('\\n'))